Compare commits

12 Commits

Author SHA1 Message Date
dayuan.jiang
6086c4177a fix: improve duplicate model validation UX in config dialog
- Add inline error display for duplicate model IDs
- Show red border on input when error exists
- Validate on blur with shake animation for edit errors
- Prevent saving empty model names
- Clear errors when user starts typing
- Simplify error styling (small red text, no heavy chips)
2025-12-22 22:27:49 +09:00
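A minimal sketch of the validate-on-blur pattern this commit describes (inline red error text, red border, error cleared as soon as the user types). The component and prop names below are illustrative only; the error strings are the ones visible in the dialog diff further down.

```tsx
import { useState } from "react"

// Hypothetical standalone version of the duplicate-ID check; the real dialog
// wires this into its provider/model state instead of local props.
function ModelIdField({ existingIds }: { existingIds: string[] }) {
    const [value, setValue] = useState("")
    const [error, setError] = useState("")

    return (
        <div>
            <input
                value={value}
                onChange={(e) => {
                    setValue(e.target.value)
                    if (error) setError("") // clear the error as soon as the user types
                }}
                onBlur={() => {
                    const id = value.trim()
                    if (!id) setError("Model ID cannot be empty")
                    else if (existingIds.includes(id))
                        setError("This model ID already exists")
                }}
                className={error ? "border-red-500" : ""}
            />
            {error && <p className="text-xs text-red-500">{error}</p>}
        </div>
    )
}
```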
dayuan.jiang
33fd2a16e6 fix: improve duplicate model ID notifications
- Add toast notification when trying to add duplicate model
- Allow free typing when editing model ID, validate on blur
- Show warning toast instead of blocking input
2025-12-22 21:57:02 +09:00
dayuan.jiang
41c450516c fix: prevent duplicate model IDs within same provider
- Block adding model if ID already exists in provider
- Block editing model ID to match existing model in provider
2025-12-22 21:53:56 +09:00
dayuan.jiang
0e8783ccfb fix: UI/UX improvements for model configuration dialog
- Add gradient header styling with icon badge
- Change Configuration section icon from Key to Settings2
- Add duplicate model detection with warning banner and inline removal
- Filter out already-added models from suggestions dropdown
- Add type-to-confirm for deleting providers with 3+ models
- Enhance delete confirmation dialog with warning icon
- Improve model selector discoverability (show model name + chevron)
- Add truncation for long model names with title tooltip
- Remove AI provider settings from Settings dialog (now in Model Config)
- Extract ValidationButton into reusable component
2025-12-22 21:49:29 +09:00
dayuan.jiang
7cf6d7e7bd chore: remove unused code
- Remove unused setAccessCodeRequired state in chat-panel.tsx
- Remove unused getSelectedModel export in model-config.ts
2025-12-22 20:46:12 +09:00
dayuan.jiang
7ed7b29274 fix: complete bedrock support and UI/UX improvements
- Add bedrock to ALLOWED_CLIENT_PROVIDERS for client credentials
- Pass AWS credentials through full chain (headers → API → provider)
- Replace non-existent GPT-5 models with real ones (o1, o3-mini)
- Add accessibility: aria-labels, focus-visible rings, inline errors
- Add more AWS regions (Ohio, London, Paris, Mumbai, Seoul, São Paulo)
- Fix setTimeout cleanup with useRef on component unmount
- Fix TypeScript type consistency in getSelectedAIConfig fallback
2025-12-22 20:40:12 +09:00
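The credential chain mentioned here (headers → API → provider) can be pictured roughly as below. The `x-ai-provider` and `x-ai-api-key` header names appear in the chat route diff; the AWS-specific header names are assumptions added for illustration, not taken from the repository.

```ts
// Client side: attach the selected provider's credentials to the chat request (sketch).
function buildAIHeaders(cfg: {
    provider: string
    apiKey?: string
    awsAccessKeyId?: string
    awsSecretAccessKey?: string
    awsRegion?: string
}): Record<string, string> {
    const headers: Record<string, string> = { "x-ai-provider": cfg.provider }
    if (cfg.apiKey) headers["x-ai-api-key"] = cfg.apiKey
    // Bedrock sends AWS credentials instead of a single API key
    // (these header names are assumed, not confirmed by the diff):
    if (cfg.awsAccessKeyId) headers["x-aws-access-key-id"] = cfg.awsAccessKeyId
    if (cfg.awsSecretAccessKey)
        headers["x-aws-secret-access-key"] = cfg.awsSecretAccessKey
    if (cfg.awsRegion) headers["x-aws-region"] = cfg.awsRegion
    return headers
}
```

The API route then reads the same headers and passes them to the provider factory when building the model for the request.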
dayuan.jiang
1be0cfa06c fix: reset validation button to Test after success 2025-12-22 20:13:57 +09:00
dayuan.jiang
1f6ef7ac90 fix: reset Test button after validation completes 2025-12-22 20:11:46 +09:00
dayuan.jiang
56ca9d3f48 feat: add AWS credentials support for Bedrock provider
- Add AWS Access Key ID, Secret Access Key, Region fields for Bedrock
- Show different credential fields based on provider type
- Update validation API to handle Bedrock with AWS credentials
- Add region selector with common AWS regions
2025-12-22 20:09:16 +09:00
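On the validation endpoint side, a Bedrock request with AWS credentials could be checked roughly as follows, assuming the AI SDK Bedrock provider package (`@ai-sdk/amazon-bedrock`); the probe model ID and function name are examples, not the repository's actual implementation.

```ts
import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock"
import { generateText } from "ai"

// Returns true if the supplied AWS credentials can complete a tiny Bedrock call.
async function validateBedrockCredentials(
    accessKeyId: string,
    secretAccessKey: string,
    region: string,
): Promise<boolean> {
    const bedrock = createAmazonBedrock({ region, accessKeyId, secretAccessKey })
    try {
        await generateText({
            model: bedrock("anthropic.claude-3-haiku-20240307-v1:0"),
            prompt: "ping",
        })
        return true
    } catch {
        return false
    }
}
```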
dayuan.jiang
e089702949 refactor: revert shadcn component changes, apply hover fix at usage site 2025-12-22 20:02:58 +09:00
dayuan.jiang
89b0a96b95 feat: improve model config UI and move selector to chat input
- Move model selector from header to chat input (left of send button)
- Add per-model validation status (queued, running, valid, invalid)
- Filter model selector to only show verified models
- Add editable model IDs in config dialog
- Add custom model input field alongside suggested models dropdown
- Fix hover states on provider buttons and select triggers
- Update OpenAI suggested models with GPT-5 series
- Add alert-dialog component for delete confirmation
2025-12-22 20:00:45 +09:00
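The per-model validation status and verified-only filtering described here might look like the sketch below. The exact type and field names in the repo may differ, though the `validated` flag and the `validated === true` filter do appear in the component diffs later in this compare.

```ts
type ModelValidationStatus = "queued" | "running" | "valid" | "invalid"

interface ConfiguredModel {
    id: string          // editable model ID shown in the config dialog
    validated?: boolean // true = passed, false = failed, undefined = not tested yet
}

// The selector next to the send button only offers models that passed validation.
const verifiedModels = (models: ConfiguredModel[]) =>
    models.filter((m) => m.validated === true)
```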
dayuan.jiang
1e916aa86e feat: add multi-provider model configuration
- Add model config dialog for managing multiple AI providers
- Support for OpenAI, Anthropic, Google, Azure, Bedrock, OpenRouter, DeepSeek, SiliconFlow, Ollama, and AI Gateway
- Add model selector dropdown in chat panel header
- Add API key validation endpoint
- Add custom model ID input with keyboard navigation
- Fix hover highlight in Command component
- Add suggested models for each provider including latest Claude 4.5 series
- Store configuration locally in browser
2025-12-22 17:58:05 +09:00
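Since the configuration (including API keys) is stored locally in the browser, persistence can be as simple as the sketch below. The storage key and exact config shape are assumptions; only the fact that the config lives in the browser comes from the commit.

```ts
const STORAGE_KEY = "next-ai-draw-io-model-config" // hypothetical key

interface StoredProvider {
    id: string
    provider: string // "openai" | "anthropic" | "bedrock" | ...
    name?: string
    apiKey?: string
    models: { id: string; validated?: boolean }[]
}

function loadModelConfig(): { providers: StoredProvider[] } {
    try {
        const raw = localStorage.getItem(STORAGE_KEY)
        return raw ? JSON.parse(raw) : { providers: [] }
    } catch {
        return { providers: [] }
    }
}

function saveModelConfig(config: { providers: StoredProvider[] }): void {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(config))
}
```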
36 changed files with 1288 additions and 2625 deletions

View File

@@ -63,8 +63,6 @@ jobs:
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
build-args: |
NEXT_PUBLIC_SHOW_ABOUT_AND_NOTICE=true
# Push to AWS ECR for App Runner auto-deploy
- name: Configure AWS credentials

View File

@@ -26,10 +26,6 @@ ENV NEXT_TELEMETRY_DISABLED=1
ARG NEXT_PUBLIC_DRAWIO_BASE_URL=https://embed.diagrams.net
ENV NEXT_PUBLIC_DRAWIO_BASE_URL=${NEXT_PUBLIC_DRAWIO_BASE_URL}
# Build-time argument to show About link and Notice icon
ARG NEXT_PUBLIC_SHOW_ABOUT_AND_NOTICE=false
ENV NEXT_PUBLIC_SHOW_ABOUT_AND_NOTICE=${NEXT_PUBLIC_SHOW_ABOUT_AND_NOTICE}
# Build Next.js application (standalone mode)
RUN npm run build

View File

@@ -117,9 +117,9 @@ export default function AboutCN() {
(TPS/TPM)
</p>
<p>
使 Opus 4.5 {" "}
使 Claude {" "}
<span className="font-semibold text-amber-700">
Haiku 4.5
minimax-m2
</span>
</p>

View File

@@ -126,9 +126,9 @@ export default function AboutJA() {
</p>
<p>
Opus 4.5 {" "}
Claude {" "}
<span className="font-semibold text-amber-700">
Haiku 4.5
minimax-m2
</span>{" "}
</p>

View File

@@ -129,9 +129,9 @@ export default function About() {
</p>
<p>
Due to the high usage, I have changed the
model from Opus 4.5 to{" "}
model from Claude to{" "}
<span className="font-semibold text-amber-700">
Haiku 4.5
minimax-m2
</span>
, which is more cost-effective.
</p>

View File

@@ -14,11 +14,6 @@ import path from "path"
import { z } from "zod"
import { getAIModel, supportsPromptCaching } from "@/lib/ai-providers"
import { findCachedResponse } from "@/lib/cached-responses"
import {
checkAndIncrementRequest,
isQuotaEnabled,
recordTokenUsage,
} from "@/lib/dynamo-quota-manager"
import {
getTelemetryConfig,
setTraceInput,
@@ -167,13 +162,9 @@ async function handleChatRequest(req: Request): Promise<Response> {
const { messages, xml, previousXml, sessionId } = await req.json()
// Get user IP for Langfuse tracking (hashed for privacy)
// Get user IP for Langfuse tracking
const forwardedFor = req.headers.get("x-forwarded-for")
const rawIp = forwardedFor?.split(",")[0]?.trim() || "anonymous"
const userId =
rawIp === "anonymous"
? rawIp
: `user-${Buffer.from(rawIp).toString("base64url").slice(0, 8)}`
const userId = forwardedFor?.split(",")[0]?.trim() || "anonymous"
// Validate sessionId for Langfuse (must be string, max 200 chars)
const validSessionId =
@@ -182,12 +173,9 @@ async function handleChatRequest(req: Request): Promise<Response> {
: undefined
// Extract user input text for Langfuse trace
// Find the last USER message, not just the last message (which could be assistant in multi-step tool flows)
const lastUserMessage = [...messages]
.reverse()
.find((m: any) => m.role === "user")
const lastMessage = messages[messages.length - 1]
const userInputText =
lastUserMessage?.parts?.find((p: any) => p.type === "text")?.text || ""
lastMessage?.parts?.find((p: any) => p.type === "text")?.text || ""
// Update Langfuse trace with input, session, and user
setTraceInput({
@@ -196,33 +184,6 @@ async function handleChatRequest(req: Request): Promise<Response> {
userId: userId,
})
// === SERVER-SIDE QUOTA CHECK START ===
// Quota is opt-in: only enabled when DYNAMODB_QUOTA_TABLE env var is set
const hasOwnApiKey = !!(
req.headers.get("x-ai-provider") && req.headers.get("x-ai-api-key")
)
// Skip quota check if: quota disabled, user has own API key, or is anonymous
if (isQuotaEnabled() && !hasOwnApiKey && userId !== "anonymous") {
const quotaCheck = await checkAndIncrementRequest(userId, {
requests: Number(process.env.DAILY_REQUEST_LIMIT) || 10,
tokens: Number(process.env.DAILY_TOKEN_LIMIT) || 200000,
tpm: Number(process.env.TPM_LIMIT) || 20000,
})
if (!quotaCheck.allowed) {
return Response.json(
{
error: quotaCheck.error,
type: quotaCheck.type,
used: quotaCheck.used,
limit: quotaCheck.limit,
},
{ status: 429 },
)
}
}
// === SERVER-SIDE QUOTA CHECK END ===
// === FILE VALIDATION START ===
const fileValidation = validateFileParts(messages)
if (!fileValidation.valid) {
@@ -276,10 +237,9 @@ async function handleChatRequest(req: Request): Promise<Response> {
// Get the appropriate system prompt based on model (extended for Opus/Haiku 4.5)
const systemMessage = getSystemPrompt(modelId, minimalStyle)
// Extract file parts (images) from the last user message
// Extract file parts (images) from the last message
const fileParts =
lastUserMessage?.parts?.filter((part: any) => part.type === "file") ||
[]
lastMessage.parts?.filter((part: any) => part.type === "file") || []
// User input only - XML is now in a separate cached system message
const formattedUserInput = `User input:
@@ -288,7 +248,7 @@ ${userInputText}
"""`
// Convert UIMessages to ModelMessages and add system message
const modelMessages = await convertToModelMessages(messages)
const modelMessages = convertToModelMessages(messages)
// DEBUG: Log incoming messages structure
console.log("[route.ts] Incoming messages count:", messages.length)
@@ -542,26 +502,12 @@ ${userInputText}
userId,
}),
}),
onFinish: ({ text, totalUsage }) => {
// AI SDK 6 telemetry auto-reports token usage on its spans
setTraceOutput(text)
// Record token usage for server-side quota tracking (if enabled)
// Use totalUsage (cumulative across all steps) instead of usage (final step only)
// Include all 4 token types: input, output, cache read, cache write
if (
isQuotaEnabled() &&
!hasOwnApiKey &&
userId !== "anonymous" &&
totalUsage
) {
const totalTokens =
(totalUsage.inputTokens || 0) +
(totalUsage.outputTokens || 0) +
(totalUsage.cachedInputTokens || 0) +
(totalUsage.inputTokenDetails?.cacheWriteTokens || 0)
recordTokenUsage(userId, totalTokens)
}
onFinish: ({ text, usage }) => {
// Pass usage to Langfuse (Bedrock streaming doesn't auto-report tokens to telemetry)
setTraceOutput(text, {
promptTokens: usage?.inputTokens,
completionTokens: usage?.outputTokens,
})
},
tools: {
// Client-side tool that will be executed on the client
@@ -731,9 +677,19 @@ Call this tool to get shape names and usage syntax for a specific library.`,
messageMetadata: ({ part }) => {
if (part.type === "finish") {
const usage = (part as any).totalUsage
// AI SDK 6 provides totalTokens directly
if (!usage) {
console.warn(
"[messageMetadata] No usage data in finish part",
)
return undefined
}
// Total input = non-cached + cached (these are separate counts)
// Note: cacheWriteInputTokens is not available on finish part
const totalInputTokens =
(usage.inputTokens ?? 0) + (usage.cachedInputTokens ?? 0)
return {
totalTokens: usage?.totalTokens ?? 0,
inputTokens: totalInputTokens,
outputTokens: usage.outputTokens ?? 0,
finishReason: (part as any).finishReason,
}
}

View File

@@ -27,18 +27,9 @@ export async function POST(req: Request) {
const { messageId, feedback, sessionId } = data
// Skip logging if no sessionId - prevents attaching to wrong user's trace
if (!sessionId) {
return Response.json({ success: true, logged: false })
}
// Get user IP for tracking (hashed for privacy)
// Get user IP for tracking
const forwardedFor = req.headers.get("x-forwarded-for")
const rawIp = forwardedFor?.split(",")[0]?.trim() || "anonymous"
const userId =
rawIp === "anonymous"
? rawIp
: `user-${Buffer.from(rawIp).toString("base64url").slice(0, 8)}`
const userId = forwardedFor?.split(",")[0]?.trim() || "anonymous"
try {
// Find the most recent chat trace for this session to attach the score to

View File

@@ -27,11 +27,6 @@ export async function POST(req: Request) {
const { filename, format, sessionId } = data
// Skip logging if no sessionId - prevents attaching to wrong user's trace
if (!sessionId) {
return Response.json({ success: true, logged: false })
}
try {
const timestamp = new Date().toISOString()

View File

@@ -11,66 +11,6 @@ import { createOllama } from "ollama-ai-provider-v2"
export const runtime = "nodejs"
/**
* SECURITY: Check if URL points to private/internal network (SSRF protection)
* Blocks: localhost, private IPs, link-local, AWS metadata service
*/
function isPrivateUrl(urlString: string): boolean {
try {
const url = new URL(urlString)
const hostname = url.hostname.toLowerCase()
// Block localhost
if (
hostname === "localhost" ||
hostname === "127.0.0.1" ||
hostname === "::1"
) {
return true
}
// Block AWS/cloud metadata endpoints
if (
hostname === "169.254.169.254" ||
hostname === "metadata.google.internal"
) {
return true
}
// Check for private IPv4 ranges
const ipv4Match = hostname.match(
/^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$/,
)
if (ipv4Match) {
const [, a, b] = ipv4Match.map(Number)
// 10.0.0.0/8
if (a === 10) return true
// 172.16.0.0/12
if (a === 172 && b >= 16 && b <= 31) return true
// 192.168.0.0/16
if (a === 192 && b === 168) return true
// 169.254.0.0/16 (link-local)
if (a === 169 && b === 254) return true
// 127.0.0.0/8 (loopback)
if (a === 127) return true
}
// Block common internal hostnames
if (
hostname.endsWith(".local") ||
hostname.endsWith(".internal") ||
hostname.endsWith(".localhost")
) {
return true
}
return false
} catch {
// Invalid URL - block it
return true
}
}
interface ValidateRequest {
provider: string
apiKey: string
@@ -102,14 +42,6 @@ export async function POST(req: Request) {
)
}
// SECURITY: Block SSRF attacks via custom baseUrl
if (baseUrl && isPrivateUrl(baseUrl)) {
return NextResponse.json(
{ valid: false, error: "Invalid base URL" },
{ status: 400 },
)
}
// Validate credentials based on provider
if (provider === "bedrock") {
if (!awsAccessKeyId || !awsSecretAccessKey || !awsRegion) {

View File

@@ -144,68 +144,6 @@
--sidebar-ring: oklch(0.7 0.16 265);
}
/* ============================================
REFINED MINIMAL DESIGN SYSTEM
============================================ */
:root {
/* Surface layers for depth */
--surface-0: oklch(1 0 0);
--surface-1: oklch(0.985 0.002 240);
--surface-2: oklch(0.97 0.004 240);
--surface-elevated: oklch(1 0 0);
/* Subtle borders */
--border-subtle: oklch(0.94 0.008 260);
--border-default: oklch(0.91 0.012 260);
/* Interactive states */
--interactive-hover: oklch(0.96 0.015 260);
--interactive-active: oklch(0.93 0.02 265);
/* Success state */
--success: oklch(0.65 0.18 145);
--success-muted: oklch(0.95 0.03 145);
/* Animation timing */
--duration-fast: 120ms;
--duration-normal: 200ms;
--duration-slow: 300ms;
--ease-out: cubic-bezier(0.16, 1, 0.3, 1);
--ease-in-out: cubic-bezier(0.4, 0, 0.2, 1);
--ease-spring: cubic-bezier(0.34, 1.56, 0.64, 1);
}
.dark {
--surface-0: oklch(0.15 0.015 260);
--surface-1: oklch(0.18 0.015 260);
--surface-2: oklch(0.22 0.015 260);
--surface-elevated: oklch(0.25 0.015 260);
--border-subtle: oklch(0.25 0.012 260);
--border-default: oklch(0.3 0.015 260);
--interactive-hover: oklch(0.25 0.02 265);
--interactive-active: oklch(0.3 0.025 270);
--success: oklch(0.7 0.16 145);
--success-muted: oklch(0.25 0.04 145);
}
/* Expose surface colors to Tailwind */
@theme inline {
--color-surface-0: var(--surface-0);
--color-surface-1: var(--surface-1);
--color-surface-2: var(--surface-2);
--color-surface-elevated: var(--surface-elevated);
--color-border-subtle: var(--border-subtle);
--color-border-default: var(--border-default);
--color-interactive-hover: var(--interactive-hover);
--color-interactive-active: var(--interactive-active);
--color-success: var(--success);
--color-success-muted: var(--success-muted);
}
@layer base {
* {
@apply border-border outline-ring/50;
@@ -319,83 +257,3 @@
-webkit-text-fill-color: transparent;
background-clip: text;
}
/* ============================================
REFINED DIALOG STYLES
============================================ */
/* Refined dialog shadow - multi-layer soft shadow */
.shadow-dialog {
box-shadow:
0 0 0 1px oklch(0 0 0 / 0.03),
0 2px 4px oklch(0 0 0 / 0.02),
0 12px 24px oklch(0 0 0 / 0.06),
0 24px 48px oklch(0 0 0 / 0.04);
}
.dark .shadow-dialog {
box-shadow:
0 0 0 1px oklch(1 0 0 / 0.05),
0 2px 4px oklch(0 0 0 / 0.2),
0 12px 24px oklch(0 0 0 / 0.3),
0 24px 48px oklch(0 0 0 / 0.2);
}
/* Dialog animations */
@keyframes dialog-in {
from {
opacity: 0;
transform: translate(-50%, -48%) scale(0.96);
}
to {
opacity: 1;
transform: translate(-50%, -50%) scale(1);
}
}
@keyframes dialog-out {
from {
opacity: 1;
transform: translate(-50%, -50%) scale(1);
}
to {
opacity: 0;
transform: translate(-50%, -48%) scale(0.96);
}
}
.animate-dialog-in {
animation: dialog-in var(--duration-normal) var(--ease-out) forwards;
}
.animate-dialog-out {
animation: dialog-out 150ms var(--ease-out) forwards;
}
/* Check pop animation for validation success */
@keyframes check-pop {
0% {
transform: scale(0.8);
opacity: 0;
}
50% {
transform: scale(1.1);
}
100% {
transform: scale(1);
opacity: 1;
}
}
.animate-check-pop {
animation: check-pop 0.25s var(--ease-spring) forwards;
}
/* Reduced motion support */
@media (prefers-reduced-motion: reduce) {
.animate-dialog-in,
.animate-dialog-out,
.animate-check-pop {
animation: none;
}
}

View File

@@ -1,24 +1,24 @@
import type { MetadataRoute } from "next"
import { getAssetUrl } from "@/lib/base-path"
export default function manifest(): MetadataRoute.Manifest {
return {
name: "Next AI Draw.io",
short_name: "AIDraw.io",
description:
"Create AWS architecture diagrams, flowcharts, and technical diagrams using AI. Free online tool integrating draw.io with AI assistance for professional diagram creation.",
start_url: getAssetUrl("/"),
start_url: "/",
display: "standalone",
background_color: "#f9fafb",
theme_color: "#171d26",
icons: [
{
src: getAssetUrl("/favicon-192x192.png"),
src: "/favicon-192x192.png",
sizes: "192x192",
type: "image/png",
purpose: "any",
},
{
src: getAssetUrl("/favicon-512x512.png"),
src: "/favicon-512x512.png",
sizes: "512x512",
type: "image/png",
purpose: "any",

View File

@@ -9,7 +9,6 @@ import {
Zap,
} from "lucide-react"
import { useDictionary } from "@/hooks/use-dictionary"
import { getAssetUrl } from "@/lib/base-path"
interface ExampleCardProps {
icon: React.ReactNode
@@ -80,7 +79,7 @@ export default function ExamplePanel({
setInput("Replicate this flowchart.")
try {
const response = await fetch(getAssetUrl("/example.png"))
const response = await fetch("/example.png")
const blob = await response.blob()
const file = new File([blob], "example.png", { type: "image/png" })
setFiles([file])
@@ -93,7 +92,7 @@ export default function ExamplePanel({
setInput("Replicate this in aws style")
try {
const response = await fetch(getAssetUrl("/architecture.png"))
const response = await fetch("/architecture.png")
const blob = await response.blob()
const file = new File([blob], "architecture.png", {
type: "image/png",
@@ -108,7 +107,7 @@ export default function ExamplePanel({
setInput("Summarize this paper as a diagram")
try {
const response = await fetch(getAssetUrl("/chain-of-thought.txt"))
const response = await fetch("/chain-of-thought.txt")
const blob = await response.blob()
const file = new File([blob], "chain-of-thought.txt", {
type: "text/plain",

View File

@@ -27,11 +27,9 @@ import {
ReasoningTrigger,
} from "@/components/ai-elements/reasoning"
import { ScrollArea } from "@/components/ui/scroll-area"
import { getApiEndpoint } from "@/lib/base-path"
import {
applyDiagramOperations,
convertToLegalXml,
extractCompleteMxCells,
isMxCellXmlComplete,
replaceNodes,
validateAndFixXml,
@@ -293,7 +291,7 @@ export function ChatMessageDisplay({
setFeedback((prev) => ({ ...prev, [messageId]: value }))
try {
await fetch(getApiEndpoint("/api/log-feedback"), {
await fetch("/api/log-feedback", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
@@ -316,28 +314,12 @@ export function ChatMessageDisplay({
const handleDisplayChart = useCallback(
(xml: string, showToast = false) => {
let currentXml = xml || ""
const startTime = performance.now()
// During streaming (showToast=false), extract only complete mxCell elements
// This allows progressive rendering even with partial/incomplete trailing XML
if (!showToast) {
const completeCells = extractCompleteMxCells(currentXml)
if (!completeCells) {
return
}
currentXml = completeCells
}
const currentXml = xml || ""
const convertedXml = convertToLegalXml(currentXml)
if (convertedXml !== previousXML.current) {
// Parse and validate XML BEFORE calling replaceNodes
const parser = new DOMParser()
// Wrap in root element for parsing multiple mxCell elements
const testDoc = parser.parseFromString(
`<root>${convertedXml}</root>`,
"text/xml",
)
const testDoc = parser.parseFromString(convertedXml, "text/xml")
const parseError = testDoc.querySelector("parsererror")
if (parseError) {
@@ -364,22 +346,7 @@ export function ChatMessageDisplay({
`<mxfile><diagram name="Page-1" id="page-1"><mxGraphModel><root><mxCell id="0"/><mxCell id="1" parent="0"/></root></mxGraphModel></diagram></mxfile>`
const replacedXML = replaceNodes(baseXML, convertedXml)
const xmlProcessTime = performance.now() - startTime
// During streaming (showToast=false), skip heavy validation for lower latency
// The quick DOM parse check above catches malformed XML
// Full validation runs on final output (showToast=true)
if (!showToast) {
previousXML.current = convertedXml
const loadStartTime = performance.now()
onDisplayChart(replacedXML, true)
console.log(
`[Streaming] XML processing: ${xmlProcessTime.toFixed(1)}ms, drawio load: ${(performance.now() - loadStartTime).toFixed(1)}ms`,
)
return
}
// Final output: run full validation and auto-fix
// Validate and auto-fix the XML
const validation = validateAndFixXml(replacedXML)
if (validation.valid) {
previousXML.current = convertedXml
@@ -392,19 +359,18 @@ export function ChatMessageDisplay({
)
}
// Skip validation in loadDiagram since we already validated above
const loadStartTime = performance.now()
onDisplayChart(xmlToLoad, true)
console.log(
`[Final] XML processing: ${xmlProcessTime.toFixed(1)}ms, validation+load: ${(performance.now() - loadStartTime).toFixed(1)}ms`,
)
} else {
console.error(
"[ChatMessageDisplay] XML validation failed:",
validation.error,
)
toast.error(
"Diagram validation failed. Please try regenerating.",
)
// Only show toast if this is the final XML (not during streaming)
if (showToast) {
toast.error(
"Diagram validation failed. Please try regenerating.",
)
}
}
} catch (error) {
console.error(
@@ -636,10 +602,17 @@ export function ChatMessageDisplay({
}
})
// NOTE: Don't cleanup debounce timeouts here!
// The cleanup runs on every re-render (when messages changes),
// which would cancel the timeout before it fires.
// Let the timeouts complete naturally - they're harmless if component unmounts.
// Cleanup: clear any pending debounce timeout on unmount
return () => {
if (debounceTimeoutRef.current) {
clearTimeout(debounceTimeoutRef.current)
debounceTimeoutRef.current = null
}
if (editDebounceTimeoutRef.current) {
clearTimeout(editDebounceTimeoutRef.current)
editDebounceTimeoutRef.current = null
}
}
}, [messages, handleDisplayChart, chartXML])
const renderToolPart = (part: ToolPartLike) => {

View File

@@ -21,21 +21,16 @@ import { ChatInput } from "@/components/chat-input"
import { ModelConfigDialog } from "@/components/model-config-dialog"
import { ResetWarningModal } from "@/components/reset-warning-modal"
import { SettingsDialog } from "@/components/settings-dialog"
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip"
import { useDiagram } from "@/contexts/diagram-context"
import { useDictionary } from "@/hooks/use-dictionary"
import { getSelectedAIConfig, useModelConfig } from "@/hooks/use-model-config"
import { getApiEndpoint } from "@/lib/base-path"
import { findCachedResponse } from "@/lib/cached-responses"
import { isPdfFile, isTextFile } from "@/lib/pdf-utils"
import { type FileData, useFileProcessor } from "@/lib/use-file-processor"
import { useQuotaManager } from "@/lib/use-quota-manager"
import { formatXML, isMxCellXmlComplete, wrapWithMxFile } from "@/lib/utils"
import { ChatMessageDisplay } from "./chat-message-display"
import LanguageToggle from "./language-toggle"
// localStorage keys for persistence
const STORAGE_MESSAGES_KEY = "next-ai-draw-io-messages"
@@ -76,7 +71,6 @@ interface ChatPanelProps {
const TOOL_ERROR_STATE = "output-error" as const
const DEBUG = process.env.NODE_ENV === "development"
const MAX_AUTO_RETRY_COUNT = 1
const MAX_CONTINUATION_RETRY_COUNT = 2 // Limit for truncation continuation retries
/**
* Check if auto-resubmit should happen based on tool errors.
@@ -174,7 +168,7 @@ export default function ChatPanel({
// Check config on mount
useEffect(() => {
fetch(getApiEndpoint("/api/config"))
fetch("/api/config")
.then((res) => res.json())
.then((data) => {
setDailyRequestLimit(data.dailyRequestLimit || 0)
@@ -217,8 +211,6 @@ export default function ChatPanel({
// Ref to track consecutive auto-retry count (reset on user action)
const autoRetryCountRef = useRef(0)
// Ref to track continuation retry count (for truncation handling)
const continuationRetryCountRef = useRef(0)
// Ref to accumulate partial XML when output is truncated due to maxOutputTokens
// When partialXmlRef.current.length > 0, we're in continuation mode
@@ -247,7 +239,7 @@ export default function ChatPanel({
setMessages,
} = useChat({
transport: new DefaultChatTransport({
api: getApiEndpoint("/api/chat"),
api: "/api/chat",
}),
async onToolCall({ toolCall }) {
if (DEBUG) {
@@ -556,43 +548,6 @@ Continue from EXACTLY where you stopped.`,
}
},
onError: (error) => {
// Handle server-side quota limit (429 response)
// AI SDK puts the full response body in error.message for non-OK responses
try {
const data = JSON.parse(error.message)
if (data.type === "request") {
quotaManager.showQuotaLimitToast(data.used, data.limit)
return
}
if (data.type === "token") {
quotaManager.showTokenLimitToast(data.used, data.limit)
return
}
if (data.type === "tpm") {
quotaManager.showTPMLimitToast(data.limit)
return
}
} catch {
// Not JSON, fall through to string matching for backwards compatibility
}
// Fallback to string matching
if (error.message.includes("Daily request limit")) {
quotaManager.showQuotaLimitToast()
return
}
if (error.message.includes("Daily token limit")) {
quotaManager.showTokenLimitToast()
return
}
if (
error.message.includes("Rate limit exceeded") ||
error.message.includes("tokens per minute")
) {
quotaManager.showTPMLimitToast()
return
}
// Silence access code error in console since it's handled by UI
if (!error.message.includes("Invalid or missing access code")) {
console.error("Chat error:", error)
@@ -669,6 +624,22 @@ Continue from EXACTLY where you stopped.`,
// DEBUG: Log finish reason to diagnose truncation
console.log("[onFinish] finishReason:", metadata?.finishReason)
console.log("[onFinish] metadata:", metadata)
if (metadata) {
// Use Number.isFinite to guard against NaN (typeof NaN === 'number' is true)
const inputTokens = Number.isFinite(metadata.inputTokens)
? (metadata.inputTokens as number)
: 0
const outputTokens = Number.isFinite(metadata.outputTokens)
? (metadata.outputTokens as number)
: 0
const actualTokens = inputTokens + outputTokens
if (actualTokens > 0) {
quotaManager.incrementTokenCount(actualTokens)
quotaManager.incrementTPMCount(actualTokens)
}
}
},
sendAutomaticallyWhen: ({ messages }) => {
const isInContinuationMode = partialXmlRef.current.length > 0
@@ -680,25 +651,15 @@ Continue from EXACTLY where you stopped.`,
if (!shouldRetry) {
// No error, reset retry count and clear state
autoRetryCountRef.current = 0
continuationRetryCountRef.current = 0
partialXmlRef.current = ""
return false
}
// Continuation mode: limited retries for truncation handling
// Continuation mode: unlimited retries (truncation continuation, not real errors)
// Server limits to 5 steps via stepCountIs(5)
if (isInContinuationMode) {
if (
continuationRetryCountRef.current >=
MAX_CONTINUATION_RETRY_COUNT
) {
toast.error(
`Continuation retry limit reached (${MAX_CONTINUATION_RETRY_COUNT}). The diagram may be too complex.`,
)
continuationRetryCountRef.current = 0
partialXmlRef.current = ""
return false
}
continuationRetryCountRef.current++
// Don't count against retry limit for continuation
// Quota checks still apply below
} else {
// Regular error: check retry count limit
if (autoRetryCountRef.current >= MAX_AUTO_RETRY_COUNT) {
@@ -713,6 +674,23 @@ Continue from EXACTLY where you stopped.`,
autoRetryCountRef.current++
}
// Check quota limits before auto-retry
const tokenLimitCheck = quotaManager.checkTokenLimit()
if (!tokenLimitCheck.allowed) {
quotaManager.showTokenLimitToast(tokenLimitCheck.used)
autoRetryCountRef.current = 0
partialXmlRef.current = ""
return false
}
const tpmCheck = quotaManager.checkTPMLimit()
if (!tpmCheck.allowed) {
quotaManager.showTPMLimitToast()
autoRetryCountRef.current = 0
partialXmlRef.current = ""
return false
}
return true
},
})
@@ -929,6 +907,9 @@ Continue from EXACTLY where you stopped.`,
xmlSnapshotsRef.current.set(messageIndex, chartXml)
saveXmlSnapshots()
// Check all quota limits
if (!checkAllQuotaLimits()) return
sendChatMessage(parts, chartXml, previousXml, sessionId)
// Token count is tracked in onFinish with actual server usage
@@ -1006,7 +987,30 @@ Continue from EXACTLY where you stopped.`,
saveXmlSnapshots()
}
// Send chat message with headers
// Check all quota limits (daily requests, tokens, TPM)
const checkAllQuotaLimits = (): boolean => {
const limitCheck = quotaManager.checkDailyLimit()
if (!limitCheck.allowed) {
quotaManager.showQuotaLimitToast()
return false
}
const tokenLimitCheck = quotaManager.checkTokenLimit()
if (!tokenLimitCheck.allowed) {
quotaManager.showTokenLimitToast(tokenLimitCheck.used)
return false
}
const tpmCheck = quotaManager.checkTPMLimit()
if (!tpmCheck.allowed) {
quotaManager.showTPMLimitToast()
return false
}
return true
}
// Send chat message with headers and increment quota
const sendChatMessage = (
parts: any,
xml: string,
@@ -1015,7 +1019,6 @@ Continue from EXACTLY where you stopped.`,
) => {
// Reset all retry/continuation state on user-initiated message
autoRetryCountRef.current = 0
continuationRetryCountRef.current = 0
partialXmlRef.current = ""
const config = getSelectedAIConfig()
@@ -1056,6 +1059,7 @@ Continue from EXACTLY where you stopped.`,
},
},
)
quotaManager.incrementRequestCount()
}
// Process files and append content to user text (handles PDF, text, and optionally images)
@@ -1143,8 +1147,13 @@ Continue from EXACTLY where you stopped.`,
setMessages(newMessages)
})
// Check all quota limits
if (!checkAllQuotaLimits()) return
// Now send the message after state is guaranteed to be updated
sendChatMessage(userParts, savedXml, previousXml, sessionId)
// Token count is tracked in onFinish with actual server usage
}
const handleEditMessage = async (messageIndex: number, newText: string) => {
@@ -1186,8 +1195,12 @@ Continue from EXACTLY where you stopped.`,
setMessages(newMessages)
})
// Check all quota limits
if (!checkAllQuotaLimits()) return
// Now send the edited message after state is guaranteed to be updated
sendChatMessage(newParts, savedXml, previousXml, sessionId)
// Token count is tracked in onFinish with actual server usage
}
// Collapsed view (desktop only)
@@ -1251,18 +1264,32 @@ Continue from EXACTLY where you stopped.`,
Next AI Drawio
</h1>
</div>
{!isMobile &&
process.env.NEXT_PUBLIC_SHOW_ABOUT_AND_NOTICE ===
"true" && (
<Link
href="/about"
target="_blank"
rel="noopener noreferrer"
className="text-sm text-muted-foreground hover:text-foreground transition-colors ml-2"
{!isMobile && (
<Link
href="/about"
target="_blank"
rel="noopener noreferrer"
className="text-sm text-muted-foreground hover:text-foreground transition-colors ml-2"
>
About
</Link>
)}
{!isMobile && (
<Link
href="/about"
target="_blank"
rel="noopener noreferrer"
>
<ButtonWithTooltip
tooltipContent="Due to high usage, I have changed the model to minimax-m2 and added some usage limits. See About page for details."
variant="ghost"
size="icon"
className="h-6 w-6 text-amber-500 hover:text-amber-600"
>
About
</Link>
)}
<AlertTriangle className="h-4 w-4" />
</ButtonWithTooltip>
</Link>
)}
</div>
<div className="flex items-center gap-1 justify-end overflow-visible">
<ButtonWithTooltip
@@ -1277,23 +1304,16 @@ Continue from EXACTLY where you stopped.`,
/>
</ButtonWithTooltip>
<div className="w-px h-5 bg-border mx-1" />
<Tooltip>
<TooltipTrigger asChild>
<a
href="https://github.com/DayuanJiang/next-ai-draw-io"
target="_blank"
rel="noopener noreferrer"
className="inline-flex items-center justify-center h-9 w-9 rounded-md text-muted-foreground hover:text-foreground hover:bg-accent transition-colors"
>
<FaGithub
className={`${isMobile ? "w-4 h-4" : "w-5 h-5"}`}
/>
</a>
</TooltipTrigger>
<TooltipContent>{dict.nav.github}</TooltipContent>
</Tooltip>
<a
href="https://github.com/DayuanJiang/next-ai-draw-io"
target="_blank"
rel="noopener noreferrer"
className="p-2 rounded-lg text-muted-foreground hover:text-foreground hover:bg-accent transition-colors"
>
<FaGithub
className={`${isMobile ? "w-4 h-4" : "w-5 h-5"}`}
/>
</a>
<ButtonWithTooltip
tooltipContent={dict.nav.settings}
variant="ghost"
@@ -1306,6 +1326,7 @@ Continue from EXACTLY where you stopped.`,
/>
</ButtonWithTooltip>
<div className="hidden sm:flex items-center gap-2">
<LanguageToggle />
{!isMobile && (
<ButtonWithTooltip
tooltipContent={dict.nav.hidePanel}

View File

@@ -0,0 +1,108 @@
"use client"
import { Globe } from "lucide-react"
import { usePathname, useRouter, useSearchParams } from "next/navigation"
import { Suspense, useEffect, useRef, useState } from "react"
import { i18n, type Locale } from "@/lib/i18n/config"
const LABELS: Record<string, string> = {
en: "EN",
zh: "中文",
ja: "日本語",
}
function LanguageToggleInner({ className = "" }: { className?: string }) {
const router = useRouter()
const pathname = usePathname() || "/"
const search = useSearchParams()
const [open, setOpen] = useState(false)
const [value, setValue] = useState<Locale>(i18n.defaultLocale)
const ref = useRef<HTMLDivElement | null>(null)
useEffect(() => {
const seg = pathname.split("/").filter(Boolean)
const first = seg[0]
if (first && i18n.locales.includes(first as Locale))
setValue(first as Locale)
else setValue(i18n.defaultLocale)
}, [pathname])
useEffect(() => {
function onDoc(e: MouseEvent) {
if (!ref.current) return
if (!ref.current.contains(e.target as Node)) setOpen(false)
}
if (open) document.addEventListener("mousedown", onDoc)
return () => document.removeEventListener("mousedown", onDoc)
}, [open])
const changeLocale = (lang: string) => {
const parts = pathname.split("/")
if (parts.length > 1 && i18n.locales.includes(parts[1] as Locale)) {
parts[1] = lang
} else {
parts.splice(1, 0, lang)
}
const newPath = parts.join("/") || "/"
const searchStr = search?.toString() ? `?${search.toString()}` : ""
setOpen(false)
router.push(newPath + searchStr)
}
return (
<div className={`relative inline-flex ${className}`} ref={ref}>
<button
aria-haspopup="menu"
aria-expanded={open}
onClick={() => setOpen((s) => !s)}
className="p-2 rounded-full hover:bg-accent/20 transition-colors text-muted-foreground"
aria-label="Change language"
>
<Globe className="w-5 h-5" />
</button>
{open && (
<div className="absolute right-0 top-full mt-2 w-40 bg-popover dark:bg-popover text-popover-foreground rounded-xl shadow-md border border-border/30 overflow-hidden z-50">
<div className="grid gap-0 divide-y divide-border/30">
{i18n.locales.map((loc) => (
<button
key={loc}
onClick={() => changeLocale(loc)}
className={`flex items-center gap-2 px-4 py-2 text-sm w-full text-left hover:bg-accent/10 transition-colors ${value === loc ? "bg-accent/10 font-semibold" : ""}`}
>
<span className="flex-1">
{LABELS[loc] ?? loc}
</span>
{value === loc && (
<span className="text-xs opacity-70">
✓
</span>
)}
</button>
))}
</div>
</div>
)}
</div>
)
}
export default function LanguageToggle({
className = "",
}: {
className?: string
}) {
return (
<Suspense
fallback={
<button
className="p-2 rounded-full text-muted-foreground opacity-50"
disabled
>
<Globe className="w-5 h-5" />
</button>
}
>
<LanguageToggleInner className={className} />
</Suspense>
)
}

View File

@@ -52,7 +52,6 @@ import {
} from "@/components/ui/select"
import { useDictionary } from "@/hooks/use-dictionary"
import type { UseModelConfigReturn } from "@/hooks/use-model-config"
import { formatMessage } from "@/lib/i18n/utils"
import type { ProviderConfig, ProviderName } from "@/lib/types/model-config"
import { PROVIDER_INFO, SUGGESTED_MODELS } from "@/lib/types/model-config"
import { cn } from "@/lib/utils"
@@ -103,40 +102,39 @@ function ProviderLogo({
)
}
// Configuration section with title and optional action
function ConfigSection({
title,
icon: Icon,
action,
children,
// Reusable validation button component
function ValidationButton({
status,
onClick,
disabled,
}: {
title: string
icon: React.ComponentType<{ className?: string }>
action?: React.ReactNode
children: React.ReactNode
status: ValidationStatus
onClick: () => void
disabled: boolean
}) {
return (
<div className="space-y-4">
<div className="flex items-center justify-between">
<div className="flex items-center gap-2">
<Icon className="h-4 w-4 text-muted-foreground" />
<span className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
{title}
</span>
</div>
{action}
</div>
{children}
</div>
)
}
// Card wrapper with subtle depth
function ConfigCard({ children }: { children: React.ReactNode }) {
return (
<div className="rounded-2xl border border-border-subtle bg-surface-2/50 p-5 space-y-5">
{children}
</div>
<Button
variant={status === "success" ? "outline" : "default"}
size="sm"
onClick={onClick}
disabled={disabled}
className={cn(
"h-9 px-4 min-w-[80px]",
status === "success" &&
"text-emerald-600 border-emerald-200 dark:border-emerald-800",
)}
>
{status === "validating" ? (
<Loader2 className="h-4 w-4 animate-spin" />
) : status === "success" ? (
<>
<Check className="h-4 w-4 mr-1.5" />
Verified
</>
) : (
"Test"
)}
</Button>
)
}
@@ -153,6 +151,7 @@ export function ModelConfigDialog({
const [validationStatus, setValidationStatus] =
useState<ValidationStatus>("idle")
const [validationError, setValidationError] = useState<string>("")
const [scrollState, setScrollState] = useState({ top: false, bottom: true })
const [customModelInput, setCustomModelInput] = useState("")
const scrollRef = useRef<HTMLDivElement>(null)
const validationResetTimeoutRef = useRef<ReturnType<
@@ -184,6 +183,26 @@ export function ModelConfigDialog({
(p) => p.id === selectedProviderId,
)
// Track scroll position for gradient shadows
useEffect(() => {
const scrollEl = scrollRef.current?.querySelector(
"[data-radix-scroll-area-viewport]",
) as HTMLElement | null
if (!scrollEl) return
const handleScroll = () => {
const { scrollTop, scrollHeight, clientHeight } = scrollEl
setScrollState({
top: scrollTop > 10,
bottom: scrollTop < scrollHeight - clientHeight - 10,
})
}
handleScroll() // Initial check
scrollEl.addEventListener("scroll", handleScroll)
return () => scrollEl.removeEventListener("scroll", handleScroll)
}, [selectedProvider])
// Cleanup validation reset timeout on unmount
useEffect(() => {
return () => {
@@ -368,120 +387,104 @@ export function ModelConfigDialog({
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-4xl h-[80vh] max-h-[800px] overflow-hidden flex flex-col gap-0 p-0">
{/* Header */}
<DialogHeader className="px-6 pt-6 pb-4 shrink-0">
<DialogTitle className="flex items-center gap-3">
<div className="p-2 rounded-xl bg-surface-2">
<DialogContent className="sm:max-w-3xl h-[75vh] max-h-[700px] overflow-hidden flex flex-col gap-0 p-0">
<DialogHeader className="px-6 pt-6 pb-4 border-b bg-gradient-to-r from-primary/5 via-primary/3 to-transparent">
<DialogTitle className="flex items-center gap-2.5 text-xl font-semibold">
<div className="p-1.5 rounded-lg bg-primary/10">
<Server className="h-5 w-5 text-primary" />
</div>
{dict.modelConfig?.title || "AI Model Configuration"}
</DialogTitle>
<DialogDescription className="mt-1">
<DialogDescription className="text-sm">
{dict.modelConfig?.description ||
"Configure multiple AI providers and models for your workspace"}
</DialogDescription>
</DialogHeader>
<div className="flex flex-1 min-h-0 overflow-hidden border-t border-border-subtle">
<div className="flex flex-1 min-h-0 overflow-hidden">
{/* Provider List (Left Sidebar) */}
<div className="w-60 shrink-0 flex flex-col bg-surface-1/50 border-r border-border-subtle">
<div className="px-4 py-3">
<div className="w-56 flex-shrink-0 flex flex-col border-r bg-muted/20">
<div className="px-4 py-3 border-b">
<span className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
{dict.modelConfig.providers}
Providers
</span>
</div>
<ScrollArea className="flex-1 px-2">
<div className="space-y-1 pb-2">
<ScrollArea className="flex-1">
<div className="p-2">
{config.providers.length === 0 ? (
<div className="px-3 py-8 text-center">
<div className="inline-flex items-center justify-center w-10 h-10 rounded-full bg-surface-2 mb-3">
<div className="inline-flex items-center justify-center w-10 h-10 rounded-full bg-muted mb-3">
<Plus className="h-5 w-5 text-muted-foreground" />
</div>
<p className="text-xs text-muted-foreground">
{dict.modelConfig.addProviderHint}
Add a provider to get started
</p>
</div>
) : (
config.providers.map((provider) => (
<button
key={provider.id}
type="button"
onClick={() => {
setSelectedProviderId(
provider.id,
)
setValidationStatus(
provider.validated
? "success"
: "idle",
)
setShowApiKey(false)
}}
className={cn(
"group flex items-center gap-3 px-3 py-2.5 rounded-xl w-full",
"text-left text-sm transition-all duration-150",
"hover:bg-interactive-hover",
"focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2",
selectedProviderId ===
provider.id &&
"bg-surface-0 shadow-sm ring-1 ring-border-subtle",
)}
>
<div
<div className="flex flex-col gap-1">
{config.providers.map((provider) => (
<button
key={provider.id}
type="button"
onClick={() => {
setSelectedProviderId(
provider.id,
)
setValidationStatus(
provider.validated
? "success"
: "idle",
)
setShowApiKey(false)
}}
className={cn(
"w-8 h-8 rounded-lg flex items-center justify-center",
"bg-surface-2 transition-colors duration-150",
"group flex items-center gap-3 px-3 py-2.5 rounded-lg text-left text-sm transition-all duration-150 hover:bg-accent focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2",
selectedProviderId ===
provider.id &&
"bg-primary/10",
"bg-background shadow-sm ring-1 ring-border",
)}
>
<ProviderLogo
provider={provider.provider}
className="flex-shrink-0"
/>
</div>
<span className="flex-1 truncate font-medium">
{getProviderDisplayName(
provider,
)}
</span>
{provider.validated ? (
<div className="flex-shrink-0 flex items-center justify-center w-5 h-5 rounded-full bg-success-muted">
<Check className="h-3 w-3 text-success" />
</div>
) : (
<ChevronRight
className={cn(
"h-4 w-4 text-muted-foreground/50 transition-transform duration-150",
selectedProviderId ===
provider.id &&
"translate-x-0.5",
<span className="flex-1 truncate font-medium">
{getProviderDisplayName(
provider,
)}
/>
)}
</button>
))
</span>
{provider.validated ? (
<div className="flex-shrink-0 flex items-center justify-center w-5 h-5 rounded-full bg-emerald-500/10">
<Check className="h-3 w-3 text-emerald-500" />
</div>
) : (
<ChevronRight
className={cn(
"h-4 w-4 text-muted-foreground/50 transition-transform",
selectedProviderId ===
provider.id &&
"translate-x-0.5",
)}
/>
)}
</button>
))}
</div>
)}
</div>
</ScrollArea>
{/* Add Provider */}
<div className="p-3 border-t border-border-subtle">
<div className="p-2 border-t">
<Select
onValueChange={(v) =>
handleAddProvider(v as ProviderName)
}
>
<SelectTrigger className="w-full h-9 rounded-xl bg-surface-0 border-border-subtle hover:bg-interactive-hover">
<SelectTrigger className="h-9 bg-background hover:bg-accent">
<Plus className="h-4 w-4 mr-2 text-muted-foreground" />
<SelectValue
placeholder={
dict.modelConfig.addProvider
}
/>
<SelectValue placeholder="Add Provider" />
</SelectTrigger>
<SelectContent>
{availableProviders.map((p) => (
@@ -504,23 +507,41 @@ export function ModelConfigDialog({
</div>
{/* Provider Details (Right Panel) */}
<div className="flex-1 min-w-0 flex flex-col overflow-hidden">
<div className="flex-1 min-w-0 overflow-hidden relative">
{selectedProvider ? (
<>
<ScrollArea className="flex-1" ref={scrollRef}>
<div className="p-6 space-y-8">
{/* Top gradient shadow */}
<div
className={cn(
"absolute top-0 left-0 right-0 h-8 bg-gradient-to-b from-background to-transparent z-10 pointer-events-none transition-opacity duration-200",
scrollState.top
? "opacity-100"
: "opacity-0",
)}
/>
{/* Bottom gradient shadow */}
<div
className={cn(
"absolute bottom-0 left-0 right-0 h-8 bg-gradient-to-t from-background to-transparent z-10 pointer-events-none transition-opacity duration-200",
scrollState.bottom
? "opacity-100"
: "opacity-0",
)}
/>
<ScrollArea className="h-full" ref={scrollRef}>
<div className="p-6 space-y-6">
{/* Provider Header */}
<div className="flex items-center gap-3">
<div className="flex items-center justify-center w-12 h-12 rounded-xl bg-surface-2">
<div className="flex items-center justify-center w-10 h-10 rounded-lg bg-muted">
<ProviderLogo
provider={
selectedProvider.provider
}
className="h-6 w-6"
className="h-5 w-5"
/>
</div>
<div className="flex-1 min-w-0">
<h3 className="font-semibold text-lg tracking-tight">
<h3 className="font-semibold text-base">
{
PROVIDER_INFO[
selectedProvider
@@ -528,43 +549,31 @@ export function ModelConfigDialog({
].label
}
</h3>
<p className="text-sm text-muted-foreground">
<p className="text-xs text-muted-foreground">
{selectedProvider.models
.length === 0
? dict.modelConfig
.noModelsConfigured
: formatMessage(
dict.modelConfig
.modelsConfiguredCount,
{
count: selectedProvider
.models
.length,
},
)}
? "No models configured"
: `${selectedProvider.models.length} model${selectedProvider.models.length > 1 ? "s" : ""} configured`}
</p>
</div>
{selectedProvider.validated && (
<div className="flex items-center gap-1.5 px-3 py-1.5 rounded-full bg-success-muted text-success">
<Check className="h-3.5 w-3.5 animate-check-pop" />
<div className="flex items-center gap-1.5 px-2.5 py-1 rounded-full bg-emerald-500/10 text-emerald-600 dark:text-emerald-400">
<Check className="h-3.5 w-3.5" />
<span className="text-xs font-medium">
{
dict.modelConfig
.verified
}
Verified
</span>
</div>
)}
</div>
{/* Configuration Section */}
<ConfigSection
title={
dict.modelConfig.configuration
}
icon={Settings2}
>
<ConfigCard>
<div className="space-y-4">
<div className="flex items-center gap-2 text-sm font-medium text-muted-foreground">
<Settings2 className="h-4 w-4" />
<span>Configuration</span>
</div>
<div className="rounded-xl border bg-card p-4 space-y-4">
{/* Display Name */}
<div className="space-y-2">
<Label
@@ -572,10 +581,7 @@ export function ModelConfigDialog({
className="text-xs font-medium flex items-center gap-1.5"
>
<Tag className="h-3.5 w-3.5 text-muted-foreground" />
{
dict.modelConfig
.displayName
}
Display Name
</Label>
<Input
id="provider-name"
@@ -610,11 +616,8 @@ export function ModelConfigDialog({
className="text-xs font-medium flex items-center gap-1.5"
>
<Key className="h-3.5 w-3.5 text-muted-foreground" />
{
dict
.modelConfig
.awsAccessKeyId
}
AWS Access Key
ID
</Label>
<Input
id="aws-access-key-id"
@@ -646,11 +649,8 @@ export function ModelConfigDialog({
className="text-xs font-medium flex items-center gap-1.5"
>
<Key className="h-3.5 w-3.5 text-muted-foreground" />
{
dict
.modelConfig
.awsSecretAccessKey
}
AWS Secret
Access Key
</Label>
<div className="relative">
<Input
@@ -674,11 +674,7 @@ export function ModelConfigDialog({
.value,
)
}
placeholder={
dict
.modelConfig
.enterSecretKey
}
placeholder="Enter your secret access key"
className="h-9 pr-10 font-mono text-xs"
/>
<button
@@ -711,11 +707,7 @@ export function ModelConfigDialog({
className="text-xs font-medium flex items-center gap-1.5"
>
<Link2 className="h-3.5 w-3.5 text-muted-foreground" />
{
dict
.modelConfig
.awsRegion
}
AWS Region
</Label>
<Select
value={
@@ -732,13 +724,7 @@ export function ModelConfigDialog({
}
>
<SelectTrigger className="h-9 font-mono text-xs hover:bg-accent">
<SelectValue
placeholder={
dict
.modelConfig
.selectRegion
}
/>
<SelectValue placeholder="Select region" />
</SelectTrigger>
<SelectContent className="max-h-64">
<SelectItem value="us-east-1">
@@ -823,7 +809,7 @@ export function ModelConfigDialog({
"h-9 px-4",
validationStatus ===
"success" &&
"text-success border-success/30 bg-success-muted hover:bg-success-muted",
"text-emerald-600 border-emerald-200 dark:border-emerald-800",
)}
>
{validationStatus ===
@@ -832,17 +818,11 @@ export function ModelConfigDialog({
) : validationStatus ===
"success" ? (
<>
<Check className="h-4 w-4 mr-1.5 animate-check-pop" />
{
dict
.modelConfig
.verified
}
<Check className="h-4 w-4 mr-1.5" />
Verified
</>
) : (
dict
.modelConfig
.test
"Test"
)}
</Button>
{validationStatus ===
@@ -866,11 +846,7 @@ export function ModelConfigDialog({
className="text-xs font-medium flex items-center gap-1.5"
>
<Key className="h-3.5 w-3.5 text-muted-foreground" />
{
dict
.modelConfig
.apiKey
}
API Key
</Label>
<div className="flex gap-2">
<div className="relative flex-1">
@@ -894,11 +870,7 @@ export function ModelConfigDialog({
.value,
)
}
placeholder={
dict
.modelConfig
.enterApiKey
}
placeholder="Enter your API key"
className="h-9 pr-10 font-mono text-xs"
/>
<button
@@ -942,7 +914,7 @@ export function ModelConfigDialog({
"h-9 px-4",
validationStatus ===
"success" &&
"text-success border-success/30 bg-success-muted hover:bg-success-muted",
"text-emerald-600 border-emerald-200 dark:border-emerald-800",
)}
>
{validationStatus ===
@@ -951,17 +923,11 @@ export function ModelConfigDialog({
) : validationStatus ===
"success" ? (
<>
<Check className="h-4 w-4 mr-1.5 animate-check-pop" />
{
dict
.modelConfig
.verified
}
<Check className="h-4 w-4 mr-1.5" />
Verified
</>
) : (
dict
.modelConfig
.test
"Test"
)}
</Button>
</div>
@@ -984,17 +950,9 @@ export function ModelConfigDialog({
className="text-xs font-medium flex items-center gap-1.5"
>
<Link2 className="h-3.5 w-3.5 text-muted-foreground" />
{
dict
.modelConfig
.baseUrl
}
Base URL
<span className="text-muted-foreground font-normal">
{
dict
.modelConfig
.optional
}
(optional)
</span>
</Label>
<Input
@@ -1016,30 +974,27 @@ export function ModelConfigDialog({
.provider
]
.defaultBaseUrl ||
dict
.modelConfig
.customEndpoint
"Custom endpoint URL"
}
className="h-9 rounded-xl font-mono text-xs"
className="h-9 font-mono text-xs"
/>
</div>
</>
)}
</ConfigCard>
</ConfigSection>
</div>
</div>
{/* Models Section */}
<ConfigSection
title={dict.modelConfig.models}
icon={Sparkles}
action={
<div className="space-y-4">
<div className="flex items-center justify-between">
<div className="flex items-center gap-2 text-sm font-medium text-muted-foreground">
<Sparkles className="h-4 w-4" />
<span>Models</span>
</div>
<div className="flex items-center gap-2">
<div className="relative">
<Input
placeholder={
dict.modelConfig
.customModelId
}
placeholder="Custom model ID..."
value={
customModelInput
}
@@ -1048,6 +1003,7 @@ export function ModelConfigDialog({
e.target
.value,
)
// Clear duplicate error when typing
if (
duplicateError
) {
@@ -1076,11 +1032,12 @@ export function ModelConfigDialog({
}
}}
className={cn(
"h-8 w-44 rounded-lg font-mono text-xs",
"h-8 w-48 font-mono text-xs",
duplicateError &&
"border-destructive focus-visible:ring-destructive",
)}
/>
{/* Show duplicate error for custom model input */}
{duplicateError && (
<p className="absolute top-full left-0 mt-1 text-[11px] text-destructive">
{duplicateError}
@@ -1090,7 +1047,7 @@ export function ModelConfigDialog({
<Button
variant="outline"
size="sm"
className="h-8 rounded-lg"
className="h-8"
onClick={() => {
if (
customModelInput.trim()
@@ -1127,16 +1084,12 @@ export function ModelConfigDialog({
0
}
>
<SelectTrigger className="w-28 h-8 rounded-lg hover:bg-interactive-hover">
<SelectTrigger className="w-32 h-8 hover:bg-accent">
<span className="text-xs">
{availableSuggestions.length ===
0
? dict
.modelConfig
.allAdded
: dict
.modelConfig
.suggested}
? "All added"
: "Suggested"}
</span>
</SelectTrigger>
<SelectContent className="max-h-72">
@@ -1160,25 +1113,22 @@ export function ModelConfigDialog({
</SelectContent>
</Select>
</div>
}
>
</div>
{/* Model List */}
<div className="rounded-2xl border border-border-subtle bg-surface-2/30 overflow-hidden min-h-[120px]">
<div className="rounded-xl border bg-card overflow-hidden min-h-[120px]">
{selectedProvider.models
.length === 0 ? (
<div className="p-6 text-center h-full flex flex-col items-center justify-center">
<div className="inline-flex items-center justify-center w-10 h-10 rounded-full bg-surface-2 mb-3">
<div className="p-4 text-center h-full flex flex-col items-center justify-center">
<div className="inline-flex items-center justify-center w-10 h-10 rounded-full bg-muted mb-2">
<Sparkles className="h-5 w-5 text-muted-foreground" />
</div>
<p className="text-sm text-muted-foreground">
{
dict.modelConfig
.noModelsConfigured
}
No models configured
</p>
</div>
) : (
<div className="divide-y divide-border-subtle">
<div className="divide-y">
{selectedProvider.models.map(
(model, index) => (
<div
@@ -1186,7 +1136,16 @@ export function ModelConfigDialog({
model.id
}
className={cn(
"transition-colors duration-150 hover:bg-interactive-hover/50",
"transition-colors hover:bg-muted/30",
index ===
0 &&
"rounded-t-xl",
index ===
selectedProvider
.models
.length -
1 &&
"rounded-b-xl",
)}
>
<div className="flex items-center gap-3 p-3 min-w-0">
@@ -1213,8 +1172,8 @@ export function ModelConfigDialog({
) : model.validated ===
true ? (
// Valid
<div className="w-full h-full rounded-lg bg-success-muted flex items-center justify-center">
<Check className="h-4 w-4 text-success" />
<div className="w-full h-full rounded-lg bg-emerald-500/10 flex items-center justify-center">
<Check className="h-4 w-4 text-emerald-500" />
</div>
) : model.validated ===
false ? (
@@ -1332,9 +1291,7 @@ export function ModelConfigDialog({
!newModelId
) {
showError(
dict
.modelConfig
.modelIdEmpty,
"Model ID cannot be empty",
)
return
}
@@ -1362,9 +1319,7 @@ export function ModelConfigDialog({
)
) {
showError(
dict
.modelConfig
.modelIdExists,
"This model ID already exists",
)
return
}
@@ -1415,7 +1370,7 @@ export function ModelConfigDialog({
</div>
)}
</div>
</ConfigSection>
</div>
{/* Danger Zone */}
<div className="pt-4">
@@ -1425,13 +1380,10 @@ export function ModelConfigDialog({
onClick={() =>
setDeleteConfirmOpen(true)
}
className="text-muted-foreground hover:text-destructive hover:bg-destructive/5 rounded-xl"
className="text-muted-foreground hover:text-destructive hover:bg-destructive/10"
>
<Trash2 className="h-4 w-4 mr-2" />
{
dict.modelConfig
.deleteProvider
}
Delete Provider
</Button>
</div>
</div>
@@ -1439,14 +1391,15 @@ export function ModelConfigDialog({
</>
) : (
<div className="h-full flex flex-col items-center justify-center p-8 text-center">
<div className="inline-flex items-center justify-center w-16 h-16 rounded-2xl bg-surface-2 mb-4">
<Server className="h-8 w-8 text-muted-foreground" />
<div className="inline-flex items-center justify-center w-16 h-16 rounded-2xl bg-gradient-to-br from-primary/10 to-primary/5 mb-4">
<Server className="h-8 w-8 text-primary/60" />
</div>
<h3 className="font-semibold text-lg tracking-tight mb-1">
{dict.modelConfig.configureProviders}
<h3 className="font-semibold mb-1">
Configure AI Providers
</h3>
<p className="text-sm text-muted-foreground max-w-xs">
{dict.modelConfig.selectProviderHint}
Select a provider from the list or add a new
one to configure API keys and models
</p>
</div>
)}
@@ -1454,10 +1407,10 @@ export function ModelConfigDialog({
</div>
{/* Footer */}
<div className="px-6 py-3 border-t border-border-subtle bg-surface-1/30 shrink-0">
<div className="px-6 py-3 border-t bg-muted/20">
<p className="text-xs text-muted-foreground text-center flex items-center justify-center gap-1.5">
<Key className="h-3 w-3" />
{dict.modelConfig.apiKeyStored}
API keys are stored locally in your browser
</p>
</div>
</DialogContent>
@@ -1476,16 +1429,19 @@ export function ModelConfigDialog({
<AlertCircle className="h-6 w-6 text-destructive" />
</div>
<AlertDialogTitle className="text-center">
{dict.modelConfig.deleteProvider}
Delete Provider
</AlertDialogTitle>
<AlertDialogDescription className="text-center">
{formatMessage(dict.modelConfig.deleteConfirmDesc, {
name: selectedProvider
Are you sure you want to delete{" "}
<span className="font-medium text-foreground">
{selectedProvider
? selectedProvider.name ||
PROVIDER_INFO[selectedProvider.provider]
.label
: "this provider",
})}
: "this provider"}
</span>
? This will remove all configured models and cannot
be undone.
</AlertDialogDescription>
</AlertDialogHeader>
{selectedProvider &&
@@ -1495,16 +1451,11 @@ export function ModelConfigDialog({
htmlFor="delete-confirm"
className="text-sm text-muted-foreground"
>
{formatMessage(
dict.modelConfig.typeToConfirm,
{
name:
selectedProvider.name ||
PROVIDER_INFO[
selectedProvider.provider
].label,
},
)}
Type &quot;
{selectedProvider.name ||
PROVIDER_INFO[selectedProvider.provider]
.label}
&quot; to confirm
</Label>
<Input
id="delete-confirm"
@@ -1512,17 +1463,13 @@ export function ModelConfigDialog({
onChange={(e) =>
setDeleteConfirmText(e.target.value)
}
placeholder={
dict.modelConfig.typeProviderName
}
placeholder="Type provider name..."
className="h-9"
/>
</div>
)}
<AlertDialogFooter>
<AlertDialogCancel>
{dict.modelConfig.cancel}
</AlertDialogCancel>
<AlertDialogCancel>Cancel</AlertDialogCancel>
<AlertDialogAction
onClick={handleDeleteProvider}
disabled={
@@ -1535,7 +1482,7 @@ export function ModelConfigDialog({
}
className="bg-destructive text-destructive-foreground hover:bg-destructive/90 disabled:opacity-50"
>
{dict.modelConfig.delete}
Delete
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
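For context on the formatMessage(...) calls that this hunk replaces with hardcoded English strings: the dictionary entries removed further down (e.g. deleteConfirmDesc, typeToConfirm, modelsConfiguredCount) use {name}- and {count}-style placeholders. A minimal sketch of such a helper, assuming plain placeholder substitution; the repo's actual @/lib/i18n/utils implementation may differ:

// Hypothetical sketch, not the project's actual helper.
export function formatMessage(
    template: string,
    values: Record<string, string | number>,
): string {
    // Replace each "{key}" token with the matching value, leaving unknown tokens intact.
    return template.replace(/\{(\w+)\}/g, (match, key) =>
        key in values ? String(values[key]) : match,
    )
}

// Example: formatMessage('Type "{name}" to confirm', { name: "OpenAI" })
// → 'Type "OpenAI" to confirm'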

View File

@@ -16,7 +16,6 @@ import {
ModelSelectorTrigger,
} from "@/components/ai-elements/model-selector"
import { ButtonWithTooltip } from "@/components/button-with-tooltip"
import { useDictionary } from "@/hooks/use-dictionary"
import type { FlattenedModel } from "@/lib/types/model-config"
import { cn } from "@/lib/utils"
@@ -68,7 +67,6 @@ export function ModelSelector({
onConfigure,
disabled = false,
}: ModelSelectorProps) {
const dict = useDictionary()
const [open, setOpen] = useState(false)
// Only show validated models in the selector
const validatedModels = useMemo(
@@ -98,8 +96,8 @@ export function ModelSelector({
}
const tooltipContent = selectedModel
? `${selectedModel.modelId} ${dict.modelConfig.clickToChange}`
: `${dict.modelConfig.usingServerDefault} ${dict.modelConfig.clickToChange}`
? `${selectedModel.modelId} (click to change)`
: "Using server default model (click to change)"
return (
<ModelSelectorRoot open={open} onOpenChange={setOpen}>
@@ -113,26 +111,22 @@ export function ModelSelector({
>
<Bot className="h-4 w-4 flex-shrink-0 text-muted-foreground" />
<span className="text-xs truncate">
{selectedModel
? selectedModel.modelId
: dict.modelConfig.default}
{selectedModel ? selectedModel.modelId : "Default"}
</span>
<ChevronDown className="h-3 w-3 flex-shrink-0 text-muted-foreground" />
</ButtonWithTooltip>
</ModelSelectorTrigger>
<ModelSelectorContent title={dict.modelConfig.selectModel}>
<ModelSelectorInput
placeholder={dict.modelConfig.searchModels}
/>
<ModelSelectorContent title="Select Model">
<ModelSelectorInput placeholder="Search models..." />
<ModelSelectorList>
<ModelSelectorEmpty>
{validatedModels.length === 0 && models.length > 0
? dict.modelConfig.noVerifiedModels
: dict.modelConfig.noModelsFound}
? "No verified models. Test your models first."
: "No models found."}
</ModelSelectorEmpty>
{/* Server Default Option */}
<ModelSelectorGroup heading={dict.modelConfig.default}>
<ModelSelectorGroup heading="Default">
<ModelSelectorItem
value="__server_default__"
onSelect={handleSelect}
@@ -151,7 +145,7 @@ export function ModelSelector({
/>
<Server className="mr-2 h-4 w-4 text-muted-foreground" />
<ModelSelectorName>
{dict.modelConfig.serverDefault}
Server Default
</ModelSelectorName>
</ModelSelectorItem>
</ModelSelectorGroup>
@@ -207,13 +201,13 @@ export function ModelSelector({
>
<Settings2 className="mr-2 h-4 w-4" />
<ModelSelectorName>
{dict.modelConfig.configureModels}
Configure Models...
</ModelSelectorName>
</ModelSelectorItem>
</ModelSelectorGroup>
{/* Info text */}
<div className="px-3 py-2 text-xs text-muted-foreground border-t">
{dict.modelConfig.onlyVerifiedShown}
Only verified models are shown
</div>
</ModelSelectorList>
</ModelSelectorContent>

View File

@@ -1,8 +1,7 @@
"use client"
import { Moon, Sun } from "lucide-react"
import { usePathname, useRouter, useSearchParams } from "next/navigation"
import { Suspense, useEffect, useState } from "react"
import { useEffect, useState } from "react"
import { Button } from "@/components/ui/button"
import {
Dialog,
@@ -13,49 +12,8 @@ import {
} from "@/components/ui/dialog"
import { Input } from "@/components/ui/input"
import { Label } from "@/components/ui/label"
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select"
import { Switch } from "@/components/ui/switch"
import { useDictionary } from "@/hooks/use-dictionary"
import { getApiEndpoint } from "@/lib/base-path"
import { i18n, type Locale } from "@/lib/i18n/config"
import { cn } from "@/lib/utils"
// Reusable setting item component for consistent layout
function SettingItem({
label,
description,
children,
}: {
label: string
description?: string
children: React.ReactNode
}) {
return (
<div className="flex items-center justify-between py-4 first:pt-0 last:pb-0">
<div className="space-y-0.5 pr-4">
<Label className="text-sm font-medium">{label}</Label>
{description && (
<p className="text-xs text-muted-foreground max-w-[260px]">
{description}
</p>
)}
</div>
<div className="shrink-0">{children}</div>
</div>
)
}
const LANGUAGE_LABELS: Record<Locale, string> = {
en: "English",
zh: "中文",
ja: "日本語",
}
interface SettingsDialogProps {
open: boolean
@@ -78,7 +36,7 @@ function getStoredAccessCodeRequired(): boolean | null {
return stored === "true"
}
function SettingsContent({
export function SettingsDialog({
open,
onOpenChange,
onCloseProtectionChange,
@@ -88,9 +46,6 @@ function SettingsContent({
onToggleDarkMode,
}: SettingsDialogProps) {
const dict = useDictionary()
const router = useRouter()
const pathname = usePathname() || "/"
const search = useSearchParams()
const [accessCode, setAccessCode] = useState("")
const [closeProtection, setCloseProtection] = useState(true)
const [isVerifying, setIsVerifying] = useState(false)
@@ -98,13 +53,12 @@ function SettingsContent({
const [accessCodeRequired, setAccessCodeRequired] = useState(
() => getStoredAccessCodeRequired() ?? false,
)
const [currentLang, setCurrentLang] = useState("en")
useEffect(() => {
// Only fetch if not cached in localStorage
if (getStoredAccessCodeRequired() !== null) return
fetch(getApiEndpoint("/api/config"))
fetch("/api/config")
.then((res) => {
if (!res.ok) throw new Error(`HTTP ${res.status}`)
return res.json()
@@ -123,17 +77,6 @@ function SettingsContent({
})
}, [])
// Detect current language from pathname
useEffect(() => {
const seg = pathname.split("/").filter(Boolean)
const first = seg[0]
if (first && i18n.locales.includes(first as Locale)) {
setCurrentLang(first)
} else {
setCurrentLang(i18n.defaultLocale)
}
}, [pathname])
useEffect(() => {
if (open) {
const storedCode =
@@ -150,18 +93,6 @@ function SettingsContent({
}
}, [open])
const changeLanguage = (lang: string) => {
const parts = pathname.split("/")
if (parts.length > 1 && i18n.locales.includes(parts[1] as Locale)) {
parts[1] = lang
} else {
parts.splice(1, 0, lang)
}
const newPath = parts.join("/") || "/"
const searchStr = search?.toString() ? `?${search.toString()}` : ""
router.push(newPath + searchStr)
}
const handleSave = async () => {
if (!accessCodeRequired) return
@@ -169,15 +100,12 @@ function SettingsContent({
setIsVerifying(true)
try {
const response = await fetch(
getApiEndpoint("/api/verify-access-code"),
{
method: "POST",
headers: {
"x-access-code": accessCode.trim(),
},
const response = await fetch("/api/verify-access-code", {
method: "POST",
headers: {
"x-access-code": accessCode.trim(),
},
)
})
const data = await response.json()
@@ -203,32 +131,20 @@ function SettingsContent({
}
return (
<DialogContent className="sm:max-w-lg p-0 gap-0">
{/* Header */}
<DialogHeader className="px-6 pt-6 pb-4">
<DialogTitle>{dict.settings.title}</DialogTitle>
<DialogDescription className="mt-1">
{dict.settings.description}
</DialogDescription>
</DialogHeader>
{/* Content */}
<div className="px-6 pb-6">
<div className="divide-y divide-border-subtle">
{/* Access Code (conditional) */}
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-md">
<DialogHeader>
<DialogTitle>{dict.settings.title}</DialogTitle>
<DialogDescription>
{dict.settings.description}
</DialogDescription>
</DialogHeader>
<div className="space-y-4 py-2">
{accessCodeRequired && (
<div className="py-4 first:pt-0 space-y-3">
<div className="space-y-0.5">
<Label
htmlFor="access-code"
className="text-sm font-medium"
>
{dict.settings.accessCode}
</Label>
<p className="text-xs text-muted-foreground">
{dict.settings.accessCodeDescription}
</p>
</div>
<div className="space-y-2">
<Label htmlFor="access-code">
{dict.settings.accessCode}
</Label>
<div className="flex gap-2">
<Input
id="access-code"
@@ -242,60 +158,38 @@ function SettingsContent({
dict.settings.accessCodePlaceholder
}
autoComplete="off"
className="h-9"
/>
<Button
onClick={handleSave}
disabled={isVerifying || !accessCode.trim()}
className="h-9 px-4 rounded-xl"
>
{isVerifying ? "..." : dict.common.save}
</Button>
</div>
<p className="text-[0.8rem] text-muted-foreground">
{dict.settings.accessCodeDescription}
</p>
{error && (
<p className="text-xs text-destructive">
<p className="text-[0.8rem] text-destructive">
{error}
</p>
)}
</div>
)}
{/* Language */}
<SettingItem
label={dict.settings.language}
description={dict.settings.languageDescription}
>
<Select
value={currentLang}
onValueChange={changeLanguage}
>
<SelectTrigger
id="language-select"
className="w-[120px] h-9 rounded-xl"
>
<SelectValue />
</SelectTrigger>
<SelectContent>
{i18n.locales.map((locale) => (
<SelectItem key={locale} value={locale}>
{LANGUAGE_LABELS[locale]}
</SelectItem>
))}
</SelectContent>
</Select>
</SettingItem>
{/* Theme */}
<SettingItem
label={dict.settings.theme}
description={dict.settings.themeDescription}
>
<div className="flex items-center justify-between">
<div className="space-y-0.5">
<Label htmlFor="theme-toggle">
{dict.settings.theme}
</Label>
<p className="text-[0.8rem] text-muted-foreground">
{dict.settings.themeDescription}
</p>
</div>
<Button
id="theme-toggle"
variant="outline"
size="icon"
onClick={onToggleDarkMode}
className="h-9 w-9 rounded-xl border-border-subtle hover:bg-interactive-hover"
>
{darkMode ? (
<Sun className="h-4 w-4" />
@@ -303,35 +197,42 @@ function SettingsContent({
<Moon className="h-4 w-4" />
)}
</Button>
</SettingItem>
</div>
{/* Draw.io Style */}
<SettingItem
label={dict.settings.drawioStyle}
description={`${dict.settings.drawioStyleDescription} ${
drawioUi === "min"
? dict.settings.minimal
: dict.settings.sketch
}`}
>
<div className="flex items-center justify-between">
<div className="space-y-0.5">
<Label htmlFor="drawio-ui">
{dict.settings.drawioStyle}
</Label>
<p className="text-[0.8rem] text-muted-foreground">
{dict.settings.drawioStyleDescription}{" "}
{drawioUi === "min"
? dict.settings.minimal
: dict.settings.sketch}
</p>
</div>
<Button
id="drawio-ui"
variant="outline"
size="sm"
onClick={onToggleDrawioUi}
className="h-9 w-[120px] rounded-xl border-border-subtle hover:bg-interactive-hover font-normal"
>
{dict.settings.switchTo}{" "}
{drawioUi === "min"
? dict.settings.sketch
: dict.settings.minimal}
</Button>
</SettingItem>
</div>
{/* Close Protection */}
<SettingItem
label={dict.settings.closeProtection}
description={dict.settings.closeProtectionDescription}
>
<div className="flex items-center justify-between">
<div className="space-y-0.5">
<Label htmlFor="close-protection">
{dict.settings.closeProtection}
</Label>
<p className="text-[0.8rem] text-muted-foreground">
{dict.settings.closeProtectionDescription}
</p>
</div>
<Switch
id="close-protection"
checked={closeProtection}
@@ -344,34 +245,14 @@ function SettingsContent({
onCloseProtectionChange?.(checked)
}}
/>
</SettingItem>
</div>
</div>
</div>
{/* Footer */}
<div className="px-6 py-4 border-t border-border-subtle bg-surface-1/50 rounded-b-2xl">
<p className="text-xs text-muted-foreground text-center">
Version {process.env.APP_VERSION}
</p>
</div>
</DialogContent>
)
}
export function SettingsDialog(props: SettingsDialogProps) {
return (
<Dialog open={props.open} onOpenChange={props.onOpenChange}>
<Suspense
fallback={
<DialogContent className="sm:max-w-lg p-0">
<div className="h-80 flex items-center justify-center">
<div className="animate-spin h-6 w-6 border-2 border-primary border-t-transparent rounded-full" />
</div>
</DialogContent>
}
>
<SettingsContent {...props} />
</Suspense>
<div className="pt-4 border-t border-border/50">
<p className="text-[0.75rem] text-muted-foreground text-center">
Version {process.env.APP_VERSION}
</p>
</div>
</DialogContent>
</Dialog>
)
}

View File

@@ -38,10 +38,7 @@ function DialogOverlay({
<DialogPrimitive.Overlay
data-slot="dialog-overlay"
className={cn(
"fixed inset-0 z-50 bg-black/40 backdrop-blur-[2px]",
"data-[state=open]:animate-in data-[state=closed]:animate-out",
"data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
"duration-200",
"data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 fixed inset-0 z-50 bg-black/50",
className
)}
{...props}
@@ -60,32 +57,13 @@ function DialogContent({
<DialogPrimitive.Content
data-slot="dialog-content"
className={cn(
// Base styles
"fixed top-[50%] left-[50%] z-50 w-full",
"max-w-[calc(100%-2rem)] translate-x-[-50%] translate-y-[-50%]",
"grid gap-4 p-6",
// Refined visual treatment
"bg-surface-0 rounded-2xl border border-border-subtle shadow-dialog",
// Entry/exit animations
"data-[state=open]:animate-in data-[state=closed]:animate-out",
"data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0",
"data-[state=closed]:zoom-out-[0.98] data-[state=open]:zoom-in-[0.98]",
"data-[state=closed]:slide-out-to-top-[2%] data-[state=open]:slide-in-from-top-[2%]",
"duration-200 sm:max-w-lg",
"bg-background data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 fixed top-[50%] left-[50%] z-50 grid w-full max-w-[calc(100%-2rem)] translate-x-[-50%] translate-y-[-50%] gap-4 rounded-lg border p-6 shadow-lg duration-200 sm:max-w-lg",
className
)}
{...props}
>
{children}
<DialogPrimitive.Close className={cn(
"absolute top-4 right-4 rounded-xl p-1.5",
"text-muted-foreground/60 hover:text-foreground",
"hover:bg-interactive-hover",
"transition-all duration-150",
"focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2",
"disabled:pointer-events-none",
"[&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg]:size-4"
)}>
<DialogPrimitive.Close className="ring-offset-background focus:ring-ring data-[state=open]:bg-accent data-[state=open]:text-muted-foreground absolute top-4 right-4 rounded-xs opacity-70 transition-opacity hover:opacity-100 focus:ring-2 focus:ring-offset-2 focus:outline-hidden disabled:pointer-events-none [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4">
<XIcon />
<span className="sr-only">Close</span>
</DialogPrimitive.Close>
@@ -124,10 +102,7 @@ function DialogTitle({
return (
<DialogPrimitive.Title
data-slot="dialog-title"
className={cn(
"text-xl font-semibold tracking-tight leading-tight",
className
)}
className={cn("text-lg leading-none font-semibold", className)}
{...props}
/>
)
@@ -140,10 +115,7 @@ function DialogDescription({
return (
<DialogPrimitive.Description
data-slot="dialog-description"
className={cn(
"text-sm text-muted-foreground leading-relaxed",
className
)}
className={cn("text-muted-foreground text-sm", className)}
{...props}
/>
)

View File

@@ -8,30 +8,9 @@ function Input({ className, type, ...props }: React.ComponentProps<"input">) {
type={type}
data-slot="input"
className={cn(
// Base styles
"flex h-10 w-full min-w-0 rounded-xl px-3.5 py-2",
"border border-border-subtle bg-surface-1",
"text-sm text-foreground",
// Placeholder
"placeholder:text-muted-foreground/60",
// Selection
"selection:bg-primary selection:text-primary-foreground",
// Transitions
"transition-all duration-150 ease-out",
// Hover state
"hover:border-border-default",
// Focus state - refined ring
"focus:outline-none focus:border-primary focus:ring-2 focus:ring-primary/10",
// File input
"file:text-foreground file:inline-flex file:h-7 file:border-0",
"file:bg-transparent file:text-sm file:font-medium",
// Disabled
"disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50",
// Invalid state
"aria-invalid:border-destructive aria-invalid:ring-destructive/20",
"dark:aria-invalid:ring-destructive/40",
// Dark mode background
"dark:bg-surface-1",
"file:text-foreground placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground dark:bg-input/30 border-input flex h-9 w-full min-w-0 rounded-md border bg-transparent px-3 py-1 text-base shadow-xs transition-[color,box-shadow] outline-none file:inline-flex file:h-7 file:border-0 file:bg-transparent file:text-sm file:font-medium disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm",
"focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]",
"aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
className
)}
{...props}

View File

@@ -5,7 +5,6 @@ import { createContext, useContext, useEffect, useRef, useState } from "react"
import type { DrawIoEmbedRef } from "react-drawio"
import { STORAGE_DIAGRAM_XML_KEY } from "@/components/chat-panel"
import type { ExportFormat } from "@/components/save-dialog"
import { getApiEndpoint } from "@/lib/base-path"
import { extractDiagramXML, validateAndFixXml } from "../lib/utils"
interface DiagramContextType {
@@ -330,7 +329,7 @@ export function DiagramProvider({ children }: { children: React.ReactNode }) {
sessionId?: string,
) => {
try {
await fetch(getApiEndpoint("/api/log-save"), {
await fetch("/api/log-save", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ filename, format, sessionId }),

View File

@@ -7,11 +7,6 @@ services:
context: .
args:
- NEXT_PUBLIC_DRAWIO_BASE_URL=http://localhost:8080
# Uncomment below for subdirectory deployment
# - NEXT_PUBLIC_BASE_PATH=/nextaidrawio
ports: ["3000:3000"]
env_file: .env
environment:
# For subdirectory deployment, uncomment and set your path:
# NEXT_PUBLIC_BASE_PATH: /nextaidrawio
depends_on: [drawio]

View File

@@ -68,10 +68,6 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
# SILICONFLOW_API_KEY=sk-...
# SILICONFLOW_BASE_URL=https://api.siliconflow.com/v1 # Optional: switch to https://api.siliconflow.cn/v1 if needed
# SGLang Configuration (OpenAI-compatible)
# SGLANG_API_KEY=your-sglang-api-key
# SGLANG_BASE_URL=http://127.0.0.1:8000/v1 # Your SGLang endpoint
# Vercel AI Gateway Configuration
# Get your API key from: https://vercel.com/ai-gateway
# Model format: "provider/model" e.g., "openai/gpt-4o", "anthropic/claude-sonnet-4-5"
@@ -97,12 +93,6 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
# NEXT_PUBLIC_DRAWIO_BASE_URL=https://embed.diagrams.net # Default: https://embed.diagrams.net
# Use this to point to a self-hosted draw.io instance
# Subdirectory Deployment (Optional)
# For deploying to a subdirectory (e.g., https://example.com/nextaidrawio)
# Set this to your subdirectory path with leading slash (e.g., /nextaidrawio)
# Leave empty for root deployment (default)
# NEXT_PUBLIC_BASE_PATH=/nextaidrawio
# PDF Input Feature (Optional)
# Enable PDF file upload to extract text and generate diagrams
# Enabled by default. Set to "false" to disable.

View File

@@ -19,13 +19,10 @@ export function register() {
const spanName = otelSpan.name
// Skip Next.js HTTP infrastructure spans
if (
spanName.startsWith("POST") ||
spanName.startsWith("GET") ||
spanName.startsWith("RSC") ||
spanName.startsWith("POST /") ||
spanName.startsWith("GET /") ||
spanName.includes("BaseServer") ||
spanName.includes("handleRequest") ||
spanName.includes("resolve page") ||
spanName.includes("start response")
spanName.includes("handleRequest")
) {
return false
}
@@ -39,5 +36,4 @@ export function register() {
// Register globally so AI SDK's telemetry also uses this processor
tracerProvider.register()
console.log("[Langfuse] Instrumentation initialized successfully")
}

View File

@@ -19,7 +19,6 @@ export type ProviderName =
| "openrouter"
| "deepseek"
| "siliconflow"
| "sglang"
| "gateway"
interface ModelConfig {
@@ -51,7 +50,6 @@ const ALLOWED_CLIENT_PROVIDERS: ProviderName[] = [
"openrouter",
"deepseek",
"siliconflow",
"sglang",
"gateway",
]
@@ -95,8 +93,8 @@ function parseIntSafe(
* Supports various AI SDK providers with their unique configuration options
*
* Environment variables:
* - OPENAI_REASONING_EFFORT: OpenAI reasoning effort level (minimal/low/medium/high) - for o1/o3/o4/gpt-5
* - OPENAI_REASONING_SUMMARY: OpenAI reasoning summary (auto/detailed) - auto-enabled for o1/o3/o4/gpt-5
* - OPENAI_REASONING_EFFORT: OpenAI reasoning effort level (minimal/low/medium/high) - for o1/o3/gpt-5
* - OPENAI_REASONING_SUMMARY: OpenAI reasoning summary (none/brief/detailed) - auto-enabled for o1/o3/gpt-5
* - ANTHROPIC_THINKING_BUDGET_TOKENS: Anthropic thinking budget in tokens (1024-64000)
* - ANTHROPIC_THINKING_TYPE: Anthropic thinking type (enabled)
* - GOOGLE_THINKING_BUDGET: Google Gemini 2.5 thinking budget in tokens (1024-100000)
@@ -118,19 +116,18 @@ function buildProviderOptions(
const reasoningEffort = process.env.OPENAI_REASONING_EFFORT
const reasoningSummary = process.env.OPENAI_REASONING_SUMMARY
// OpenAI reasoning models (o1, o3, o4, gpt-5) need reasoningSummary to return thoughts
// OpenAI reasoning models (o1, o3, gpt-5) need reasoningSummary to return thoughts
if (
modelId &&
(modelId.includes("o1") ||
modelId.includes("o3") ||
modelId.includes("o4") ||
modelId.includes("gpt-5"))
) {
options.openai = {
// Auto-enable reasoning summary for reasoning models
// Use 'auto' as default since not all models support 'detailed'
// Auto-enable reasoning summary for reasoning models (default: detailed)
reasoningSummary:
(reasoningSummary as "auto" | "detailed") || "auto",
(reasoningSummary as "none" | "brief" | "detailed") ||
"detailed",
}
// Optionally configure reasoning effort
@@ -153,7 +150,8 @@ function buildProviderOptions(
}
if (reasoningSummary) {
options.openai.reasoningSummary = reasoningSummary as
| "auto"
| "none"
| "brief"
| "detailed"
}
}
@@ -345,7 +343,6 @@ function buildProviderOptions(
case "deepseek":
case "openrouter":
case "siliconflow":
case "sglang":
case "gateway": {
// These providers don't have reasoning configs in AI SDK yet
// Gateway passes through to underlying providers which handle their own configs
@@ -370,7 +367,6 @@ const PROVIDER_ENV_VARS: Record<ProviderName, string | null> = {
openrouter: "OPENROUTER_API_KEY",
deepseek: "DEEPSEEK_API_KEY",
siliconflow: "SILICONFLOW_API_KEY",
sglang: "SGLANG_API_KEY",
gateway: "AI_GATEWAY_API_KEY",
}
@@ -436,7 +432,7 @@ function validateProviderCredentials(provider: ProviderName): void {
* Get the AI model based on environment variables
*
* Environment variables:
* - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway)
* - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow)
* - AI_MODEL: The model ID/name for the selected provider
*
* Provider-specific env vars:
@@ -452,8 +448,6 @@ function validateProviderCredentials(provider: ProviderName): void {
* - DEEPSEEK_BASE_URL: DeepSeek endpoint (optional)
* - SILICONFLOW_API_KEY: SiliconFlow API key
* - SILICONFLOW_BASE_URL: SiliconFlow endpoint (optional, defaults to https://api.siliconflow.com/v1)
* - SGLANG_API_KEY: SGLang API key
* - SGLANG_BASE_URL: SGLang endpoint (optional)
*/
export function getAIModel(overrides?: ClientOverrides): ModelConfig {
// SECURITY: Prevent SSRF attacks (GHSA-9qf7-mprq-9qgm)
@@ -522,7 +516,6 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
`- OPENROUTER_API_KEY for OpenRouter\n` +
`- AZURE_API_KEY for Azure\n` +
`- SILICONFLOW_API_KEY for SiliconFlow\n` +
`- SGLANG_API_KEY for SGLang\n` +
`Or set AI_PROVIDER=ollama for local Ollama.`,
)
} else {
@@ -588,16 +581,12 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
case "openai": {
const apiKey = overrides?.apiKey || process.env.OPENAI_API_KEY
const baseURL = overrides?.baseUrl || process.env.OPENAI_BASE_URL
if (baseURL) {
// Custom base URL = third-party proxy, use Chat Completions API
// for compatibility (most proxies don't support /responses endpoint)
const customOpenAI = createOpenAI({ apiKey, baseURL })
if (baseURL || overrides?.apiKey) {
const customOpenAI = createOpenAI({
apiKey,
...(baseURL && { baseURL }),
})
model = customOpenAI.chat(modelId)
} else if (overrides?.apiKey) {
// Custom API key but official OpenAI endpoint, use Responses API
// to support reasoning for gpt-5, o1, o3, o4 models
const customOpenAI = createOpenAI({ apiKey })
model = customOpenAI(modelId)
} else {
model = openai(modelId)
}
@@ -709,112 +698,6 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
break
}
case "sglang": {
const apiKey = overrides?.apiKey || process.env.SGLANG_API_KEY
const baseURL = overrides?.baseUrl || process.env.SGLANG_BASE_URL
const sglangProvider = createOpenAI({
apiKey,
baseURL,
// Add a custom fetch wrapper to intercept and fix the stream from sglang
fetch: async (url, options) => {
const response = await fetch(url, options)
if (!response.body) {
return response
}
// Create a transform stream to fix the non-compliant sglang stream
let buffer = ""
const decoder = new TextDecoder()
const transformStream = new TransformStream({
transform(chunk, controller) {
buffer += decoder.decode(chunk, { stream: true })
// Process all complete messages in the buffer
let messageEndPos
while (
(messageEndPos = buffer.indexOf("\n\n")) !== -1
) {
const message = buffer.substring(
0,
messageEndPos,
)
buffer = buffer.substring(messageEndPos + 2) // Move past the '\n\n'
if (message.startsWith("data: ")) {
const jsonStr = message.substring(6).trim()
if (jsonStr === "[DONE]") {
controller.enqueue(
new TextEncoder().encode(
message + "\n\n",
),
)
continue
}
try {
const data = JSON.parse(jsonStr)
const delta = data.choices?.[0]?.delta
if (delta) {
// Fix 1: remove invalid empty role
if (delta.role === "") {
delete delta.role
}
// Fix 2: remove non-standard reasoning_content field
if ("reasoning_content" in delta) {
delete delta.reasoning_content
}
}
// Re-serialize and forward the corrected data with the correct SSE format
controller.enqueue(
new TextEncoder().encode(
`data: ${JSON.stringify(data)}\n\n`,
),
)
} catch (e) {
// If parsing fails, forward the original message to avoid breaking the stream.
controller.enqueue(
new TextEncoder().encode(
message + "\n\n",
),
)
}
} else if (message.trim() !== "") {
// Pass through other message types (e.g., 'event: ...')
controller.enqueue(
new TextEncoder().encode(
message + "\n\n",
),
)
}
}
},
flush(controller) {
// If there's anything left in the buffer, forward it.
if (buffer.trim()) {
controller.enqueue(
new TextEncoder().encode(buffer),
)
}
},
})
const transformedBody =
response.body.pipeThrough(transformStream)
// Return a new response with the transformed body
return new Response(transformedBody, {
status: response.status,
statusText: response.statusText,
headers: response.headers,
})
},
})
model = sglangProvider.chat(modelId)
break
}
case "gateway": {
// Vercel AI Gateway - unified access to multiple AI providers
// Model format: "provider/model" e.g., "openai/gpt-4o", "anthropic/claude-sonnet-4-5"
@@ -838,7 +721,7 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
default:
throw new Error(
`Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway`,
`Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, gateway`,
)
}

View File

@@ -1,37 +0,0 @@
/**
* Get the base path for API calls and static assets
* This is used for subdirectory deployment support
*
* Example: If deployed at https://example.com/nextaidrawio, this returns "/nextaidrawio"
* For root deployment, this returns ""
*
* Set NEXT_PUBLIC_BASE_PATH environment variable to your subdirectory path (e.g., /nextaidrawio)
*/
export function getBasePath(): string {
// Read from environment variable (must start with NEXT_PUBLIC_ to be available on client)
const basePath = process.env.NEXT_PUBLIC_BASE_PATH || ""
if (basePath && !basePath.startsWith("/")) {
console.warn("NEXT_PUBLIC_BASE_PATH should start with /")
}
return basePath
}
/**
* Get full API endpoint URL
* @param endpoint - API endpoint path (e.g., "/api/chat", "/api/config")
* @returns Full API path with base path prefix
*/
export function getApiEndpoint(endpoint: string): string {
const basePath = getBasePath()
return `${basePath}${endpoint}`
}
/**
* Get full static asset URL
* @param assetPath - Asset path (e.g., "/example.png", "/chain-of-thought.txt")
* @returns Full asset path with base path prefix
*/
export function getAssetUrl(assetPath: string): string {
const basePath = getBasePath()
return `${basePath}${assetPath}`
}

View File

@@ -1,238 +0,0 @@
import {
ConditionalCheckFailedException,
DynamoDBClient,
GetItemCommand,
UpdateItemCommand,
} from "@aws-sdk/client-dynamodb"
// Quota tracking is OPT-IN: only enabled if DYNAMODB_QUOTA_TABLE is explicitly set
// OSS users who don't need quota tracking can simply not set this env var
const TABLE = process.env.DYNAMODB_QUOTA_TABLE
const DYNAMODB_REGION = process.env.DYNAMODB_REGION || "ap-northeast-1"
// Only create client if quota is enabled
const client = TABLE ? new DynamoDBClient({ region: DYNAMODB_REGION }) : null
/**
* Check if server-side quota tracking is enabled.
* Quota is opt-in: only enabled when DYNAMODB_QUOTA_TABLE env var is set.
*/
export function isQuotaEnabled(): boolean {
return !!TABLE
}
interface QuotaLimits {
requests: number // Daily request limit
tokens: number // Daily token limit
tpm: number // Tokens per minute
}
interface QuotaCheckResult {
allowed: boolean
error?: string
type?: "request" | "token" | "tpm"
used?: number
limit?: number
}
/**
* Check all quotas and increment request count atomically.
* Uses ConditionExpression to prevent race conditions.
* Returns which limit was exceeded if any.
*/
export async function checkAndIncrementRequest(
ip: string,
limits: QuotaLimits,
): Promise<QuotaCheckResult> {
// Skip if quota tracking not enabled
if (!client || !TABLE) {
return { allowed: true }
}
const today = new Date().toISOString().split("T")[0]
const currentMinute = Math.floor(Date.now() / 60000).toString()
const ttl = Math.floor(Date.now() / 1000) + 7 * 24 * 60 * 60
try {
// Atomic check-and-increment with ConditionExpression
// This prevents race conditions by failing if limits are exceeded
await client.send(
new UpdateItemCommand({
TableName: TABLE,
Key: { PK: { S: `IP#${ip}` } },
// Reset counts if new day/minute, then increment request count
UpdateExpression: `
SET lastResetDate = :today,
dailyReqCount = if_not_exists(dailyReqCount, :zero) + :one,
dailyTokenCount = if_not_exists(dailyTokenCount, :zero),
lastMinute = :minute,
tpmCount = if_not_exists(tpmCount, :zero),
#ttl = :ttl
`,
// Atomic condition: only succeed if ALL limits pass
// Uses attribute_not_exists for new items, then checks limits for existing items
ConditionExpression: `
(attribute_not_exists(lastResetDate) OR lastResetDate < :today OR
((attribute_not_exists(dailyReqCount) OR dailyReqCount < :reqLimit) AND
(attribute_not_exists(dailyTokenCount) OR dailyTokenCount < :tokenLimit))) AND
(attribute_not_exists(lastMinute) OR lastMinute <> :minute OR
attribute_not_exists(tpmCount) OR tpmCount < :tpmLimit)
`,
ExpressionAttributeNames: { "#ttl": "ttl" },
ExpressionAttributeValues: {
":today": { S: today },
":zero": { N: "0" },
":one": { N: "1" },
":minute": { S: currentMinute },
":ttl": { N: String(ttl) },
":reqLimit": { N: String(limits.requests || 999999) },
":tokenLimit": { N: String(limits.tokens || 999999) },
":tpmLimit": { N: String(limits.tpm || 999999) },
},
}),
)
return { allowed: true }
} catch (e: any) {
// Condition failed - need to determine which limit was exceeded
if (e instanceof ConditionalCheckFailedException) {
// Get current counts to determine which limit was hit
try {
const getResult = await client.send(
new GetItemCommand({
TableName: TABLE,
Key: { PK: { S: `IP#${ip}` } },
}),
)
const item = getResult.Item
const storedDate = item?.lastResetDate?.S
const storedMinute = item?.lastMinute?.S
const isNewDay = !storedDate || storedDate < today
const dailyReqCount = isNewDay
? 0
: Number(item?.dailyReqCount?.N || 0)
const dailyTokenCount = isNewDay
? 0
: Number(item?.dailyTokenCount?.N || 0)
const tpmCount =
storedMinute !== currentMinute
? 0
: Number(item?.tpmCount?.N || 0)
// Determine which limit was exceeded
if (limits.requests > 0 && dailyReqCount >= limits.requests) {
return {
allowed: false,
type: "request",
error: "Daily request limit exceeded",
used: dailyReqCount,
limit: limits.requests,
}
}
if (limits.tokens > 0 && dailyTokenCount >= limits.tokens) {
return {
allowed: false,
type: "token",
error: "Daily token limit exceeded",
used: dailyTokenCount,
limit: limits.tokens,
}
}
if (limits.tpm > 0 && tpmCount >= limits.tpm) {
return {
allowed: false,
type: "tpm",
error: "Rate limit exceeded (tokens per minute)",
used: tpmCount,
limit: limits.tpm,
}
}
// Condition failed but no limit clearly exceeded - race condition edge case
// Fail safe by allowing (could be a reset race)
console.warn(
`[quota] Condition failed but no limit exceeded for IP prefix: ${ip.slice(0, 8)}...`,
)
return { allowed: true }
} catch (getError: any) {
console.error(
`[quota] Failed to get quota details after condition failure, IP prefix: ${ip.slice(0, 8)}..., error: ${getError.message}`,
)
return { allowed: true } // Fail open
}
}
// Other DynamoDB errors - fail open
console.error(
`[quota] DynamoDB error (fail-open), IP prefix: ${ip.slice(0, 8)}..., error: ${e.message}`,
)
return { allowed: true }
}
}
/**
* Record token usage after response completes.
* Uses atomic operations to update both daily token count and TPM count.
* Handles minute boundaries atomically to prevent race conditions.
*/
export async function recordTokenUsage(
ip: string,
tokens: number,
): Promise<void> {
// Skip if quota tracking not enabled
if (!client || !TABLE) return
if (!Number.isFinite(tokens) || tokens <= 0) return
const currentMinute = Math.floor(Date.now() / 60000).toString()
const ttl = Math.floor(Date.now() / 1000) + 7 * 24 * 60 * 60
try {
// Try to update assuming same minute (most common case)
// Uses condition to ensure we're in the same minute
await client.send(
new UpdateItemCommand({
TableName: TABLE,
Key: { PK: { S: `IP#${ip}` } },
UpdateExpression:
"SET #ttl = :ttl ADD dailyTokenCount :tokens, tpmCount :tokens",
ConditionExpression: "lastMinute = :minute",
ExpressionAttributeNames: { "#ttl": "ttl" },
ExpressionAttributeValues: {
":minute": { S: currentMinute },
":tokens": { N: String(tokens) },
":ttl": { N: String(ttl) },
},
}),
)
} catch (e: any) {
if (e instanceof ConditionalCheckFailedException) {
// Different minute - reset TPM count and set new minute
try {
await client.send(
new UpdateItemCommand({
TableName: TABLE,
Key: { PK: { S: `IP#${ip}` } },
UpdateExpression:
"SET lastMinute = :minute, tpmCount = :tokens, #ttl = :ttl ADD dailyTokenCount :tokens",
ExpressionAttributeNames: { "#ttl": "ttl" },
ExpressionAttributeValues: {
":minute": { S: currentMinute },
":tokens": { N: String(tokens) },
":ttl": { N: String(ttl) },
},
}),
)
} catch (retryError: any) {
console.error(
`[quota] Failed to record tokens (retry), IP prefix: ${ip.slice(0, 8)}..., tokens: ${tokens}, error: ${retryError.message}`,
)
}
} else {
console.error(
`[quota] Failed to record tokens, IP prefix: ${ip.slice(0, 8)}..., tokens: ${tokens}, error: ${e.message}`,
)
}
}
}
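The quota module above exposes two entry points: checkAndIncrementRequest (an atomic pre-flight check that also counts the request) and recordTokenUsage (called after the response to add token counts). A minimal, hypothetical sketch of how an API route could wire them together; the import path, header handling, and limit values are assumptions for illustration, not taken from this diff:

// Hypothetical caller sketch; not part of this diff.
import {
    checkAndIncrementRequest,
    isQuotaEnabled,
    recordTokenUsage,
} from "@/lib/dynamodb-quota" // assumed module path

export async function POST(req: Request) {
    const ip =
        req.headers.get("x-forwarded-for")?.split(",")[0]?.trim() || "unknown"
    if (isQuotaEnabled()) {
        // Atomically verify daily request/token and per-minute limits, then count this request.
        const result = await checkAndIncrementRequest(ip, {
            requests: 100, // assumed limits for illustration
            tokens: 200_000,
            tpm: 20_000,
        })
        if (!result.allowed) {
            return Response.json(result, { status: 429 })
        }
    }
    // ...generate the model response, then report actual token usage afterwards
    const totalTokens = 1234 // would come from the model's reported usage
    await recordTokenUsage(ip, totalTokens)
    return Response.json({ ok: true })
}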

View File

@@ -14,7 +14,6 @@
"about": "About",
"editor": "Editor",
"newChat": "Start fresh chat",
"github": "GitHub",
"settings": "Settings",
"hidePanel": "Hide chat panel (Ctrl+B)",
"showPanel": "Show chat panel (Ctrl+B)",
@@ -88,8 +87,6 @@
"overrides": "Overrides",
"clearSettings": "Clear Settings",
"useServerDefault": "Use Server Default",
"language": "Language",
"languageDescription": "Choose your interface language.",
"theme": "Theme",
"themeDescription": "Dark/Light mode for interface and DrawIO canvas.",
"drawioStyle": "DrawIO Style",
@@ -150,7 +147,6 @@
"tokenLimit": "Daily Token Limit Reached",
"tpmLimit": "Rate Limit",
"tpmMessage": "Too many requests. Please wait a moment.",
"tpmMessageDetailed": "Rate limit reached ({limit} tokens/min). Please wait {seconds} seconds before sending another request.",
"messageApi": "Oops — you've reached the daily API limit for this demo! As an indie developer covering all the API costs myself, I have to set these limits to keep things sustainable.",
"messageToken": "Oops — you've reached the daily token limit for this demo! As an indie developer covering all the API costs myself, I have to set these limits to keep things sustainable.",
"tip": "<strong>Tip:</strong> You can use your own API key (click the Settings icon) or self-host the project to bypass these limits.",
@@ -202,47 +198,6 @@
"apiKeyStored": "API keys are stored locally in your browser",
"test": "Test",
"validationError": "Validation failed",
"addModelFirst": "Add at least one model to validate",
"providers": "Providers",
"addProviderHint": "Add a provider to get started",
"verified": "Verified",
"configuration": "Configuration",
"displayName": "Display Name",
"awsAccessKeyId": "AWS Access Key ID",
"awsSecretAccessKey": "AWS Secret Access Key",
"awsRegion": "AWS Region",
"selectRegion": "Select region",
"apiKey": "API Key",
"enterApiKey": "Enter your API key",
"enterSecretKey": "Enter your secret access key",
"baseUrl": "Base URL",
"optional": "(optional)",
"customEndpoint": "Custom endpoint URL",
"models": "Models",
"customModelId": "Custom model ID...",
"allAdded": "All added",
"suggested": "Suggested",
"noModelsConfigured": "No models configured",
"modelIdEmpty": "Model ID cannot be empty",
"modelIdExists": "This model ID already exists",
"configureProviders": "Configure AI Providers",
"selectProviderHint": "Select a provider from the list or add a new one to configure API keys and models",
"deleteConfirmDesc": "Are you sure you want to delete {name}? This will remove all configured models and cannot be undone.",
"typeToConfirm": "Type \"{name}\" to confirm",
"typeProviderName": "Type provider name...",
"modelsConfiguredCount": "{count} model(s) configured",
"validationFailedCount": "{count} model(s) failed validation",
"cancel": "Cancel",
"delete": "Delete",
"clickToChange": "(click to change)",
"usingServerDefault": "Using server default model",
"selectModel": "Select Model",
"searchModels": "Search models...",
"noVerifiedModels": "No verified models. Test your models first.",
"noModelsFound": "No models found.",
"default": "Default",
"serverDefault": "Server Default",
"configureModels": "Configure Models...",
"onlyVerifiedShown": "Only verified models are shown"
"addModelFirst": "Add at least one model to validate"
}
}

View File

@@ -14,7 +14,6 @@
"about": "概要",
"editor": "エディタ",
"newChat": "新しいチャットを開始",
"github": "GitHub",
"settings": "設定",
"hidePanel": "チャットパネルを非表示 (Ctrl+B)",
"showPanel": "チャットパネルを表示 (Ctrl+B)",
@@ -88,8 +87,6 @@
"overrides": "上書き",
"clearSettings": "設定をクリア",
"useServerDefault": "サーバーデフォルトを使用",
"language": "言語",
"languageDescription": "インターフェース言語を選択します。",
"theme": "テーマ",
"themeDescription": "インターフェースと DrawIO キャンバスのダーク/ライトモード。",
"drawioStyle": "DrawIO スタイル",
@@ -150,7 +147,6 @@
"tokenLimit": "1日のトークン制限に達しました",
"tpmLimit": "レート制限",
"tpmMessage": "リクエストが多すぎます。しばらくお待ちください。",
"tpmMessageDetailed": "レート制限に達しました({limit}トークン/分)。{seconds}秒待ってからもう一度リクエストしてください。",
"messageApi": "おっと — このデモの1日の API 制限に達しました!個人開発者として API コストをすべて負担しているため、持続可能性を保つためにこれらの制限を設定する必要があります。",
"messageToken": "おっと — このデモの1日のトークン制限に達しました個人開発者として API コストをすべて負担しているため、持続可能性を保つためにこれらの制限を設定する必要があります。",
"tip": "<strong>ヒント:</strong>独自の API キーを使用する(設定アイコンをクリック)か、プロジェクトをセルフホストしてこれらの制限を回避できます。",
@@ -202,47 +198,6 @@
"apiKeyStored": "APIキーはブラウザにローカル保存されます",
"test": "テスト",
"validationError": "検証に失敗しました",
"addModelFirst": "検証するには少なくとも1つのモデルを追加してください",
"providers": "プロバイダー",
"addProviderHint": "プロバイダーを追加して開始",
"verified": "検証済み",
"configuration": "設定",
"displayName": "表示名",
"awsAccessKeyId": "AWS アクセスキー ID",
"awsSecretAccessKey": "AWS シークレットアクセスキー",
"awsRegion": "AWS リージョン",
"selectRegion": "リージョンを選択",
"apiKey": "API キー",
"enterApiKey": "API キーを入力",
"enterSecretKey": "シークレットアクセスキーを入力",
"baseUrl": "ベース URL",
"optional": "(オプション)",
"customEndpoint": "カスタムエンドポイント URL",
"models": "モデル",
"customModelId": "カスタムモデル ID...",
"allAdded": "すべて追加済み",
"suggested": "おすすめ",
"noModelsConfigured": "モデルが設定されていません",
"modelIdEmpty": "モデル ID は空にできません",
"modelIdExists": "このモデル ID は既に存在します",
"configureProviders": "AI プロバイダーを設定",
"selectProviderHint": "リストからプロバイダーを選択するか、新規追加して API キーとモデルを設定",
"deleteConfirmDesc": "{name} を削除してもよろしいですか?設定されたすべてのモデルが削除され、元に戻せません。",
"typeToConfirm": "確認のため「{name}」と入力",
"typeProviderName": "プロバイダー名を入力...",
"modelsConfiguredCount": "{count} 個のモデルを設定済み",
"validationFailedCount": "{count} 個のモデルの検証に失敗",
"cancel": "キャンセル",
"delete": "削除",
"clickToChange": "(クリックして変更)",
"usingServerDefault": "サーバーデフォルトモデルを使用中",
"selectModel": "モデルを選択",
"searchModels": "モデルを検索...",
"noVerifiedModels": "検証済みのモデルがありません。先にモデルをテストしてください。",
"noModelsFound": "モデルが見つかりません。",
"default": "デフォルト",
"serverDefault": "サーバーデフォルト",
"configureModels": "モデルを設定...",
"onlyVerifiedShown": "検証済みのモデルのみ表示"
"addModelFirst": "検証するには少なくとも1つのモデルを追加してください"
}
}

View File

@@ -14,7 +14,6 @@
"about": "关于",
"editor": "编辑器",
"newChat": "开始新对话",
"github": "GitHub",
"settings": "设置",
"hidePanel": "隐藏聊天面板 (Ctrl+B)",
"showPanel": "显示聊天面板 (Ctrl+B)",
@@ -88,8 +87,6 @@
"overrides": "覆盖",
"clearSettings": "清除设置",
"useServerDefault": "使用服务器默认值",
"language": "语言",
"languageDescription": "选择界面语言。",
"theme": "主题",
"themeDescription": "界面和 DrawIO 画布的深色/浅色模式。",
"drawioStyle": "DrawIO 样式",
@@ -150,7 +147,6 @@
"tokenLimit": "已达每日令牌限制",
"tpmLimit": "速率限制",
"tpmMessage": "请求过多。请稍等片刻。",
"tpmMessageDetailed": "达到速率限制({limit} 令牌/分钟)。请等待 {seconds} 秒后再发送请求。",
"messageApi": "糟糕 — 您已达到此演示的每日 API 限制!作为一名独立开发者,我自己承担所有 API 费用,因此必须设置这些限制以保持可持续性。",
"messageToken": "糟糕 — 您已达到此演示的每日令牌限制!作为一名独立开发者,我自己承担所有 API 费用,因此必须设置这些限制以保持可持续性。",
"tip": "<strong>提示:</strong>您可以使用自己的 API 密钥(点击设置图标)或自托管项目来绕过这些限制。",
@@ -202,47 +198,6 @@
"apiKeyStored": "API 密钥存储在您的浏览器本地",
"test": "测试",
"validationError": "验证失败",
"addModelFirst": "请先添加至少一个模型以进行验证",
"providers": "提供商",
"addProviderHint": "添加提供商即可开始使用",
"verified": "已验证",
"configuration": "配置",
"displayName": "显示名称",
"awsAccessKeyId": "AWS 访问密钥 ID",
"awsSecretAccessKey": "AWS Secret Access Key",
"awsRegion": "AWS 区域",
"selectRegion": "选择区域",
"apiKey": "API 密钥",
"enterApiKey": "输入您的 API 密钥",
"enterSecretKey": "输入您的 Secret Key",
"baseUrl": "基础 URL",
"optional": "(可选)",
"customEndpoint": "自定义端点 URL",
"models": "模型",
"customModelId": "自定义模型 ID...",
"allAdded": "已全部添加",
"suggested": "推荐",
"noModelsConfigured": "尚未配置模型",
"modelIdEmpty": "模型 ID 不能为空",
"modelIdExists": "此模型 ID 已存在",
"configureProviders": "配置 AI 提供商",
"selectProviderHint": "从列表中选择提供商或添加新的以配置 API 密钥和模型",
"deleteConfirmDesc": "确定要删除 {name} 吗?这将移除所有配置的模型且无法撤销。",
"typeToConfirm": "输入 \"{name}\" 以确认",
"typeProviderName": "输入提供商名称...",
"modelsConfiguredCount": "已配置 {count} 个模型",
"validationFailedCount": "{count} 个模型验证失败",
"cancel": "取消",
"delete": "删除",
"clickToChange": "(点击更改)",
"usingServerDefault": "使用服务器默认模型",
"selectModel": "选择模型",
"searchModels": "搜索模型...",
"noVerifiedModels": "没有已验证的模型。请先测试您的模型。",
"noModelsFound": "未找到模型。",
"default": "默认",
"serverDefault": "服务器默认",
"configureModels": "配置模型...",
"onlyVerifiedShown": "仅显示已验证的模型"
"addModelFirst": "请先添加至少一个模型以进行验证"
}
}

View File

@@ -21,11 +21,9 @@ export function getLangfuseClient(): LangfuseClient | null {
return langfuseClient
}
// Check if Langfuse is configured (both keys required)
// Check if Langfuse is configured
export function isLangfuseEnabled(): boolean {
return !!(
process.env.LANGFUSE_PUBLIC_KEY && process.env.LANGFUSE_SECRET_KEY
)
return !!process.env.LANGFUSE_PUBLIC_KEY
}
// Update trace with input data at the start of request
@@ -45,16 +43,34 @@ export function setTraceInput(params: {
}
// Update trace with output and end the span
// Note: AI SDK 6 telemetry automatically reports token usage on its spans,
// so we only need to set the output text and close our wrapper span
export function setTraceOutput(output: string) {
export function setTraceOutput(
output: string,
usage?: { promptTokens?: number; completionTokens?: number },
) {
if (!isLangfuseEnabled()) return
updateActiveTrace({ output })
// End the observe() wrapper span (AI SDK creates its own child spans with usage)
const activeSpan = api.trace.getActiveSpan()
if (activeSpan) {
// Manually set usage attributes since AI SDK Bedrock streaming doesn't provide them
if (usage?.promptTokens) {
activeSpan.setAttribute("ai.usage.promptTokens", usage.promptTokens)
activeSpan.setAttribute(
"gen_ai.usage.input_tokens",
usage.promptTokens,
)
}
if (usage?.completionTokens) {
activeSpan.setAttribute(
"ai.usage.completionTokens",
usage.completionTokens,
)
activeSpan.setAttribute(
"gen_ai.usage.output_tokens",
usage.completionTokens,
)
}
activeSpan.end()
}
}
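The updated setTraceOutput accepts an optional usage object so that providers whose streaming responses do not surface usage on the AI SDK spans (the comment calls out Bedrock) still report token counts on the wrapper span. A minimal caller sketch, assuming the route already has the final text and token counts; the import path and field names feeding into it are illustrative and depend on the AI SDK version in use:

// Hypothetical caller sketch; not part of this diff.
import { setTraceOutput } from "@/lib/langfuse" // assumed module path

async function finishTrace(
    finalText: string,
    inputTokens?: number,
    outputTokens?: number,
) {
    // Pass token counts explicitly so the wrapper span carries usage even when
    // the provider's own spans do not report it.
    setTraceOutput(finalText, {
        promptTokens: inputTokens,
        completionTokens: outputTokens,
    })
}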

View File

@@ -1,10 +1,9 @@
"use client"
import { useCallback } from "react"
import { useCallback, useMemo } from "react"
import { toast } from "sonner"
import { QuotaLimitToast } from "@/components/quota-limit-toast"
import { useDictionary } from "@/hooks/use-dictionary"
import { formatMessage } from "@/lib/i18n/utils"
import { STORAGE_KEYS } from "@/lib/storage"
export interface QuotaConfig {
dailyRequestLimit: number
@@ -12,45 +11,179 @@ export interface QuotaConfig {
tpmLimit: number
}
export interface QuotaCheckResult {
allowed: boolean
remaining: number
used: number
}
/**
* Hook for displaying quota limit toasts.
* Server-side handles actual quota enforcement via DynamoDB.
* This hook only provides UI feedback when limits are exceeded.
* Hook for managing request/token quotas and rate limiting.
* Handles three types of limits:
* - Daily request limit
* - Daily token limit
* - Tokens per minute (TPM) rate limit
*
* Users with their own API key bypass all limits.
*/
export function useQuotaManager(config: QuotaConfig): {
showQuotaLimitToast: (used?: number, limit?: number) => void
showTokenLimitToast: (used?: number, limit?: number) => void
showTPMLimitToast: (limit?: number) => void
hasOwnApiKey: () => boolean
checkDailyLimit: () => QuotaCheckResult
checkTokenLimit: () => QuotaCheckResult
checkTPMLimit: () => QuotaCheckResult
incrementRequestCount: () => void
incrementTokenCount: (tokens: number) => void
incrementTPMCount: (tokens: number) => void
showQuotaLimitToast: () => void
showTokenLimitToast: (used: number) => void
showTPMLimitToast: () => void
} {
const { dailyRequestLimit, dailyTokenLimit, tpmLimit } = config
const dict = useDictionary()
// Check if user has their own API key configured (bypass limits)
const hasOwnApiKey = useCallback((): boolean => {
const provider = localStorage.getItem(STORAGE_KEYS.aiProvider)
const apiKey = localStorage.getItem(STORAGE_KEYS.aiApiKey)
return !!(provider && apiKey)
}, [])
// Generic helper: Parse count from localStorage with NaN guard
const parseStorageCount = (key: string): number => {
const count = parseInt(localStorage.getItem(key) || "0", 10)
return Number.isNaN(count) ? 0 : count
}
// Generic helper: Create quota checker factory
const createQuotaChecker = useCallback(
(
getTimeKey: () => string,
timeStorageKey: string,
countStorageKey: string,
limit: number,
) => {
return (): QuotaCheckResult => {
if (hasOwnApiKey())
return { allowed: true, remaining: -1, used: 0 }
if (limit <= 0) return { allowed: true, remaining: -1, used: 0 }
const currentTime = getTimeKey()
const storedTime = localStorage.getItem(timeStorageKey)
let count = parseStorageCount(countStorageKey)
if (storedTime !== currentTime) {
count = 0
localStorage.setItem(timeStorageKey, currentTime)
localStorage.setItem(countStorageKey, "0")
}
return {
allowed: count < limit,
remaining: limit - count,
used: count,
}
}
},
[hasOwnApiKey],
)
// Generic helper: Create quota incrementer factory
const createQuotaIncrementer = useCallback(
(
getTimeKey: () => string,
timeStorageKey: string,
countStorageKey: string,
validateInput: boolean = false,
) => {
return (tokens: number = 1): void => {
if (validateInput && (!Number.isFinite(tokens) || tokens <= 0))
return
const currentTime = getTimeKey()
const storedTime = localStorage.getItem(timeStorageKey)
let count = parseStorageCount(countStorageKey)
if (storedTime !== currentTime) {
count = 0
localStorage.setItem(timeStorageKey, currentTime)
}
localStorage.setItem(countStorageKey, String(count + tokens))
}
},
[],
)
// Check daily request limit
const checkDailyLimit = useMemo(
() =>
createQuotaChecker(
() => new Date().toDateString(),
STORAGE_KEYS.requestDate,
STORAGE_KEYS.requestCount,
dailyRequestLimit,
),
[createQuotaChecker, dailyRequestLimit],
)
// Increment request count
const incrementRequestCount = useMemo(
() =>
createQuotaIncrementer(
() => new Date().toDateString(),
STORAGE_KEYS.requestDate,
STORAGE_KEYS.requestCount,
false,
),
[createQuotaIncrementer],
)
// Show quota limit toast (request-based)
const showQuotaLimitToast = useCallback(
(used?: number, limit?: number) => {
toast.custom(
(t) => (
<QuotaLimitToast
used={used ?? dailyRequestLimit}
limit={limit ?? dailyRequestLimit}
onDismiss={() => toast.dismiss(t)}
/>
),
{ duration: 15000 },
)
},
[dailyRequestLimit],
const showQuotaLimitToast = useCallback(() => {
toast.custom(
(t) => (
<QuotaLimitToast
used={dailyRequestLimit}
limit={dailyRequestLimit}
onDismiss={() => toast.dismiss(t)}
/>
),
{ duration: 15000 },
)
}, [dailyRequestLimit])
// Check daily token limit
const checkTokenLimit = useMemo(
() =>
createQuotaChecker(
() => new Date().toDateString(),
STORAGE_KEYS.tokenDate,
STORAGE_KEYS.tokenCount,
dailyTokenLimit,
),
[createQuotaChecker, dailyTokenLimit],
)
// Increment token count
const incrementTokenCount = useMemo(
() =>
createQuotaIncrementer(
() => new Date().toDateString(),
STORAGE_KEYS.tokenDate,
STORAGE_KEYS.tokenCount,
true, // Validate input tokens
),
[createQuotaIncrementer],
)
// Show token limit toast
const showTokenLimitToast = useCallback(
(used?: number, limit?: number) => {
(used: number) => {
toast.custom(
(t) => (
<QuotaLimitToast
type="token"
used={used ?? dailyTokenLimit}
limit={limit ?? dailyTokenLimit}
used={used}
limit={dailyTokenLimit}
onDismiss={() => toast.dismiss(t)}
/>
),
@@ -60,24 +193,53 @@ export function useQuotaManager(config: QuotaConfig): {
[dailyTokenLimit],
)
// Show TPM limit toast
const showTPMLimitToast = useCallback(
(limit?: number) => {
const effectiveLimit = limit ?? tpmLimit
const limitDisplay =
effectiveLimit >= 1000
? `${effectiveLimit / 1000}k`
: String(effectiveLimit)
const message = formatMessage(dict.quota.tpmMessageDetailed, {
limit: limitDisplay,
seconds: 60,
})
toast.error(message, { duration: 8000 })
},
[tpmLimit, dict],
// Check TPM (tokens per minute) limit
const checkTPMLimit = useMemo(
() =>
createQuotaChecker(
() => Math.floor(Date.now() / 60000).toString(),
STORAGE_KEYS.tpmMinute,
STORAGE_KEYS.tpmCount,
tpmLimit,
),
[createQuotaChecker, tpmLimit],
)
// Increment TPM count
const incrementTPMCount = useMemo(
() =>
createQuotaIncrementer(
() => Math.floor(Date.now() / 60000).toString(),
STORAGE_KEYS.tpmMinute,
STORAGE_KEYS.tpmCount,
true, // Validate input tokens
),
[createQuotaIncrementer],
)
// Show TPM limit toast
const showTPMLimitToast = useCallback(() => {
const limitDisplay =
tpmLimit >= 1000 ? `${tpmLimit / 1000}k` : String(tpmLimit)
toast.error(
`Rate limit reached (${limitDisplay} tokens/min). Please wait 60 seconds before sending another request.`,
{ duration: 8000 },
)
}, [tpmLimit])
return {
// Check functions
hasOwnApiKey,
checkDailyLimit,
checkTokenLimit,
checkTPMLimit,
// Increment functions
incrementRequestCount,
incrementTokenCount,
incrementTPMCount,
// Toast functions
showQuotaLimitToast,
showTokenLimitToast,
showTPMLimitToast,
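The fuller variant of useQuotaManager shown in this hunk returns paired check/increment functions backed by localStorage alongside the toast helpers. A minimal, hypothetical consumer sketch; the component name, import path, and limit values are assumptions for illustration:

// Hypothetical consumer sketch; assumes the hook variant returning check/increment functions.
import { useQuotaManager } from "@/hooks/use-quota-manager" // assumed module path

function ChatComposer() {
    const quota = useQuotaManager({
        dailyRequestLimit: 50, // assumed limits for illustration
        dailyTokenLimit: 100_000,
        tpmLimit: 10_000,
    })

    const handleSend = () => {
        // Users with their own API key bypass all limits inside the checkers.
        if (!quota.checkDailyLimit().allowed) {
            quota.showQuotaLimitToast()
            return
        }
        if (!quota.checkTPMLimit().allowed) {
            quota.showTPMLimitToast()
            return
        }
        quota.incrementRequestCount()
        // ...send the message; afterwards record tokens from the response, e.g.:
        // quota.incrementTokenCount(totalTokens)
        // quota.incrementTPMCount(totalTokens)
    }

    return <button onClick={handleSend}>Send</button>
}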

View File

@@ -61,47 +61,6 @@ export function isMxCellXmlComplete(xml: string | undefined | null): boolean {
return trimmed.endsWith("/>") || trimmed.endsWith("</mxCell>")
}
/**
* Extract only complete mxCell elements from partial/streaming XML.
* This allows progressive rendering during streaming by ignoring incomplete trailing elements.
* @param xml - The partial XML string (may contain incomplete trailing mxCell)
* @returns XML string containing only complete mxCell elements
*/
export function extractCompleteMxCells(xml: string | undefined | null): string {
if (!xml) return ""
const completeCells: Array<{ index: number; text: string }> = []
// Match self-closing mxCell tags: <mxCell ... />
// Also match mxCell with nested mxGeometry: <mxCell ...>...<mxGeometry .../></mxCell>
const selfClosingPattern = /<mxCell\s+[^>]*\/>/g
const nestedPattern = /<mxCell\s+[^>]*>[\s\S]*?<\/mxCell>/g
// Find all self-closing mxCell elements
let match: RegExpExecArray | null
while ((match = selfClosingPattern.exec(xml)) !== null) {
completeCells.push({ index: match.index, text: match[0] })
}
// Find all mxCell elements with nested content (like mxGeometry)
while ((match = nestedPattern.exec(xml)) !== null) {
completeCells.push({ index: match.index, text: match[0] })
}
// Sort by position to maintain order
completeCells.sort((a, b) => a.index - b.index)
// Remove duplicates (a self-closing match might overlap with nested match)
const seen = new Set<number>()
const uniqueCells = completeCells.filter((cell) => {
if (seen.has(cell.index)) return false
seen.add(cell.index)
return true
})
return uniqueCells.map((c) => c.text).join("\n")
}
// ============================================================================
// XML Parsing Helpers
// ============================================================================

View File

@@ -4,16 +4,9 @@ import packageJson from "./package.json"
const nextConfig: NextConfig = {
/* config options here */
output: "standalone",
// Support for subdirectory deployment (e.g., https://example.com/nextaidrawio)
// Set NEXT_PUBLIC_BASE_PATH environment variable to your subdirectory path (e.g., /nextaidrawio)
basePath: process.env.NEXT_PUBLIC_BASE_PATH || "",
env: {
APP_VERSION: packageJson.version,
},
// Include instrumentation.ts in standalone build for Langfuse telemetry
outputFileTracingIncludes: {
"*": ["./instrumentation.ts"],
},
}
export default nextConfig

package-lock.json (generated): diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "next-ai-draw-io",
"version": "0.4.6",
"version": "0.4.5",
"license": "Apache-2.0",
"private": true,
"main": "dist-electron/main/index.js",
@@ -24,22 +24,21 @@
"dist:all": "npm run electron:build && npm run electron:prepare && npx electron-builder --mac --win --linux"
},
"dependencies": {
"@ai-sdk/amazon-bedrock": "^4.0.1",
"@ai-sdk/anthropic": "^3.0.0",
"@ai-sdk/azure": "^3.0.0",
"@ai-sdk/deepseek": "^2.0.0",
"@ai-sdk/gateway": "^3.0.0",
"@ai-sdk/google": "^3.0.0",
"@ai-sdk/openai": "^3.0.0",
"@ai-sdk/react": "^3.0.1",
"@aws-sdk/client-dynamodb": "^3.957.0",
"@ai-sdk/amazon-bedrock": "^3.0.70",
"@ai-sdk/anthropic": "^2.0.44",
"@ai-sdk/azure": "^2.0.69",
"@ai-sdk/deepseek": "^1.0.30",
"@ai-sdk/gateway": "^2.0.21",
"@ai-sdk/google": "^2.0.0",
"@ai-sdk/openai": "^2.0.19",
"@ai-sdk/react": "^2.0.107",
"@aws-sdk/credential-providers": "^3.943.0",
"@formatjs/intl-localematcher": "^0.7.2",
"@langfuse/client": "^4.4.9",
"@langfuse/otel": "^4.4.4",
"@langfuse/tracing": "^4.4.9",
"@next/third-parties": "^16.0.6",
"@openrouter/ai-sdk-provider": "^1.5.4",
"@openrouter/ai-sdk-provider": "^1.2.3",
"@opentelemetry/exporter-trace-otlp-http": "^0.208.0",
"@opentelemetry/sdk-trace-node": "^2.2.0",
"@radix-ui/react-alert-dialog": "^1.1.15",
@@ -54,7 +53,7 @@
"@radix-ui/react-tooltip": "^1.1.8",
"@radix-ui/react-use-controllable-state": "^1.2.2",
"@xmldom/xmldom": "^0.9.8",
"ai": "^6.0.1",
"ai": "^5.0.89",
"base-64": "^1.0.0",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
@@ -112,10 +111,5 @@
"tailwindcss": "^4",
"typescript": "^5",
"wait-on": "^9.0.3"
},
"overrides": {
"@openrouter/ai-sdk-provider": {
"ai": "^6.0.1"
}
}
}