Mirror of https://github.com/DayuanJiang/next-ai-draw-io.git
Synced 2026-01-03 06:42:27 +08:00

Compare commits: fix/limit-... → refactor/d...
1 commit: 5c9d387c44

@@ -85,11 +85,9 @@ Here are some example prompts and their generated diagrams:
 
 - **LLM-Powered Diagram Creation**: Leverage Large Language Models to create and manipulate draw.io diagrams directly through natural language commands
 - **Image-Based Diagram Replication**: Upload existing diagrams or images and have the AI replicate and enhance them automatically
-- **PDF & Text File Upload**: Upload PDF documents and text files to extract content and generate diagrams from existing documents
-- **AI Reasoning Display**: View the AI's thinking process for supported models (OpenAI o1/o3, Gemini, Claude, etc.)
 - **Diagram History**: Comprehensive version control that tracks all changes, allowing you to view and restore previous versions of your diagrams before the AI editing.
 - **Interactive Chat Interface**: Communicate with AI to refine your diagrams in real-time
-- **Cloud Architecture Diagram Support**: Specialized support for generating cloud architecture diagrams (AWS, GCP, Azure)
+- **AWS Architecture Diagram Support**: Specialized support for generating AWS architecture diagrams
 - **Animated Connectors**: Create dynamic and animated connectors between diagram elements for better visualization
 
 ## Getting Started

@@ -264,13 +264,8 @@ ${lastMessageText}
     // Fix tool call inputs for Bedrock API (requires JSON objects, not strings)
     const fixedMessages = fixToolCallInputs(modelMessages)
 
-    // Replace historical tool call XML with placeholders to reduce tokens
-    // Disabled by default - some models (e.g. minimax) copy placeholders instead of generating XML
-    const enableHistoryReplace =
-        process.env.ENABLE_HISTORY_XML_REPLACE === "true"
-    const placeholderMessages = enableHistoryReplace
-        ? replaceHistoricalToolInputs(fixedMessages)
-        : fixedMessages
+    // Replace historical tool call XML with placeholders to reduce tokens and avoid confusion
+    const placeholderMessages = replaceHistoricalToolInputs(fixedMessages)
 
     // Filter out messages with empty content arrays (Bedrock API rejects these)
     // This is a safety measure - ideally convertToModelMessages should handle all cases

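The body of `fixToolCallInputs` is not part of this hunk. As a rough illustration of the comment above ("requires JSON objects, not strings"), such a normalization step might look like the sketch below; the message and part shapes are assumptions for illustration, not the project's actual types.

```ts
// Hypothetical sketch: normalize tool-call inputs back into JSON objects for Bedrock.
// The ModelMessage/ToolCallPart shapes here are illustrative assumptions.
type ToolCallPart = { type: "tool-call"; toolName: string; input: unknown }
type ModelMessage = { role: string; content: ToolCallPart[] | string }

function fixToolCallInputs(messages: ModelMessage[]): ModelMessage[] {
    return messages.map((message) => {
        if (!Array.isArray(message.content)) return message
        return {
            ...message,
            content: message.content.map((part) => {
                // Bedrock rejects stringified inputs, so parse them back into objects.
                if (part.type === "tool-call" && typeof part.input === "string") {
                    try {
                        return { ...part, input: JSON.parse(part.input) }
                    } catch {
                        return part // leave unparsable inputs untouched
                    }
                }
                return part
            }),
        }
    })
}
```
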
@@ -61,15 +61,31 @@ interface ChatPanelProps {
 // Constants for tool states
 const TOOL_ERROR_STATE = "output-error" as const
 const DEBUG = process.env.NODE_ENV === "development"
-const MAX_AUTO_RETRY_COUNT = 3
 
 /**
- * Check if auto-resubmit should happen based on tool errors.
- * Does NOT handle retry count or quota - those are handled by the caller.
+ * Custom auto-resubmit logic for the AI chat.
+ *
+ * Strategy:
+ * - When tools return errors (e.g., invalid XML), automatically resubmit
+ *   the conversation to let the AI retry with corrections
+ * - When tools succeed (e.g., diagram displayed), stop without AI acknowledgment
+ *   to prevent unnecessary regeneration cycles
+ *
+ * This fixes the issue where successful diagrams were being regenerated
+ * multiple times because the previous logic (lastAssistantMessageIsCompleteWithToolCalls)
+ * auto-resubmitted on BOTH success and error.
+ *
+ * @param messages - Current conversation messages from AI SDK
+ * @returns true to auto-resubmit (for error recovery), false to stop
 */
-function hasToolErrors(messages: ChatMessage[]): boolean {
+function shouldAutoResubmit(messages: ChatMessage[]): boolean {
     const lastMessage = messages[messages.length - 1]
     if (!lastMessage || lastMessage.role !== "assistant") {
+        if (DEBUG) {
+            console.log(
+                "[sendAutomaticallyWhen] No assistant message, returning false",
+            )
+        }
         return false
     }
 

@@ -79,10 +95,31 @@ function hasToolErrors(messages: ChatMessage[]): boolean {
     ) || []
 
     if (toolParts.length === 0) {
+        if (DEBUG) {
+            console.log(
+                "[sendAutomaticallyWhen] No tool parts, returning false",
+            )
+        }
         return false
     }
 
-    return toolParts.some((part) => part.state === TOOL_ERROR_STATE)
+    // Only auto-resubmit if ANY tool has an error
+    const hasError = toolParts.some((part) => part.state === TOOL_ERROR_STATE)
+
+    if (DEBUG) {
+        if (hasError) {
+            console.log(
+                "[sendAutomaticallyWhen] Retrying due to errors in tools:",
+                toolParts
+                    .filter((p) => p.state === TOOL_ERROR_STATE)
+                    .map((p) => p.toolName),
+            )
+        } else {
+            console.log("[sendAutomaticallyWhen] No errors, stopping")
+        }
+    }
+
+    return hasError
 }
 
 export default function ChatPanel({

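For a concrete sense of what the new predicate reacts to, here is a fabricated example; the exact part shape (`state`, `toolName`, a `tool-*` type) is only inferred from the fields the function reads above, so treat it as an approximation rather than the project's real message type.

```ts
// Fabricated example data; the part shape is an assumption based on the fields
// shouldAutoResubmit reads (role, parts[].state, parts[].toolName).
const failedAttempt = {
    role: "assistant",
    parts: [
        {
            type: "tool-display_diagram",
            toolName: "display_diagram",
            state: "output-error", // matches TOOL_ERROR_STATE
        },
    ],
} as unknown as ChatMessage

shouldAutoResubmit([failedAttempt]) // true → resubmit so the AI can correct its output
shouldAutoResubmit([])              // false → nothing to retry
```
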
@@ -186,9 +223,6 @@ export default function ChatPanel({
     // Ref to hold stop function for use in onToolCall (avoids stale closure)
     const stopRef = useRef<(() => void) | null>(null)
 
-    // Ref to track consecutive auto-retry count (reset on user action)
-    const autoRetryCountRef = useRef(0)
-
     const {
         messages,
         sendMessage,

@@ -407,68 +441,8 @@ Please retry with an adjusted search pattern or use display_diagram if retries a
             }
         }
     },
-    sendAutomaticallyWhen: ({ messages }) => {
-        const shouldRetry = hasToolErrors(
-            messages as unknown as ChatMessage[],
-        )
-
-        if (!shouldRetry) {
-            // No error, reset retry count
-            autoRetryCountRef.current = 0
-            if (DEBUG) {
-                console.log("[sendAutomaticallyWhen] No errors, stopping")
-            }
-            return false
-        }
-
-        // Check retry count limit
-        if (autoRetryCountRef.current >= MAX_AUTO_RETRY_COUNT) {
-            if (DEBUG) {
-                console.log(
-                    `[sendAutomaticallyWhen] Max retry count (${MAX_AUTO_RETRY_COUNT}) reached, stopping`,
-                )
-            }
-            toast.error(
-                `Auto-retry limit reached (${MAX_AUTO_RETRY_COUNT}). Please try again manually.`,
-            )
-            autoRetryCountRef.current = 0
-            return false
-        }
-
-        // Check quota limits before auto-retry
-        const tokenLimitCheck = quotaManager.checkTokenLimit()
-        if (!tokenLimitCheck.allowed) {
-            if (DEBUG) {
-                console.log(
-                    "[sendAutomaticallyWhen] Token limit exceeded, stopping",
-                )
-            }
-            quotaManager.showTokenLimitToast(tokenLimitCheck.used)
-            autoRetryCountRef.current = 0
-            return false
-        }
-
-        const tpmCheck = quotaManager.checkTPMLimit()
-        if (!tpmCheck.allowed) {
-            if (DEBUG) {
-                console.log(
-                    "[sendAutomaticallyWhen] TPM limit exceeded, stopping",
-                )
-            }
-            quotaManager.showTPMLimitToast()
-            autoRetryCountRef.current = 0
-            return false
-        }
-
-        // Increment retry count and allow retry
-        autoRetryCountRef.current++
-        if (DEBUG) {
-            console.log(
-                `[sendAutomaticallyWhen] Retrying (${autoRetryCountRef.current}/${MAX_AUTO_RETRY_COUNT})`,
-            )
-        }
-        return true
-    },
+    sendAutomaticallyWhen: ({ messages }) =>
+        shouldAutoResubmit(messages as unknown as ChatMessage[]),
 })
 
 // Update stopRef so onToolCall can access it

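The removed branch leans on a `quotaManager` helper whose definition is outside this diff. Judging purely from the calls above, its surface is roughly the following; this is an inference from usage, not the repository's actual interface.

```ts
// Shape inferred from the calls in the removed code; names and fields are assumptions.
interface QuotaManager {
    checkTokenLimit(): { allowed: boolean; used: number } // overall token budget
    checkTPMLimit(): { allowed: boolean } // tokens-per-minute budget
    showTokenLimitToast(used: number): void
    showTPMLimitToast(): void
}
```
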
@@ -785,9 +759,6 @@ Please retry with an adjusted search pattern or use display_diagram if retries a
         previousXml: string,
         sessionId: string,
     ) => {
-        // Reset auto-retry count on user-initiated message
-        autoRetryCountRef.current = 0
-
         const config = getAIConfig()
 
         sendMessage(

@@ -81,11 +81,9 @@ https://github.com/user-attachments/assets/b2eef5f3-b335-4e71-a755-dc2e80931979
 
 - **LLM驱动的图表创建**:利用大语言模型通过自然语言命令直接创建和操作draw.io图表
 - **基于图像的图表复制**:上传现有图表或图像,让AI自动复制和增强
-- **PDF和文本文件上传**:上传PDF文档和文本文件,提取内容并从现有文档生成图表
-- **AI推理过程显示**:查看支持模型的AI思考过程(OpenAI o1/o3、Gemini、Claude等)
 - **图表历史记录**:全面的版本控制,跟踪所有更改,允许您查看和恢复AI编辑前的图表版本
 - **交互式聊天界面**:与AI实时对话来完善您的图表
-- **云架构图支持**:专门支持生成云架构图(AWS、GCP、Azure)
+- **AWS架构图支持**:专门支持生成AWS架构图
 - **动画连接器**:在图表元素之间创建动态动画连接器,实现更好的可视化效果
 
 ## 快速开始

@@ -81,11 +81,9 @@ https://github.com/user-attachments/assets/b2eef5f3-b335-4e71-a755-dc2e80931979
 
 - **LLM搭載のダイアグラム作成**:大規模言語モデルを活用して、自然言語コマンドで直接draw.ioダイアグラムを作成・操作
 - **画像ベースのダイアグラム複製**:既存のダイアグラムや画像をアップロードし、AIが自動的に複製・強化
-- **PDFとテキストファイルのアップロード**:PDFドキュメントやテキストファイルをアップロードして、既存のドキュメントからコンテンツを抽出し、ダイアグラムを生成
-- **AI推論プロセス表示**:サポートされているモデル(OpenAI o1/o3、Gemini、Claudeなど)のAIの思考プロセスを表示
 - **ダイアグラム履歴**:すべての変更を追跡する包括的なバージョン管理。AI編集前のダイアグラムの以前のバージョンを表示・復元可能
 - **インタラクティブなチャットインターフェース**:AIとリアルタイムでコミュニケーションしてダイアグラムを改善
-- **クラウドアーキテクチャダイアグラムサポート**:クラウドアーキテクチャダイアグラムの生成を専門的にサポート(AWS、GCP、Azure)
+- **AWSアーキテクチャダイアグラムサポート**:AWSアーキテクチャダイアグラムの生成を専門的にサポート
 - **アニメーションコネクタ**:より良い可視化のためにダイアグラム要素間に動的でアニメーション化されたコネクタを作成
 
 ## はじめに

@@ -41,13 +41,9 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
 # GOOGLE_THINKING_LEVEL=high # Optional: Gemini 3 thinking level (low/high)
 
 # Azure OpenAI Configuration
-# Configure endpoint using ONE of these methods:
-# 1. AZURE_RESOURCE_NAME - SDK constructs: https://{name}.openai.azure.com/openai/v1{path}
-# 2. AZURE_BASE_URL - SDK appends /v1{path} to your URL
-# If both are set, AZURE_BASE_URL takes precedence.
 # AZURE_RESOURCE_NAME=your-resource-name
 # AZURE_API_KEY=...
-# AZURE_BASE_URL=https://your-resource.openai.azure.com/openai # Alternative: Custom endpoint
+# AZURE_BASE_URL=https://your-resource.openai.azure.com # Optional: Custom endpoint (overrides resourceName)
 # AZURE_REASONING_EFFORT=low # Optional: Azure reasoning effort (low, medium, high)
 # AZURE_REASONING_SUMMARY=detailed
 

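The removed comments spell out how the Azure endpoint is derived and which setting wins. A minimal sketch of that rule, assuming only the URL patterns stated in those comments (the helper itself is hypothetical, not code from the repository):

```ts
// Illustration of the endpoint rules described in the removed .env comments.
// The URL patterns come from those comments; the helper itself is hypothetical.
function resolveAzureEndpoint(path: string): string {
    const baseUrl = process.env.AZURE_BASE_URL
    const resourceName = process.env.AZURE_RESOURCE_NAME

    // Precedence rule: AZURE_BASE_URL wins when both are set,
    // and the SDK appends /v1{path} to it.
    if (baseUrl) return `${baseUrl}/v1${path}`

    // Otherwise AZURE_RESOURCE_NAME expands to the default Azure OpenAI host.
    if (resourceName) {
        return `https://${resourceName}.openai.azure.com/openai/v1${path}`
    }
    throw new Error("Set AZURE_BASE_URL or AZURE_RESOURCE_NAME")
}
```
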
@@ -90,4 +86,3 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
 # Enable PDF file upload to extract text and generate diagrams
 # Enabled by default. Set to "false" to disable.
 # ENABLE_PDF_INPUT=true
-# NEXT_PUBLIC_MAX_EXTRACTED_CHARS=150000 # Max characters for PDF/text extraction (default: 150000)

@@ -572,15 +572,10 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
         case "azure": {
             const apiKey = overrides?.apiKey || process.env.AZURE_API_KEY
             const baseURL = overrides?.baseUrl || process.env.AZURE_BASE_URL
-            const resourceName = process.env.AZURE_RESOURCE_NAME
-            // Azure requires either baseURL or resourceName to construct the endpoint
-            // resourceName constructs: https://{resourceName}.openai.azure.com/openai/v1{path}
-            if (baseURL || resourceName || overrides?.apiKey) {
+            if (baseURL || overrides?.apiKey) {
                 const customAzure = createAzure({
                     apiKey,
-                    // baseURL takes precedence over resourceName per SDK behavior
                     ...(baseURL && { baseURL }),
-                    ...(!baseURL && resourceName && { resourceName }),
                 })
                 model = customAzure(modelId)
             } else {

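For reference, the two configuration styles the old code supported map onto the `@ai-sdk/azure` provider roughly as below; the deployment name is a placeholder and the precedence behavior is as described by the removed comments, so verify against the SDK version in use.

```ts
import { createAzure } from "@ai-sdk/azure"

// Two ways to point the provider at an Azure resource: resourceName expands to
// the default host, baseURL overrides it. "my-deployment-name" is a placeholder.
const viaResourceName = createAzure({
    apiKey: process.env.AZURE_API_KEY,
    resourceName: process.env.AZURE_RESOURCE_NAME,
})

const viaBaseUrl = createAzure({
    apiKey: process.env.AZURE_API_KEY,
    baseURL: process.env.AZURE_BASE_URL, // takes precedence when both are set
})

const model = (process.env.AZURE_BASE_URL ? viaBaseUrl : viaResourceName)(
    "my-deployment-name", // placeholder deployment/model id
)
```
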
@@ -1,10 +1,7 @@
 import { extractText, getDocumentProxy } from "unpdf"
 
-// Maximum characters allowed for extracted text (configurable via env)
-const DEFAULT_MAX_EXTRACTED_CHARS = 150000 // 150k chars
-export const MAX_EXTRACTED_CHARS =
-    Number(process.env.NEXT_PUBLIC_MAX_EXTRACTED_CHARS) ||
-    DEFAULT_MAX_EXTRACTED_CHARS
+// Maximum characters allowed for extracted text
+export const MAX_EXTRACTED_CHARS = 150000 // 150k chars
 
 // Text file extensions we support
 const TEXT_EXTENSIONS = [

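A minimal sketch of how the exported constant might be applied when extracting PDF text with `unpdf`; the helper is illustrative (not from the repo), assumes `MAX_EXTRACTED_CHARS` is in scope in the same module, and the `extractText` options follow the library's documented usage, which is worth re-checking against the installed version.

```ts
// Illustrative helper: extract PDF text and clamp it to MAX_EXTRACTED_CHARS.
// Assumes it lives in the same module as the constant above.
export async function extractPdfText(buffer: ArrayBuffer): Promise<string> {
    const pdf = await getDocumentProxy(new Uint8Array(buffer))
    const { text } = await extractText(pdf, { mergePages: true })
    // Clamp to the configured maximum so huge PDFs don't blow up the prompt.
    return text.length > MAX_EXTRACTED_CHARS
        ? text.slice(0, MAX_EXTRACTED_CHARS)
        : text
}
```
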
@@ -1,6 +1,6 @@
 {
     "name": "next-ai-draw-io",
-    "version": "0.4.0",
+    "version": "0.3.0",
     "license": "Apache-2.0",
     "private": true,
     "scripts": {

File diff suppressed because one or more lines are too long