Compare commits

..

4 Commits

Author SHA1 Message Date

dayuan.jiang
1198cc2a9b Merge branch 'main' into feature/url-content-extraction 2026-01-06 00:06:18 +09:00

Biki Kalita
a61e89d6d0 chore: restore package.json and package-lock.json 2026-01-05 14:54:40 +05:30

Biki Kalita
580d42f535 Changes made as recommended by Claude (2026-01-05 14:51:24 +05:30):
1. Added a request timeout to prevent server resources from being tied up (route.ts)
2. Implemented runtime validation for the API response shape (url-utils.ts)
3. Removed hardcoded English error messages and replaced them with localized strings (url-input-dialog.tsx)
4. Fixed the incorrect i18n namespace (changed from pdf.* to url.*) (url-input-dialog.tsx and en/ja/zh.json)

Biki Kalita
64268b0fac feat: add URL content extraction for AI diagram generation 2026-01-04 22:36:46 +05:30
40 changed files with 181 additions and 951 deletions
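Commit 580d42f535 mentions a request timeout and runtime validation of the API response shape. The sketch below only illustrates those two ideas; it is not the repository's code, and the endpoint path /api/extract-url, the function name extractUrlContent, and the { title, content } response shape are assumptions rather than names taken from the diff.

```typescript
// Hypothetical sketch of the two changes described in commit 580d42f535.
// Every name here (extractUrlContent, /api/extract-url, UrlData) is assumed.
interface UrlData {
    title: string
    content: string
}

// Runtime check of the response shape before the data is used downstream.
function isUrlData(value: unknown): value is UrlData {
    if (typeof value !== "object" || value === null) return false
    const record = value as Record<string, unknown>
    return typeof record.title === "string" && typeof record.content === "string"
}

export async function extractUrlContent(
    url: string,
    timeoutMs = 15_000,
): Promise<UrlData> {
    // Abort the request after timeoutMs so a slow upstream site cannot tie up the caller.
    const controller = new AbortController()
    const timer = setTimeout(() => controller.abort(), timeoutMs)
    try {
        const res = await fetch("/api/extract-url", {
            method: "POST",
            headers: { "Content-Type": "application/json" },
            body: JSON.stringify({ url }),
            signal: controller.signal,
        })
        if (!res.ok) {
            throw new Error(`Extraction failed with status ${res.status}`)
        }
        const data: unknown = await res.json()
        if (!isUrlData(data)) {
            throw new Error("Unexpected response shape from the extraction endpoint")
        }
        return data
    } finally {
        clearTimeout(timer)
    }
}
```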

View File

@@ -11,8 +11,7 @@ on:
required: false
jobs:
# Mac and Linux: Build and publish directly (no signing needed)
build-mac-linux:
build:
permissions:
contents: write
strategy:
@@ -21,9 +20,13 @@ jobs:
include:
- os: macos-latest
platform: mac
- os: windows-latest
platform: win
- os: ubuntu-latest
platform: linux
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v6
@@ -37,58 +40,7 @@ jobs:
- name: Install dependencies
run: npm install
- name: Build and publish
- name: Build and publish Electron app
run: npm run dist:${{ matrix.platform }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Windows: Build, sign with SignPath, then publish
build-windows:
permissions:
contents: write
runs-on: windows-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: 24
cache: "npm"
- name: Install dependencies
run: npm install
# Build WITHOUT publishing
- name: Build Windows app
run: npm run dist:win:build
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload unsigned artifacts for signing
uses: actions/upload-artifact@v4
id: upload-unsigned
with:
name: windows-unsigned
path: release/*.exe
retention-days: 1
- name: Sign with SignPath
uses: signpath/github-action-submit-signing-request@v2
with:
api-token: ${{ secrets.SIGNPATH_API_TOKEN }}
organization-id: '880a211d-2cd3-4e7b-8d04-3d1f8eb39df5'
project-slug: 'next-ai-draw-io'
signing-policy-slug: 'test-signing'
artifact-configuration-slug: 'windows-exe'
github-artifact-id: ${{ steps.upload-unsigned.outputs.artifact-id }}
wait-for-completion: true
output-artifact-directory: release-signed
- name: Upload signed artifacts to release
uses: softprops/action-gh-release@v2
with:
files: release-signed/*.exe
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -40,12 +40,11 @@ https://github.com/user-attachments/assets/9d60a3e8-4a1c-4b5e-acbb-26af2d3eabd1
- [Installation](#installation)
- [Deployment](#deployment)
- [Deploy to EdgeOne Pages](#deploy-to-edgeone-pages)
- [Deploy on Vercel](#deploy-on-vercel)
- [Deploy on Vercel (Recommended)](#deploy-on-vercel-recommended)
- [Deploy on Cloudflare Workers](#deploy-on-cloudflare-workers)
- [Multi-Provider Support](#multi-provider-support)
- [How It Works](#how-it-works)
- [Support \& Contact](#support--contact)
- [FAQ](#faq)
- [Star History](#star-history)
## Examples
@@ -186,7 +185,7 @@ Check out the [Tencent EdgeOne Pages documentation](https://pages.edgeone.ai/doc
Additionally, deploying through Tencent EdgeOne Pages will also grant you a [daily free quota for DeepSeek models](https://pages.edgeone.ai/document/edge-ai).
### Deploy on Vercel
### Deploy on Vercel (Recommended)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io)
@@ -212,7 +211,6 @@ See the [Next.js deployment documentation](https://nextjs.org/docs/app/building-
- OpenRouter
- DeepSeek
- SiliconFlow
- ModelScope
- SGLang
- Vercel AI Gateway
@@ -247,10 +245,6 @@ For support or inquiries, please open an issue on the GitHub repository or conta
- Email: me[at]jiang.jp
## FAQ
See [FAQ](./docs/en/FAQ.md) for common issues and solutions.
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=DayuanJiang/next-ai-draw-io&type=date&legend=top-left)](https://www.star-history.com/#DayuanJiang/next-ai-draw-io&type=date&legend=top-left)

View File

@@ -10,7 +10,18 @@ export const metadata: Metadata = {
keywords: ["AI图表", "draw.io", "AWS架构", "GCP图表", "Azure图表", "LLM"],
}
function formatNumber(num: number): string {
if (num >= 1000) {
return `${num / 1000}k`
}
return num.toString()
}
export default function AboutCN() {
const dailyRequestLimit = Number(process.env.DAILY_REQUEST_LIMIT) || 20
const dailyTokenLimit = Number(process.env.DAILY_TOKEN_LIMIT) || 500000
const tpmLimit = Number(process.env.TPM_LIMIT) || 50000
return (
<div className="min-h-screen bg-gray-50">
{/* Navigation */}
@@ -97,6 +108,42 @@ export default function AboutCN() {
</p>
</div>
{/* Usage Limits */}
<p className="text-sm text-gray-600 mb-3">
请注意当前的使用限制:
</p>
<div className="grid grid-cols-3 gap-3 mb-5">
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(dailyRequestLimit)}
</p>
<p className="text-xs text-gray-500">
请求/天
</p>
</div>
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(dailyTokenLimit)}
</p>
<p className="text-xs text-gray-500">
Token/天
</p>
</div>
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(tpmLimit)}
</p>
<p className="text-xs text-gray-500">
Token/分钟
</p>
</div>
</div>
{/* Divider */}
<div className="flex items-center gap-3 my-5">
<div className="flex-1 h-px bg-gradient-to-r from-transparent via-amber-300 to-transparent" />
</div>
{/* Bring Your Own Key */}
<div className="text-center">
<h4 className="text-base font-bold text-gray-900 mb-2">
@@ -297,7 +344,6 @@ export default function AboutCN() {
<li>OpenRouter</li>
<li>DeepSeek</li>
<li>SiliconFlow</li>
<li>ModelScope</li>
</ul>
<p className="text-gray-700 mt-4">
<code>claude-sonnet-4-5</code>{" "}

View File

@@ -17,7 +17,18 @@ export const metadata: Metadata = {
],
}
function formatNumber(num: number): string {
if (num >= 1000) {
return `${num / 1000}k`
}
return num.toString()
}
export default function AboutJA() {
const dailyRequestLimit = Number(process.env.DAILY_REQUEST_LIMIT) || 20
const dailyTokenLimit = Number(process.env.DAILY_TOKEN_LIMIT) || 500000
const tpmLimit = Number(process.env.TPM_LIMIT) || 50000
return (
<div className="min-h-screen bg-gray-50">
{/* Navigation */}
@@ -105,6 +116,42 @@ export default function AboutJA() {
</p>
</div>
{/* Usage Limits */}
<p className="text-sm text-gray-600 mb-3">
現在の使用制限にご注意ください:
</p>
<div className="grid grid-cols-3 gap-3 mb-5">
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(dailyRequestLimit)}
</p>
<p className="text-xs text-gray-500">
リクエスト/日
</p>
</div>
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(dailyTokenLimit)}
</p>
<p className="text-xs text-gray-500">
トークン/日
</p>
</div>
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(tpmLimit)}
</p>
<p className="text-xs text-gray-500">
トークン/分
</p>
</div>
</div>
{/* Divider */}
<div className="flex items-center gap-3 my-5">
<div className="flex-1 h-px bg-gradient-to-r from-transparent via-amber-300 to-transparent" />
</div>
{/* Bring Your Own Key */}
<div className="text-center">
<h4 className="text-base font-bold text-gray-900 mb-2">
@@ -312,7 +359,6 @@ export default function AboutJA() {
<li>OpenRouter</li>
<li>DeepSeek</li>
<li>SiliconFlow</li>
<li>ModelScope</li>
</ul>
<p className="text-gray-700 mt-4">
<code>claude-sonnet-4-5</code>

View File

@@ -17,7 +17,18 @@ export const metadata: Metadata = {
],
}
function formatNumber(num: number): string {
if (num >= 1000) {
return `${num / 1000}k`
}
return num.toString()
}
export default function About() {
const dailyRequestLimit = Number(process.env.DAILY_REQUEST_LIMIT) || 20
const dailyTokenLimit = Number(process.env.DAILY_TOKEN_LIMIT) || 500000
const tpmLimit = Number(process.env.TPM_LIMIT) || 50000
return (
<div className="min-h-screen bg-gray-50">
{/* Navigation */}
@@ -107,6 +118,42 @@ export default function About() {
</p>
</div>
{/* Usage Limits */}
<p className="text-sm text-gray-600 mb-3">
Please note the current usage limits:
</p>
<div className="grid grid-cols-3 gap-3 mb-5">
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(dailyRequestLimit)}
</p>
<p className="text-xs text-gray-500">
requests/day
</p>
</div>
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(dailyTokenLimit)}
</p>
<p className="text-xs text-gray-500">
tokens/day
</p>
</div>
<div className="text-center p-3 bg-white/60 rounded-lg">
<p className="text-lg font-bold text-amber-600">
{formatNumber(tpmLimit)}
</p>
<p className="text-xs text-gray-500">
tokens/min
</p>
</div>
</div>
{/* Divider */}
<div className="flex items-center gap-3 my-5">
<div className="flex-1 h-px bg-gradient-to-r from-transparent via-amber-300 to-transparent" />
</div>
{/* Bring Your Own Key */}
<div className="text-center">
<h4 className="text-base font-bold text-gray-900 mb-2">
@@ -331,7 +378,6 @@ export default function About() {
<li>OpenRouter</li>
<li>DeepSeek</li>
<li>SiliconFlow</li>
<li>ModelScope</li>
</ul>
<p className="text-gray-700 mt-4">
Note that <code>claude-sonnet-4-5</code> has trained on

View File

@@ -202,7 +202,7 @@ export async function POST(req: Request) {
case "siliconflow": { case "siliconflow": {
const sf = createOpenAI({ const sf = createOpenAI({
apiKey, apiKey,
baseURL: baseUrl || "https://api.siliconflow.cn/v1", baseURL: baseUrl || "https://api.siliconflow.com/v1",
}) })
model = sf.chat(modelId) model = sf.chat(modelId)
break break
@@ -274,75 +274,6 @@ export async function POST(req: Request) {
break break
} }
case "modelscope": {
const baseURL =
baseUrl || "https://api-inference.modelscope.cn/v1"
const startTime = Date.now()
try {
// Initiate a streaming request (required for QwQ-32B and certain Qwen3 models)
const response = await fetch(
`${baseURL}/chat/completions`,
{
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${apiKey}`,
},
body: JSON.stringify({
model: modelId,
messages: [
{ role: "user", content: "Say 'OK'" },
],
max_tokens: 20,
stream: true,
enable_thinking: false,
}),
},
)
if (!response.ok) {
const errorText = await response.text()
throw new Error(
`ModelScope API error (${response.status}): ${errorText}`,
)
}
const contentType =
response.headers.get("content-type") || ""
const isValidStreamingResponse =
response.status === 200 &&
(contentType.includes("text/event-stream") ||
contentType.includes("application/json"))
if (!isValidStreamingResponse) {
throw new Error(
`Unexpected response format: ${contentType}`,
)
}
const responseTime = Date.now() - startTime
if (response.body) {
response.body.cancel().catch(() => {
/* Ignore cancellation errors */
})
}
return NextResponse.json({
valid: true,
responseTime,
note: "ModelScope model validated (using streaming API)",
})
} catch (error) {
console.error(
"[validate-model] ModelScope validation failed:",
error,
)
throw error
}
}
default:
return NextResponse.json(
{ valid: false, error: `Unknown provider: ${provider}` },

View File

@@ -347,12 +347,6 @@ export function ChatInput({
setShowUrlDialog(false)
} catch (error) {
// Remove the URL from the data map on error
const newUrlData = urlData
? new Map(urlData)
: new Map<string, UrlData>()
newUrlData.delete(url)
onUrlChange(newUrlData)
showErrorToast(
<span className="text-muted-foreground">
{error instanceof Error

View File

@@ -79,7 +79,6 @@ const PROVIDER_LOGO_MAP: Record<string, string> = {
gateway: "vercel", gateway: "vercel",
edgeone: "tencent-cloud", edgeone: "tencent-cloud",
doubao: "bytedance", doubao: "bytedance",
modelscope: "modelscope",
} }
// Provider logo component // Provider logo component

View File

@@ -50,7 +50,6 @@ const PROVIDER_LOGO_MAP: Record<string, string> = {
gateway: "vercel", gateway: "vercel",
edgeone: "tencent-cloud", edgeone: "tencent-cloud",
doubao: "bytedance", doubao: "bytedance",
modelscope: "modelscope",
} }
// Group models by providerLabel (handles duplicate providers) // Group models by providerLabel (handles duplicate providers)

View File

@@ -3,7 +3,6 @@
import { Github, Info, Moon, Sun, Tag } from "lucide-react"
import { usePathname, useRouter, useSearchParams } from "next/navigation"
import { Suspense, useEffect, useState } from "react"
import { toast } from "sonner"
import { Button } from "@/components/ui/button"
import {
Dialog,
@@ -104,11 +103,6 @@ function SettingsContent({
)
const [currentLang, setCurrentLang] = useState("en")
// Proxy settings state (Electron only)
const [httpProxy, setHttpProxy] = useState("")
const [httpsProxy, setHttpsProxy] = useState("")
const [isApplyingProxy, setIsApplyingProxy] = useState(false)
useEffect(() => {
// Only fetch if not cached in localStorage
if (getStoredAccessCodeRequired() !== null) return
@@ -156,14 +150,6 @@
setCloseProtection(storedCloseProtection !== "false")
setError("")
// Load proxy settings (Electron only)
if (window.electronAPI?.getProxy) {
window.electronAPI.getProxy().then((config) => {
setHttpProxy(config.httpProxy || "")
setHttpsProxy(config.httpsProxy || "")
})
}
}
}, [open])
@@ -222,46 +208,6 @@
}
}
const handleApplyProxy = async () => {
if (!window.electronAPI?.setProxy) return
// Validate proxy URLs (must start with http:// or https://)
const validateProxyUrl = (url: string): boolean => {
if (!url) return true // Empty is OK
return url.startsWith("http://") || url.startsWith("https://")
}
const trimmedHttp = httpProxy.trim()
const trimmedHttps = httpsProxy.trim()
if (trimmedHttp && !validateProxyUrl(trimmedHttp)) {
toast.error("HTTP Proxy must start with http:// or https://")
return
}
if (trimmedHttps && !validateProxyUrl(trimmedHttps)) {
toast.error("HTTPS Proxy must start with http:// or https://")
return
}
setIsApplyingProxy(true)
try {
const result = await window.electronAPI.setProxy({
httpProxy: trimmedHttp || undefined,
httpsProxy: trimmedHttps || undefined,
})
if (result.success) {
toast.success(dict.settings.proxyApplied)
} else {
toast.error(result.error || "Failed to apply proxy settings")
}
} catch {
toast.error("Failed to apply proxy settings")
} finally {
setIsApplyingProxy(false)
}
}
return (
<DialogContent className="sm:max-w-lg p-0 gap-0">
{/* Header */}
@@ -424,54 +370,6 @@
</span>
</div>
</SettingItem>
{/* Proxy Settings - Electron only */}
{typeof window !== "undefined" &&
window.electronAPI?.isElectron && (
<div className="py-4 space-y-3">
<div className="space-y-0.5">
<Label className="text-sm font-medium">
{dict.settings.proxy}
</Label>
<p className="text-xs text-muted-foreground">
{dict.settings.proxyDescription}
</p>
</div>
<div className="space-y-2">
<Input
id="http-proxy"
type="text"
value={httpProxy}
onChange={(e) =>
setHttpProxy(e.target.value)
}
placeholder={`${dict.settings.httpProxy}: http://proxy:8080`}
className="h-9"
/>
<Input
id="https-proxy"
type="text"
value={httpsProxy}
onChange={(e) =>
setHttpsProxy(e.target.value)
}
placeholder={`${dict.settings.httpsProxy}: http://proxy:8080`}
className="h-9"
/>
</div>
<Button
onClick={handleApplyProxy}
disabled={isApplyingProxy}
className="h-9 px-4 rounded-xl w-full"
>
{isApplyingProxy
? "..."
: dict.settings.applyProxy}
</Button>
</div>
)}
</div>
</div>

View File

@@ -35,14 +35,14 @@ export function UrlInputDialog({
setError("") setError("")
if (!url.trim()) { if (!url.trim()) {
setError(dict.url.enterUrl) setError("Please enter a URL")
return return
} }
try { try {
new URL(url) new URL(url)
} catch { } catch {
setError(dict.url.invalidFormat) setError("Invalid URL format")
return return
} }

View File

@@ -1,78 +0,0 @@
# 常见问题解答 (FAQ)
---
## 1. 无法导出 PDF
**问题**: Web 版点击导出 PDF 后跳转到 `convert.diagrams.net/node/export` 然后无响应
**原因**: 嵌入式 Draw.io 不支持直接 PDF 导出,依赖外部转换服务,在 iframe 中无法正常工作
**解决方案**: 先导出为图片(PNG),再打印转成 PDF
**相关 Issue**: #539, #125
---
## 2. 无法访问 embed.diagrams.net(离线/内网部署)
**问题**: 内网环境提示"找不到 embed.diagrams.net 的服务器 IP 地址"
**关键点**: `NEXT_PUBLIC_*` 环境变量是**构建时**变量,会被打包到 JS 代码中,**运行时设置无效**
**解决方案**: 必须在构建时通过 `args` 传入:
```yaml
# docker-compose.yml
services:
drawio:
image: jgraph/drawio:latest
ports: ["8080:8080"]
next-ai-draw-io:
build:
context: .
args:
- NEXT_PUBLIC_DRAWIO_BASE_URL=http://你的服务器IP:8080/
ports: ["3000:3000"]
env_file: .env
```
**内网用户**: 在外网修改 Dockerfile 并构建镜像,再传到内网使用
**相关 Issue**: #295, #317
---
## 3. 自建模型只思考不画图
**问题**: 本地部署的模型(如 Qwen、LiteLLM)只输出思考过程,不生成图表
**可能原因**:
1. **模型太小** - 小模型难以正确遵循 tool calling 指令,建议使用 32B+ 参数的模型
2. **未开启 tool calling** - 模型服务需要配置 tool use 功能
**解决方案**: 开启 tool calling例如 vLLM
```bash
python -m vllm.entrypoints.openai.api_server \
--model Qwen/Qwen3-32B \
--enable-auto-tool-choice \
--tool-call-parser hermes
```
**相关 Issue**: #269, #75
---
## 4. 上传图片后提示"未提供图片"
**问题**: 上传图片后,系统显示"未提供图片"错误
**可能原因**:
1. 模型不支持视觉功能(如 Kimi K2、DeepSeek、Qwen 文本模型)
**解决方案**:
- 使用支持视觉的模型GPT-5.2、Claude 4.5 Sonnet、Gemini 3 Pro
- 模型名带 `vision``vl` 的支持图片
- 更新到最新版本(v0.4.9+)
**相关 Issue**: #324, #421, #469

View File

@@ -37,12 +37,11 @@ https://github.com/user-attachments/assets/b2eef5f3-b335-4e71-a755-dc2e80931979
- [安装](#安装) - [安装](#安装)
- [部署](#部署) - [部署](#部署)
- [部署到腾讯云EdgeOne Pages](#部署到腾讯云edgeone-pages) - [部署到腾讯云EdgeOne Pages](#部署到腾讯云edgeone-pages)
- [部署到Vercel](#部署到vercel)
- [部署到Vercel(推荐)](#部署到vercel推荐)
- [部署到Cloudflare Workers](#部署到cloudflare-workers) - [部署到Cloudflare Workers](#部署到cloudflare-workers)
- [多提供商支持](#多提供商支持) - [多提供商支持](#多提供商支持)
- [工作原理](#工作原理) - [工作原理](#工作原理)
- [支持与联系](#支持与联系) - [支持与联系](#支持与联系)
- [常见问题](#常见问题)
- [Star历史](#star历史) - [Star历史](#star历史)
## 示例 ## 示例
@@ -180,7 +179,7 @@ npm run dev
同时通过腾讯云EdgeOne Pages部署也会获得[每日免费的DeepSeek模型额度](https://edgeone.cloud.tencent.com/pages/document/169925463311781888)。 同时通过腾讯云EdgeOne Pages部署也会获得[每日免费的DeepSeek模型额度](https://edgeone.cloud.tencent.com/pages/document/169925463311781888)。
### 部署到Vercel
### 部署到Vercel(推荐)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io)
@@ -205,7 +204,6 @@ npm run dev
- OpenRouter - OpenRouter
- DeepSeek - DeepSeek
- SiliconFlow - SiliconFlow
- ModelScope
- SGLang - SGLang
- Vercel AI Gateway - Vercel AI Gateway
@@ -239,10 +237,6 @@ npm run dev
- 邮箱me[at]jiang.jp - 邮箱me[at]jiang.jp
## 常见问题
请参阅 [FAQ](./FAQ.md) 了解常见问题和解决方案。
## Star历史 ## Star历史
[![Star History Chart](https://api.star-history.com/svg?repos=DayuanJiang/next-ai-draw-io&type=date&legend=top-left)](https://www.star-history.com/#DayuanJiang/next-ai-draw-io&type=date&legend=top-left) [![Star History Chart](https://api.star-history.com/svg?repos=DayuanJiang/next-ai-draw-io&type=date&legend=top-left)](https://www.star-history.com/#DayuanJiang/next-ai-draw-io&type=date&legend=top-left)

View File

@@ -152,19 +152,6 @@ AI_PROVIDER=ollama
AI_MODEL=llama3.2 AI_MODEL=llama3.2
``` ```
### ModelScope
```bash
MODELSCOPE_API_KEY=your_api_key
AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
```
可选的自定义端点:
```bash
MODELSCOPE_BASE_URL=https://your-custom-endpoint
```
可选的自定义 URL 可选的自定义 URL
```bash ```bash

View File

@@ -1,78 +0,0 @@
# Frequently Asked Questions (FAQ)
---
## 1. Cannot Export PDF
**Problem**: Web version redirects to `convert.diagrams.net/node/export` when exporting PDF, then nothing happens
**Cause**: Embedded Draw.io doesn't support direct PDF export, it relies on external conversion service which doesn't work in iframe
**Solution**: Export as image (PNG) first, then print to PDF
**Related Issues**: #539, #125
---
## 2. Cannot Access embed.diagrams.net (Offline/Intranet Deployment)
**Problem**: Intranet environment shows "Cannot find server IP address for embed.diagrams.net"
**Key Point**: `NEXT_PUBLIC_*` environment variables are **build-time** variables, they get bundled into JS code. **Runtime settings don't work!**
**Solution**: Must pass via `args` at build time:
```yaml
# docker-compose.yml
services:
drawio:
image: jgraph/drawio:latest
ports: ["8080:8080"]
next-ai-draw-io:
build:
context: .
args:
- NEXT_PUBLIC_DRAWIO_BASE_URL=http://your-server-ip:8080/
ports: ["3000:3000"]
env_file: .env
```
**Intranet Users**: Modify Dockerfile and build image on external network, then transfer to intranet
**Related Issues**: #295, #317
---
## 3. Self-hosted Model Only Thinks But Doesn't Draw
**Problem**: Locally deployed models (e.g., Qwen, LiteLLM) only output thinking process, don't generate diagrams
**Possible Causes**:
1. **Model too small** - Small models struggle to follow tool calling instructions correctly, recommend 32B+ parameter models
2. **Tool calling not enabled** - Model service needs tool use configuration
**Solution**: Enable tool calling, e.g., vLLM:
```bash
python -m vllm.entrypoints.openai.api_server \
--model Qwen/Qwen3-32B \
--enable-auto-tool-choice \
--tool-call-parser hermes
```
**Related Issues**: #269, #75
---
## 4. "No Image Provided" After Uploading Image
**Problem**: After uploading an image, the system shows "No image provided" error
**Possible Causes**:
1. Model doesn't support vision (e.g., Kimi K2, DeepSeek, Qwen text models)
**Solution**:
- Use vision-capable models: GPT-5.2, Claude 4.5 Sonnet, Gemini 3 Pro
- Models with `vision` or `vl` in name support images
- Update to latest version (v0.4.9+)
**Related Issues**: #324, #421, #469

View File

@@ -158,19 +158,6 @@ Optional custom URL:
OLLAMA_BASE_URL=http://localhost:11434
```
### ModelScope
```bash
MODELSCOPE_API_KEY=your_api_key
AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
```
Optional custom endpoint:
```bash
MODELSCOPE_BASE_URL=https://your-custom-endpoint
```
### Vercel AI Gateway
Vercel AI Gateway provides unified access to multiple AI providers through a single API key. This simplifies authentication and allows you to switch between providers without managing multiple API keys.
@@ -214,7 +201,7 @@ If you only configure **one** provider's API key, the system will automatically
If you configure **multiple** API keys, you must explicitly set `AI_PROVIDER`:
```bash
AI_PROVIDER=google # or: openai, anthropic, deepseek, siliconflow, doubao, azure, bedrock, openrouter, ollama, gateway, sglang, modelscope
AI_PROVIDER=google # or: openai, anthropic, deepseek, siliconflow, doubao, azure, bedrock, openrouter, ollama, gateway, sglang
```
## Model Capability Requirements

View File

@@ -1,78 +0,0 @@
# よくある質問 (FAQ)
---
## 1. PDFをエクスポートできない
**問題**: Web版でPDFエクスポートをクリックすると `convert.diagrams.net/node/export` にリダイレクトされ、その後何も起こらない
**原因**: 埋め込みDraw.ioは直接PDFエクスポートをサポートしておらず、外部変換サービスに依存しているが、iframe内では正常に動作しない
**解決策**: まず画像(PNG)としてエクスポートし、その後PDFに印刷する
**関連Issue**: #539, #125
---
## 2. embed.diagrams.netにアクセスできない(オフライン/イントラネットデプロイ)
**問題**: イントラネット環境で「embed.diagrams.netのサーバーIPアドレスが見つかりません」と表示される
**重要**: `NEXT_PUBLIC_*` 環境変数は**ビルド時**変数であり、JSコードにバンドルされます。**実行時の設定は無効です!**
**解決策**: ビルド時に `args` で渡す必要があります:
```yaml
# docker-compose.yml
services:
drawio:
image: jgraph/drawio:latest
ports: ["8080:8080"]
next-ai-draw-io:
build:
context: .
args:
- NEXT_PUBLIC_DRAWIO_BASE_URL=http://あなたのサーバーIP:8080/
ports: ["3000:3000"]
env_file: .env
```
**イントラネットユーザー**: 外部ネットワークでDockerfileを修正してイメージをビルドし、イントラネットに転送する
**関連Issue**: #295, #317
---
## 3. 自前モデルが思考するだけで描画しない
**問題**: ローカルデプロイのモデル(Qwen、LiteLLMなど)が思考過程のみを出力し、図表を生成しない
**考えられる原因**:
1. **モデルが小さすぎる** - 小さいモデルはtool calling指示に正しく従うことが難しい、32B+パラメータのモデルを推奨
2. **tool callingが有効になっていない** - モデルサービスでtool use機能を設定する必要がある
**解決策**: tool callingを有効にする、例えばvLLM
```bash
python -m vllm.entrypoints.openai.api_server \
--model Qwen/Qwen3-32B \
--enable-auto-tool-choice \
--tool-call-parser hermes
```
**関連Issue**: #269, #75
---
## 4. 画像アップロード後「画像が提供されていません」と表示される
**問題**: 画像をアップロードした後、「画像が提供されていません」というエラーが表示される
**考えられる原因**:
1. モデルがビジョン機能をサポートしていないKimi K2、DeepSeek、Qwenテキストモデルなど
**解決策**:
- ビジョン対応モデルを使用GPT-5.2、Claude 4.5 Sonnet、Gemini 3 Pro
- モデル名に `vision` または `vl` が含まれているものは画像をサポート
- 最新バージョン(v0.4.9+)にアップデート
**関連Issue**: #324, #421, #469

View File

@@ -37,12 +37,11 @@ https://github.com/user-attachments/assets/b2eef5f3-b335-4e71-a755-dc2e80931979
- [インストール](#インストール) - [インストール](#インストール)
- [デプロイ](#デプロイ) - [デプロイ](#デプロイ)
- [EdgeOne Pagesへのデプロイ](#edgeone-pagesへのデプロイ) - [EdgeOne Pagesへのデプロイ](#edgeone-pagesへのデプロイ)
- [Vercelへのデプロイ](#vercelへのデプロイ)
- [Vercelへのデプロイ(推奨)](#vercelへのデプロイ推奨)
- [Cloudflare Workersへのデプロイ](#cloudflare-workersへのデプロイ) - [Cloudflare Workersへのデプロイ](#cloudflare-workersへのデプロイ)
- [マルチプロバイダーサポート](#マルチプロバイダーサポート) - [マルチプロバイダーサポート](#マルチプロバイダーサポート)
- [仕組み](#仕組み) - [仕組み](#仕組み)
- [サポート&お問い合わせ](#サポートお問い合わせ) - [サポート&お問い合わせ](#サポートお問い合わせ)
- [よくある質問](#よくある質問)
- [スター履歴](#スター履歴) - [スター履歴](#スター履歴)
## 例 ## 例
@@ -181,7 +180,7 @@ npm run dev
また、Tencent EdgeOne Pagesでデプロイすると、[DeepSeekモデルの毎日の無料クォータ](https://pages.edgeone.ai/document/edge-ai)が付与されます。 また、Tencent EdgeOne Pagesでデプロイすると、[DeepSeekモデルの毎日の無料クォータ](https://pages.edgeone.ai/document/edge-ai)が付与されます。
### Vercelへのデプロイ
### Vercelへのデプロイ(推奨)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io)
@@ -206,7 +205,6 @@ Next.jsアプリをデプロイする最も簡単な方法は、Next.jsの作成
- OpenRouter - OpenRouter
- DeepSeek - DeepSeek
- SiliconFlow - SiliconFlow
- ModelScope
- SGLang - SGLang
- Vercel AI Gateway - Vercel AI Gateway
@@ -240,10 +238,6 @@ AWS BedrockとOpenRouter以外のすべてのプロバイダーはカスタム
- メールme[at]jiang.jp - メールme[at]jiang.jp
## よくある質問
一般的な問題と解決策については [FAQ](./FAQ.md) をご覧ください。
## スター履歴 ## スター履歴
[![Star History Chart](https://api.star-history.com/svg?repos=DayuanJiang/next-ai-draw-io&type=date&legend=top-left)](https://www.star-history.com/#DayuanJiang/next-ai-draw-io&type=date&legend=top-left) [![Star History Chart](https://api.star-history.com/svg?repos=DayuanJiang/next-ai-draw-io&type=date&legend=top-left)](https://www.star-history.com/#DayuanJiang/next-ai-draw-io&type=date&legend=top-left)

View File

@@ -158,19 +158,6 @@ AI_MODEL=llama3.2
OLLAMA_BASE_URL=http://localhost:11434 OLLAMA_BASE_URL=http://localhost:11434
``` ```
### ModelScope
```bash
MODELSCOPE_API_KEY=your_api_key
AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
```
任意のカスタムエンドポイント:
```bash
MODELSCOPE_BASE_URL=https://your-custom-endpoint
```
### Vercel AI Gateway ### Vercel AI Gateway
Vercel AI Gateway は、単一の API キーで複数の AI プロバイダーへの統合アクセスを提供します。これにより認証が簡素化され、複数の API キーを管理することなくプロバイダーを切り替えることができます。 Vercel AI Gateway は、単一の API キーで複数の AI プロバイダーへの統合アクセスを提供します。これにより認証が簡素化され、複数の API キーを管理することなくプロバイダーを切り替えることができます。

View File

@@ -25,19 +25,6 @@ interface ApplyPresetResult {
env?: Record<string, string>
}
/** Proxy configuration interface */
interface ProxyConfig {
httpProxy?: string
httpsProxy?: string
}
/** Result of setting proxy */
interface SetProxyResult {
success: boolean
error?: string
devMode?: boolean
}
declare global {
interface Window {
/** Main window Electron API */
@@ -58,10 +45,6 @@ declare global {
openFile: () => Promise<string | null>
/** Save data to file via save dialog */
saveFile: (data: string) => Promise<boolean>
/** Get proxy configuration */
getProxy: () => Promise<ProxyConfig>
/** Set proxy configuration (saves and restarts server) */
setProxy: (config: ProxyConfig) => Promise<SetProxyResult>
}
/** Settings window Electron API */
@@ -88,4 +71,4 @@
}
}
export { ConfigPreset, ApplyPresetResult, ProxyConfig, SetProxyResult }
export { ConfigPreset, ApplyPresetResult }

View File

@@ -351,10 +351,6 @@ const PROVIDER_ENV_MAP: Record<string, { apiKey: string; baseUrl: string }> = {
apiKey: "SILICONFLOW_API_KEY", apiKey: "SILICONFLOW_API_KEY",
baseUrl: "SILICONFLOW_BASE_URL", baseUrl: "SILICONFLOW_BASE_URL",
}, },
modelscope: {
apiKey: "MODELSCOPE_API_KEY",
baseUrl: "MODELSCOPE_BASE_URL",
},
gateway: { apiKey: "AI_GATEWAY_API_KEY", baseUrl: "AI_GATEWAY_BASE_URL" }, gateway: { apiKey: "AI_GATEWAY_API_KEY", baseUrl: "AI_GATEWAY_BASE_URL" },
// bedrock and ollama don't use API keys in the same way // bedrock and ollama don't use API keys in the same way
bedrock: { apiKey: "", baseUrl: "" }, bedrock: { apiKey: "", baseUrl: "" },

View File

@@ -4,7 +4,6 @@ import { getCurrentPresetEnv } from "./config-manager"
import { loadEnvFile } from "./env-loader"
import { registerIpcHandlers } from "./ipc-handlers"
import { startNextServer, stopNextServer } from "./next-server"
import { applyProxyToEnv } from "./proxy-manager"
import { registerSettingsWindowHandlers } from "./settings-window"
import { createWindow, getMainWindow } from "./window-manager"
@@ -25,9 +24,6 @@ if (!gotTheLock) {
// Load environment variables from .env files
loadEnvFile()
// Apply proxy settings from saved config
applyProxyToEnv()
// Apply saved preset environment variables (overrides .env)
const presetEnv = getCurrentPresetEnv()
for (const [key, value] of Object.entries(presetEnv)) {

View File

@@ -11,12 +11,6 @@ import {
updatePreset,
} from "./config-manager"
import { restartNextServer } from "./next-server"
import {
applyProxyToEnv,
getProxyConfig,
type ProxyConfig,
saveProxyConfig,
} from "./proxy-manager"
/**
* Allowed configuration keys for presets
@@ -215,40 +209,4 @@ export function registerIpcHandlers(): void {
return setCurrentPreset(id)
},
)
// ==================== Proxy Settings ====================
ipcMain.handle("get-proxy", () => {
return getProxyConfig()
})
ipcMain.handle("set-proxy", async (_event, config: ProxyConfig) => {
try {
// Save config to file
saveProxyConfig(config)
// Apply to current process environment
applyProxyToEnv()
const isDev = process.env.NODE_ENV === "development"
if (isDev) {
// In development, env vars are already applied
// Next.js dev server may need manual restart
return { success: true, devMode: true }
}
// Production: restart Next.js server to pick up new env vars
await restartNextServer()
return { success: true }
} catch (error) {
return {
success: false,
error:
error instanceof Error
? error.message
: "Failed to apply proxy settings",
}
}
})
}

View File

@@ -69,8 +69,6 @@ export async function startNextServer(): Promise<string> {
NODE_ENV: "production", NODE_ENV: "production",
PORT: String(port), PORT: String(port),
HOSTNAME: "localhost", HOSTNAME: "localhost",
// Enable Node.js built-in proxy support for fetch (Node.js 24+)
NODE_USE_ENV_PROXY: "1",
} }
// Set cache directory to a writable location (user's app data folder) // Set cache directory to a writable location (user's app data folder)
@@ -87,13 +85,6 @@ export async function startNextServer(): Promise<string> {
} }
} }
// Debug: log proxy-related env vars
console.log("Proxy env vars being passed to server:", {
HTTP_PROXY: env.HTTP_PROXY || env.http_proxy || "not set",
HTTPS_PROXY: env.HTTPS_PROXY || env.https_proxy || "not set",
NODE_USE_ENV_PROXY: env.NODE_USE_ENV_PROXY || "not set",
})
// Use Electron's utilityProcess API for running Node.js in background // Use Electron's utilityProcess API for running Node.js in background
// This is the recommended way to run Node.js code in Electron // This is the recommended way to run Node.js code in Electron
serverProcess = utilityProcess.fork(serverPath, [], { serverProcess = utilityProcess.fork(serverPath, [], {
@@ -123,41 +114,13 @@ export async function startNextServer(): Promise<string> {
} }
/** /**
* Stop the Next.js server process and wait for it to exit * Stop the Next.js server process
*/ */
export async function stopNextServer(): Promise<void> { export function stopNextServer(): void {
if (serverProcess) { if (serverProcess) {
console.log("Stopping Next.js server...") console.log("Stopping Next.js server...")
// Create a promise that resolves when the process exits
const exitPromise = new Promise<void>((resolve) => {
const proc = serverProcess
if (!proc) {
resolve()
return
}
const onExit = () => {
resolve()
}
proc.once("exit", onExit)
// Timeout after 5 seconds
setTimeout(() => {
proc.removeListener("exit", onExit)
resolve()
}, 5000)
})
serverProcess.kill()
serverProcess = null
// Wait for process to exit
await exitPromise
// Additional wait for OS to release port
await new Promise((resolve) => setTimeout(resolve, 500))
}
}
@@ -187,8 +150,8 @@ async function waitForServerStop(timeout = 5000): Promise<void> {
export async function restartNextServer(): Promise<string> {
console.log("Restarting Next.js server...")
// Stop the current server and wait for it to exit
// Stop the current server
await stopNextServer()
stopNextServer()
// Wait for the port to be released
await waitForServerStop()

View File

@@ -1,75 +0,0 @@
import { app } from "electron"
import * as fs from "fs"
import * as path from "path"
import type { ProxyConfig } from "../electron.d"
export type { ProxyConfig }
const CONFIG_FILE = "proxy-config.json"
function getConfigPath(): string {
return path.join(app.getPath("userData"), CONFIG_FILE)
}
/**
* Load proxy configuration from JSON file
*/
export function loadProxyConfig(): ProxyConfig {
try {
const configPath = getConfigPath()
if (fs.existsSync(configPath)) {
const data = fs.readFileSync(configPath, "utf-8")
return JSON.parse(data) as ProxyConfig
}
} catch (error) {
console.error("Failed to load proxy config:", error)
}
return {}
}
/**
* Save proxy configuration to JSON file
*/
export function saveProxyConfig(config: ProxyConfig): void {
try {
const configPath = getConfigPath()
fs.writeFileSync(configPath, JSON.stringify(config, null, 2), "utf-8")
} catch (error) {
console.error("Failed to save proxy config:", error)
throw error
}
}
/**
* Apply proxy configuration to process.env
* Must be called BEFORE starting the Next.js server
*/
export function applyProxyToEnv(): void {
const config = loadProxyConfig()
if (config.httpProxy) {
process.env.HTTP_PROXY = config.httpProxy
process.env.http_proxy = config.httpProxy
} else {
delete process.env.HTTP_PROXY
delete process.env.http_proxy
}
if (config.httpsProxy) {
process.env.HTTPS_PROXY = config.httpsProxy
process.env.https_proxy = config.httpsProxy
} else {
delete process.env.HTTPS_PROXY
delete process.env.https_proxy
}
}
/**
* Get current proxy configuration (from process.env)
*/
export function getProxyConfig(): ProxyConfig {
return {
httpProxy: process.env.HTTP_PROXY || process.env.http_proxy || "",
httpsProxy: process.env.HTTPS_PROXY || process.env.https_proxy || "",
}
}

View File

@@ -21,9 +21,4 @@ contextBridge.exposeInMainWorld("electronAPI", {
// File operations
openFile: () => ipcRenderer.invoke("dialog-open-file"),
saveFile: (data: string) => ipcRenderer.invoke("dialog-save-file", data),
// Proxy settings
getProxy: () => ipcRenderer.invoke("get-proxy"),
setProxy: (config: { httpProxy?: string; httpsProxy?: string }) =>
ipcRenderer.invoke("set-proxy", config),
})

View File

@@ -55,7 +55,6 @@
<option value="openrouter">OpenRouter</option> <option value="openrouter">OpenRouter</option>
<option value="deepseek">DeepSeek</option> <option value="deepseek">DeepSeek</option>
<option value="siliconflow">SiliconFlow</option> <option value="siliconflow">SiliconFlow</option>
<option value="modelscope">ModelScope</option>
<option value="ollama">Ollama (Local)</option> <option value="ollama">Ollama (Local)</option>
</select> </select>
</div> </div>

View File

@@ -288,7 +288,6 @@ function getProviderLabel(provider) {
openrouter: "OpenRouter", openrouter: "OpenRouter",
deepseek: "DeepSeek", deepseek: "DeepSeek",
siliconflow: "SiliconFlow", siliconflow: "SiliconFlow",
modelscope: "ModelScope",
ollama: "Ollama", ollama: "Ollama",
} }
return labels[provider] || provider return labels[provider] || provider

View File

@@ -72,10 +72,6 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
# SGLANG_API_KEY=your-sglang-api-key
# SGLANG_BASE_URL=http://127.0.0.1:8000/v1 # Your SGLang endpoint
# ModelScope Configuration
# MODELSCOPE_API_KEY=ms-...
# MODELSCOPE_BASE_URL=https://api-inference.modelscope.cn/v1 # Optional: Custom endpoint
# ByteDance Doubao Configuration (via Volcengine)
# DOUBAO_API_KEY=your-doubao-api-key
# DOUBAO_BASE_URL=https://ark.cn-beijing.volces.com/api/v3 # ByteDance Volcengine endpoint

View File

@@ -23,7 +23,6 @@ export type ProviderName =
| "gateway" | "gateway"
| "edgeone" | "edgeone"
| "doubao" | "doubao"
| "modelscope"
interface ModelConfig { interface ModelConfig {
model: any model: any
@@ -60,7 +59,6 @@ const ALLOWED_CLIENT_PROVIDERS: ProviderName[] = [
"gateway", "gateway",
"edgeone", "edgeone",
"doubao", "doubao",
"modelscope",
] ]
// Bedrock provider options for Anthropic beta features // Bedrock provider options for Anthropic beta features
@@ -355,7 +353,6 @@ function buildProviderOptions(
case "siliconflow": case "siliconflow":
case "sglang": case "sglang":
case "gateway": case "gateway":
case "modelscope":
case "doubao": { case "doubao": {
// These providers don't have reasoning configs in AI SDK yet // These providers don't have reasoning configs in AI SDK yet
// Gateway passes through to underlying providers which handle their own configs // Gateway passes through to underlying providers which handle their own configs
@@ -384,7 +381,6 @@ const PROVIDER_ENV_VARS: Record<ProviderName, string | null> = {
gateway: "AI_GATEWAY_API_KEY", gateway: "AI_GATEWAY_API_KEY",
edgeone: null, // No credentials needed - uses EdgeOne Edge AI edgeone: null, // No credentials needed - uses EdgeOne Edge AI
doubao: "DOUBAO_API_KEY", doubao: "DOUBAO_API_KEY",
modelscope: "MODELSCOPE_API_KEY",
} }
/** /**
@@ -449,7 +445,7 @@ function validateProviderCredentials(provider: ProviderName): void {
* Get the AI model based on environment variables
*
* Environment variables:
* - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, modelscope)
* - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway)
* - AI_MODEL: The model ID/name for the selected provider
*
* Provider-specific env vars:
@@ -464,11 +460,9 @@ function validateProviderCredentials(provider: ProviderName): void {
* - DEEPSEEK_API_KEY: DeepSeek API key
* - DEEPSEEK_BASE_URL: DeepSeek endpoint (optional)
* - SILICONFLOW_API_KEY: SiliconFlow API key
* - SILICONFLOW_BASE_URL: SiliconFlow endpoint (optional, defaults to https://api.siliconflow.cn/v1)
* - SILICONFLOW_BASE_URL: SiliconFlow endpoint (optional, defaults to https://api.siliconflow.com/v1)
* - SGLANG_API_KEY: SGLang API key
* - SGLANG_BASE_URL: SGLang endpoint (optional)
* - MODELSCOPE_API_KEY: ModelScope API key
* - MODELSCOPE_BASE_URL: ModelScope endpoint (optional)
*/
export function getAIModel(overrides?: ClientOverrides): ModelConfig {
// SECURITY: Prevent SSRF attacks (GHSA-9qf7-mprq-9qgm)
@@ -543,7 +537,6 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
`- AZURE_API_KEY for Azure\n` +
`- SILICONFLOW_API_KEY for SiliconFlow\n` +
`- SGLANG_API_KEY for SGLang\n` +
`- MODELSCOPE_API_KEY for ModelScope\n` +
`Or set AI_PROVIDER=ollama for local Ollama.`,
)
} else {
@@ -721,7 +714,7 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
const baseURL =
overrides?.baseUrl ||
process.env.SILICONFLOW_BASE_URL ||
"https://api.siliconflow.cn/v1"
"https://api.siliconflow.com/v1"
const siliconflowProvider = createOpenAI({
apiKey,
baseURL,
@@ -899,23 +892,9 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
break
}
case "modelscope": {
const apiKey = overrides?.apiKey || process.env.MODELSCOPE_API_KEY
const baseURL =
overrides?.baseUrl ||
process.env.MODELSCOPE_BASE_URL ||
"https://api-inference.modelscope.cn/v1"
const modelscopeProvider = createOpenAI({
apiKey,
baseURL,
})
model = modelscopeProvider.chat(modelId)
break
}
default:
throw new Error(
`Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao, modelscope`,
`Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao`,
)
}

View File

@@ -28,8 +28,7 @@
"azure": "Azure OpenAI", "azure": "Azure OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"deepseek": "DeepSeek", "deepseek": "DeepSeek",
"siliconflow": "SiliconFlow", "siliconflow": "SiliconFlow"
"modelscope": "ModelScope"
}, },
"chat": { "chat": {
"placeholder": "Describe your diagram or upload a file...", "placeholder": "Describe your diagram or upload a file...",
@@ -107,13 +106,7 @@
"diagramActions": "Diagram Actions", "diagramActions": "Diagram Actions",
"diagramActionsDescription": "Manage diagram history and exports", "diagramActionsDescription": "Manage diagram history and exports",
"history": "History", "history": "History",
"download": "Download", "download": "Download"
"proxy": "Proxy Settings",
"proxyDescription": "Configure HTTP/HTTPS proxy for API requests (Desktop only)",
"httpProxy": "HTTP Proxy",
"httpsProxy": "HTTPS Proxy",
"applyProxy": "Apply",
"proxyApplied": "Proxy settings applied"
}, },
"save": { "save": {
"title": "Save Diagram", "title": "Save Diagram",
@@ -199,9 +192,7 @@
"description": "Paste a URL to extract and analyze its content", "description": "Paste a URL to extract and analyze its content",
"Extracting": "Extracting...", "Extracting": "Extracting...",
"extract": "Extract", "extract": "Extract",
"Cancel": "Cancel", "Cancel": "Cancel"
"enterUrl": "Please enter a URL",
"invalidFormat": "Invalid URL format"
}, },
"reasoning": { "reasoning": {
"thinking": "Thinking...", "thinking": "Thinking...",

View File

@@ -28,8 +28,7 @@
"azure": "Azure OpenAI", "azure": "Azure OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"deepseek": "DeepSeek", "deepseek": "DeepSeek",
"siliconflow": "SiliconFlow", "siliconflow": "SiliconFlow"
"modelscope": "ModelScope"
}, },
"chat": { "chat": {
"placeholder": "ダイアグラムを説明するか、ファイルをアップロード...", "placeholder": "ダイアグラムを説明するか、ファイルをアップロード...",
@@ -107,13 +106,7 @@
"diagramActions": "ダイアグラム操作", "diagramActions": "ダイアグラム操作",
"diagramActionsDescription": "ダイアグラムの履歴とエクスポートを管理", "diagramActionsDescription": "ダイアグラムの履歴とエクスポートを管理",
"history": "履歴", "history": "履歴",
"download": "ダウンロード", "download": "ダウンロード"
"proxy": "プロキシ設定",
"proxyDescription": "API リクエスト用の HTTP/HTTPS プロキシを設定(デスクトップ版のみ)",
"httpProxy": "HTTP プロキシ",
"httpsProxy": "HTTPS プロキシ",
"applyProxy": "適用",
"proxyApplied": "プロキシ設定が適用されました"
}, },
"save": { "save": {
"title": "ダイアグラムを保存", "title": "ダイアグラムを保存",
@@ -199,9 +192,7 @@
"description": "URLを貼り付けてそのコンテンツを抽出および分析します", "description": "URLを貼り付けてそのコンテンツを抽出および分析します",
"Extracting": "抽出中...", "Extracting": "抽出中...",
"extract": "抽出", "extract": "抽出",
"Cancel": "キャンセル", "Cancel": "キャンセル"
"enterUrl": "URLを入力してください",
"invalidFormat": "無効なURL形式です"
}, },
"reasoning": { "reasoning": {
"thinking": "考え中...", "thinking": "考え中...",

View File

@@ -28,8 +28,7 @@
"azure": "Azure OpenAI", "azure": "Azure OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"deepseek": "DeepSeek", "deepseek": "DeepSeek",
"siliconflow": "SiliconFlow", "siliconflow": "SiliconFlow"
"modelscope": "ModelScope"
}, },
"chat": { "chat": {
"placeholder": "描述您的图表或上传文件...", "placeholder": "描述您的图表或上传文件...",
@@ -107,13 +106,7 @@
"diagramActions": "图表操作", "diagramActions": "图表操作",
"diagramActionsDescription": "管理图表历史记录和导出", "diagramActionsDescription": "管理图表历史记录和导出",
"history": "历史记录", "history": "历史记录",
"download": "下载", "download": "下载"
"proxy": "代理设置",
"proxyDescription": "配置 API 请求的 HTTP/HTTPS 代理(仅桌面版)",
"httpProxy": "HTTP 代理",
"httpsProxy": "HTTPS 代理",
"applyProxy": "应用",
"proxyApplied": "代理设置已应用"
}, },
"save": { "save": {
"title": "保存图表", "title": "保存图表",
@@ -199,9 +192,7 @@
"description": "粘贴 URL 以提取和分析其内容", "description": "粘贴 URL 以提取和分析其内容",
"Extracting": "提取中...", "Extracting": "提取中...",
"extract": "提取", "extract": "提取",
"Cancel": "取消", "Cancel": "取消"
"enterUrl": "请输入 URL",
"invalidFormat": "URL 格式无效"
}, },
"reasoning": { "reasoning": {
"thinking": "思考中...", "thinking": "思考中...",

View File

@@ -13,7 +13,6 @@ export type ProviderName =
| "gateway" | "gateway"
| "edgeone" | "edgeone"
| "doubao" | "doubao"
| "modelscope"
// Individual model configuration // Individual model configuration
export interface ModelConfig { export interface ModelConfig {
@@ -80,7 +79,7 @@ export const PROVIDER_INFO: Record<
deepseek: { label: "DeepSeek" }, deepseek: { label: "DeepSeek" },
siliconflow: { siliconflow: {
label: "SiliconFlow", label: "SiliconFlow",
defaultBaseUrl: "https://api.siliconflow.cn/v1", defaultBaseUrl: "https://api.siliconflow.com/v1",
}, },
sglang: { sglang: {
label: "SGLang", label: "SGLang",
@@ -92,10 +91,6 @@ export const PROVIDER_INFO: Record<
label: "Doubao (ByteDance)", label: "Doubao (ByteDance)",
defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3", defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3",
}, },
modelscope: {
label: "ModelScope",
defaultBaseUrl: "https://api-inference.modelscope.cn/v1",
},
} }
// Suggested models per provider for quick add // Suggested models per provider for quick add
@@ -236,17 +231,6 @@ export const SUGGESTED_MODELS: Record<ProviderName, string[]> = {
"doubao-pro-32k-241215", "doubao-pro-32k-241215",
"doubao-pro-256k-241215", "doubao-pro-256k-241215",
], ],
modelscope: [
// Qwen
"Qwen/Qwen2.5-72B-Instruct",
"Qwen/Qwen2.5-32B-Instruct",
"Qwen/Qwen3-235B-A22B-Instruct-2507",
"Qwen/Qwen3-VL-235B-A22B-Instruct",
"Qwen/Qwen3-32B",
// DeepSeek
"deepseek-ai/DeepSeek-R1-0528",
"deepseek-ai/DeepSeek-V3.2",
],
}
// Helper to generate UUID

View File

@@ -1,6 +1,6 @@
{
"name": "next-ai-draw-io",
"version": "0.4.10",
"version": "0.4.9",
"license": "Apache-2.0",
"private": true,
"main": "dist-electron/main/index.js",
@@ -24,7 +24,6 @@
"dist": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml",
"dist:mac": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --mac",
"dist:win": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --win",
"dist:win:build": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --win --publish never",
"dist:linux": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --linux",
"dist:all": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --mac --win --linux",
"test": "vitest",
@@ -73,7 +72,6 @@
"jsonrepair": "^3.13.1",
"lucide-react": "^0.562.0",
"motion": "^12.23.25",
"nanoid": "^3.3.11",
"negotiator": "^1.0.0",
"next": "^16.0.7",
"ollama-ai-provider-v2": "^2.0.0",

View File

@@ -1,11 +0,0 @@
{
"name": "next-ai-drawio",
"version": "1.0.0",
"description": "AI-powered Draw.io diagram generation with real-time browser preview. Create flowcharts, architecture diagrams, and more through natural language.",
"author": {
"name": "DayuanJiang"
},
"repository": "https://github.com/DayuanJiang/next-ai-draw-io",
"homepage": "https://next-ai-drawio.jiang.jp",
"license": "Apache-2.0"
}

View File

@@ -1,8 +0,0 @@
{
"mcpServers": {
"drawio": {
"command": "npx",
"args": ["@next-ai-drawio/mcp-server@latest"]
}
}
}

View File

@@ -1,107 +0,0 @@
# Next AI Draw.io - Claude Code Plugin
AI-powered Draw.io diagram generation with real-time browser preview for Claude Code.
## Installation
### From Plugin Directory (Coming Soon)
Once approved, install via:
```
/plugin install next-ai-drawio
```
### Manual Installation
```bash
claude --plugin-dir /path/to/packages/claude-plugin
```
Or add the MCP server directly:
```bash
claude mcp add drawio -- npx @next-ai-drawio/mcp-server@latest
```
## Features
- **Real-time Preview**: Diagrams appear and update in your browser as Claude creates them
- **Version History**: Restore previous diagram versions with visual thumbnails
- **Natural Language**: Describe diagrams in plain text - flowcharts, architecture diagrams, etc.
- **Edit Support**: Modify existing diagrams with natural language instructions
- **Export**: Save diagrams as `.drawio` files
- **Self-contained**: Embedded server, no external dependencies required
## Use Case Examples
### 1. Create Architecture Diagrams
```
Generate an AWS architecture diagram with Lambda, API Gateway, DynamoDB,
and S3 for a serverless REST API
```
### 2. Flowchart Generation
```
Create a flowchart showing the CI/CD pipeline: code commit -> build ->
test -> staging deploy -> production deploy with approval gates
```
### 3. System Design Documentation
```
Design a microservices e-commerce system with user service, product catalog,
shopping cart, order processing, and payment gateway
```
### 4. Cloud Architecture (AWS/GCP/Azure)
```
Generate a GCP architecture diagram with Cloud Run, Cloud SQL, and
Cloud Storage for a web application
```
### 5. Sequence Diagrams
```
Create a sequence diagram showing OAuth 2.0 authorization code flow
between user, client app, auth server, and resource server
```
## Available Tools
| Tool | Description |
|------|-------------|
| `start_session` | Opens browser with real-time diagram preview |
| `create_new_diagram` | Create a new diagram from XML |
| `edit_diagram` | Edit diagram by ID-based operations |
| `get_diagram` | Get the current diagram XML |
| `export_diagram` | Save diagram to a `.drawio` file |
## How It Works
```
Claude Code <--stdio--> MCP Server <--http--> Browser (draw.io)
```
1. Ask Claude to create a diagram
2. Claude calls `start_session` to open a browser window
3. Claude generates diagram XML and sends it to the browser
4. You see the diagram update in real-time!
## Configuration
| Variable | Default | Description |
|----------|---------|-------------|
| `PORT` | `6002` | Port for the embedded HTTP server |
| `DRAWIO_BASE_URL` | `https://embed.diagrams.net` | Base URL for draw.io (for self-hosted deployments) |
## Links
- [Homepage](https://next-ai-drawio.jiang.jp)
- [GitHub Repository](https://github.com/DayuanJiang/next-ai-draw-io)
- [MCP Server Documentation](https://github.com/DayuanJiang/next-ai-draw-io/tree/main/packages/mcp-server)
## License
Apache-2.0

View File

@@ -1,6 +1,6 @@
{
"name": "@next-ai-drawio/mcp-server",
"version": "0.1.12",
"version": "0.1.11",
"description": "MCP server for Next AI Draw.io - AI-powered diagram generation with real-time browser preview",
"type": "module",
"main": "dist/index.js",
@@ -21,16 +21,16 @@
"claude",
"model-context-protocol"
],
"author": "DayuanJiang",
"author": "Biki-dev",
"license": "Apache-2.0",
"repository": {
"type": "git",
"url": "https://github.com/DayuanJiang/next-ai-draw-io",
"url": "https://github.com/Biki-dev/next-ai-draw-io",
"directory": "packages/mcp-server"
},
"homepage": "https://next-ai-drawio.jiang.jp",
"bugs": {
"url": "https://github.com/DayuanJiang/next-ai-draw-io/issues"
"url": "https://github.com/Biki-dev/next-ai-draw-io/issues"
},
"publishConfig": {
"access": "public"

View File

@@ -38,12 +38,4 @@ const targetStaticDir = join(targetDir, ".next", "static")
mkdirSync(targetStaticDir, { recursive: true })
cpSync(staticDir, targetStaticDir, { recursive: true })
// Copy public folder (required for favicon-white.svg and other assets)
console.log("Copying public folder...")
const publicDir = join(rootDir, "public")
const targetPublicDir = join(targetDir, "public")
if (existsSync(publicDir)) {
cpSync(publicDir, targetPublicDir, { recursive: true })
}
console.log("Done! Files prepared in electron-standalone/") console.log("Done! Files prepared in electron-standalone/")