Compare commits

..

4 Commits

Author SHA1 Message Date
dayuan.jiang
1198cc2a9b Merge branch 'main' into feature/url-content-extraction 2026-01-06 00:06:18 +09:00
Biki Kalita
a61e89d6d0 chore: restore package.json and package-lock.json 2026-01-05 14:54:40 +05:30
Biki Kalita
580d42f535 Changes made as recommended by Claude:
1. Added a request timeout to prevent server resources from being tied up (route.ts)
2. Implemented runtime validation for the API response shape (url-utils.ts)
3. Removed hardcoded English error messages and replaced them with localized strings (url-input-dialog.tsx)
4. Fixed the incorrect i18n namespace (changed from pdf.* to url.*) (url-input-dialog.tsx and en/ja/zh.json)
2026-01-05 14:51:24 +05:30
Biki Kalita
64268b0fac feat: add URL content extraction for AI diagram generation 2026-01-04 22:36:46 +05:30
27 changed files with 175 additions and 262 deletions
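The extraction endpoint and its helpers (route.ts, url-utils.ts) touched by points 1 and 2 of commit 580d42f535 are not among the hunks below. For reference, here is a minimal TypeScript sketch of what a fetch timeout plus runtime response-shape validation can look like in a Next.js route handler; the timeout value, field names, and function names are illustrative assumptions, not the actual implementation.

```ts
// Hypothetical sketch only - the real route.ts / url-utils.ts are not in this diff.
import { NextResponse } from "next/server"

const EXTRACT_TIMEOUT_MS = 10_000 // assumed deadline, not the project's actual value

// route.ts (commit point 1): bound the upstream fetch with a timeout so a slow
// or unresponsive host cannot tie up server resources indefinitely.
export async function POST(req: Request) {
    const { url } = await req.json()
    try {
        const res = await fetch(url, {
            signal: AbortSignal.timeout(EXTRACT_TIMEOUT_MS),
        })
        const html = await res.text()
        return NextResponse.json({ content: html })
    } catch (error) {
        const timedOut =
            error instanceof DOMException && error.name === "TimeoutError"
        return NextResponse.json(
            { error: timedOut ? "Request timed out" : "Extraction failed" },
            { status: timedOut ? 504 : 502 },
        )
    }
}

// url-utils.ts (commit point 2): validate the API response shape at runtime
// instead of blindly casting the parsed JSON.
export function parseExtractResponse(data: unknown): { content: string } {
    if (
        typeof data !== "object" ||
        data === null ||
        typeof (data as { content?: unknown }).content !== "string"
    ) {
        throw new Error("Unexpected response shape from the extract API")
    }
    return data as { content: string }
}
```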

View File

@@ -11,8 +11,7 @@ on:
         required: false
 jobs:
-    # Mac and Linux: Build and publish directly (no signing needed)
-    build-mac-linux:
+    build:
         permissions:
             contents: write
         strategy:
@@ -21,9 +20,13 @@ jobs:
                 include:
                     - os: macos-latest
                       platform: mac
+                    - os: windows-latest
+                      platform: win
                     - os: ubuntu-latest
                       platform: linux
         runs-on: ${{ matrix.os }}
         steps:
            - name: Checkout code
              uses: actions/checkout@v6
@@ -37,58 +40,7 @@ jobs:
            - name: Install dependencies
              run: npm install
-           - name: Build and publish
+           - name: Build and publish Electron app
              run: npm run dist:${{ matrix.platform }}
              env:
                  GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-    # Windows: Build, sign with SignPath, then publish
-    build-windows:
-        permissions:
-            contents: write
-        runs-on: windows-latest
-        steps:
-           - name: Checkout code
-             uses: actions/checkout@v6
-           - name: Setup Node.js
-             uses: actions/setup-node@v6
-             with:
-                 node-version: 24
-                 cache: "npm"
-           - name: Install dependencies
-             run: npm install
-           # Build WITHOUT publishing
-           - name: Build Windows app
-             run: npm run dist:win:build
-             env:
-                 GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-           - name: Upload unsigned artifacts for signing
-             uses: actions/upload-artifact@v4
-             id: upload-unsigned
-             with:
-                 name: windows-unsigned
-                 path: release/*.exe
-                 retention-days: 1
-           - name: Sign with SignPath
-             uses: signpath/github-action-submit-signing-request@v2
-             with:
-                 api-token: ${{ secrets.SIGNPATH_API_TOKEN }}
-                 organization-id: '880a211d-2cd3-4e7b-8d04-3d1f8eb39df5'
-                 project-slug: 'next-ai-draw-io'
-                 signing-policy-slug: 'test-signing'
-                 artifact-configuration-slug: 'windows-exe'
-                 github-artifact-id: ${{ steps.upload-unsigned.outputs.artifact-id }}
-                 wait-for-completion: true
-                 output-artifact-directory: release-signed
-           - name: Upload signed artifacts to release
-             uses: softprops/action-gh-release@v2
-             with:
-                 files: release-signed/*.exe
-             env:
-                 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -40,7 +40,7 @@ https://github.com/user-attachments/assets/9d60a3e8-4a1c-4b5e-acbb-26af2d3eabd1
 - [Installation](#installation)
 - [Deployment](#deployment)
     - [Deploy to EdgeOne Pages](#deploy-to-edgeone-pages)
-    - [Deploy on Vercel](#deploy-on-vercel)
+    - [Deploy on Vercel (Recommended)](#deploy-on-vercel-recommended)
     - [Deploy on Cloudflare Workers](#deploy-on-cloudflare-workers)
 - [Multi-Provider Support](#multi-provider-support)
 - [How It Works](#how-it-works)
@@ -185,7 +185,7 @@ Check out the [Tencent EdgeOne Pages documentation](https://pages.edgeone.ai/doc
 Additionally, deploying through Tencent EdgeOne Pages will also grant you a [daily free quota for DeepSeek models](https://pages.edgeone.ai/document/edge-ai).

-### Deploy on Vercel
+### Deploy on Vercel (Recommended)

 [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io)
@@ -211,7 +211,6 @@ See the [Next.js deployment documentation](https://nextjs.org/docs/app/building-
 - OpenRouter
 - DeepSeek
 - SiliconFlow
-- ModelScope
 - SGLang
 - Vercel AI Gateway

View File

@@ -10,7 +10,18 @@ export const metadata: Metadata = {
     keywords: ["AI图表", "draw.io", "AWS架构", "GCP图表", "Azure图表", "LLM"],
 }

+function formatNumber(num: number): string {
+    if (num >= 1000) {
+        return `${num / 1000}k`
+    }
+    return num.toString()
+}
+
 export default function AboutCN() {
+    const dailyRequestLimit = Number(process.env.DAILY_REQUEST_LIMIT) || 20
+    const dailyTokenLimit = Number(process.env.DAILY_TOKEN_LIMIT) || 500000
+    const tpmLimit = Number(process.env.TPM_LIMIT) || 50000
     return (
         <div className="min-h-screen bg-gray-50">
             {/* Navigation */}
@@ -97,6 +108,42 @@ export default function AboutCN() {
                             </p>
                         </div>
+                        {/* Usage Limits */}
+                        <p className="text-sm text-gray-600 mb-3">
+                            请注意当前的使用限制：
+                        </p>
+                        <div className="grid grid-cols-3 gap-3 mb-5">
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(dailyRequestLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    次/天
+                                </p>
+                            </div>
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(dailyTokenLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    Token/天
+                                </p>
+                            </div>
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(tpmLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    Token/分钟
+                                </p>
+                            </div>
+                        </div>
+                        {/* Divider */}
+                        <div className="flex items-center gap-3 my-5">
+                            <div className="flex-1 h-px bg-gradient-to-r from-transparent via-amber-300 to-transparent" />
+                        </div>
                         {/* Bring Your Own Key */}
                         <div className="text-center">
                             <h4 className="text-base font-bold text-gray-900 mb-2">
@@ -297,7 +344,6 @@ export default function AboutCN() {
                                 <li>OpenRouter</li>
                                 <li>DeepSeek</li>
                                 <li>SiliconFlow</li>
-                                <li>ModelScope</li>
                             </ul>
                             <p className="text-gray-700 mt-4">
                                 <code>claude-sonnet-4-5</code>{" "}
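The formatNumber helper added above (and repeated verbatim in the Japanese and English pages below) divides by 1000 without rounding. A few worked examples, using the default limits shown in this diff:

```ts
// The helper exactly as added in this diff, with worked examples:
function formatNumber(num: number): string {
    if (num >= 1000) {
        return `${num / 1000}k`
    }
    return num.toString()
}

formatNumber(20)     // "20"   - below 1000, returned unchanged (DAILY_REQUEST_LIMIT default)
formatNumber(50000)  // "50k"  - TPM_LIMIT default
formatNumber(500000) // "500k" - DAILY_TOKEN_LIMIT default
formatNumber(1500)   // "1.5k" - note: no rounding, so 2345 would yield "2.345k"
```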

View File

@@ -17,7 +17,18 @@ export const metadata: Metadata = {
     ],
 }

+function formatNumber(num: number): string {
+    if (num >= 1000) {
+        return `${num / 1000}k`
+    }
+    return num.toString()
+}
+
 export default function AboutJA() {
+    const dailyRequestLimit = Number(process.env.DAILY_REQUEST_LIMIT) || 20
+    const dailyTokenLimit = Number(process.env.DAILY_TOKEN_LIMIT) || 500000
+    const tpmLimit = Number(process.env.TPM_LIMIT) || 50000
     return (
         <div className="min-h-screen bg-gray-50">
             {/* Navigation */}
@@ -105,6 +116,42 @@ export default function AboutJA() {
                             </p>
                         </div>
+                        {/* Usage Limits */}
+                        <p className="text-sm text-gray-600 mb-3">
+                            現在の使用制限にご注意ください：
+                        </p>
+                        <div className="grid grid-cols-3 gap-3 mb-5">
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(dailyRequestLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    リクエスト/日
+                                </p>
+                            </div>
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(dailyTokenLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    トークン/日
+                                </p>
+                            </div>
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(tpmLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    トークン/分
+                                </p>
+                            </div>
+                        </div>
+                        {/* Divider */}
+                        <div className="flex items-center gap-3 my-5">
+                            <div className="flex-1 h-px bg-gradient-to-r from-transparent via-amber-300 to-transparent" />
+                        </div>
                         {/* Bring Your Own Key */}
                         <div className="text-center">
                             <h4 className="text-base font-bold text-gray-900 mb-2">
@@ -312,7 +359,6 @@ export default function AboutJA() {
                                 <li>OpenRouter</li>
                                 <li>DeepSeek</li>
                                 <li>SiliconFlow</li>
-                                <li>ModelScope</li>
                             </ul>
                             <p className="text-gray-700 mt-4">
                                 <code>claude-sonnet-4-5</code>

View File

@@ -17,7 +17,18 @@ export const metadata: Metadata = {
     ],
 }

+function formatNumber(num: number): string {
+    if (num >= 1000) {
+        return `${num / 1000}k`
+    }
+    return num.toString()
+}
+
 export default function About() {
+    const dailyRequestLimit = Number(process.env.DAILY_REQUEST_LIMIT) || 20
+    const dailyTokenLimit = Number(process.env.DAILY_TOKEN_LIMIT) || 500000
+    const tpmLimit = Number(process.env.TPM_LIMIT) || 50000
     return (
         <div className="min-h-screen bg-gray-50">
             {/* Navigation */}
@@ -107,6 +118,42 @@ export default function About() {
                             </p>
                         </div>
+                        {/* Usage Limits */}
+                        <p className="text-sm text-gray-600 mb-3">
+                            Please note the current usage limits:
+                        </p>
+                        <div className="grid grid-cols-3 gap-3 mb-5">
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(dailyRequestLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    requests/day
+                                </p>
+                            </div>
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(dailyTokenLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    tokens/day
+                                </p>
+                            </div>
+                            <div className="text-center p-3 bg-white/60 rounded-lg">
+                                <p className="text-lg font-bold text-amber-600">
+                                    {formatNumber(tpmLimit)}
+                                </p>
+                                <p className="text-xs text-gray-500">
+                                    tokens/min
+                                </p>
+                            </div>
+                        </div>
+                        {/* Divider */}
+                        <div className="flex items-center gap-3 my-5">
+                            <div className="flex-1 h-px bg-gradient-to-r from-transparent via-amber-300 to-transparent" />
+                        </div>
                         {/* Bring Your Own Key */}
                         <div className="text-center">
                             <h4 className="text-base font-bold text-gray-900 mb-2">
@@ -331,7 +378,6 @@ export default function About() {
                                 <li>OpenRouter</li>
                                 <li>DeepSeek</li>
                                 <li>SiliconFlow</li>
-                                <li>ModelScope</li>
                             </ul>
                             <p className="text-gray-700 mt-4">
                                 Note that <code>claude-sonnet-4-5</code> has trained on

View File

@@ -274,75 +274,6 @@ export async function POST(req: Request) {
                 break
             }
-            case "modelscope": {
-                const baseURL =
-                    baseUrl || "https://api-inference.modelscope.cn/v1"
-                const startTime = Date.now()
-                try {
-                    // Initiate a streaming request (required for QwQ-32B and certain Qwen3 models)
-                    const response = await fetch(
-                        `${baseURL}/chat/completions`,
-                        {
-                            method: "POST",
-                            headers: {
-                                "Content-Type": "application/json",
-                                Authorization: `Bearer ${apiKey}`,
-                            },
-                            body: JSON.stringify({
-                                model: modelId,
-                                messages: [
-                                    { role: "user", content: "Say 'OK'" },
-                                ],
-                                max_tokens: 20,
-                                stream: true,
-                                enable_thinking: false,
-                            }),
-                        },
-                    )
-                    if (!response.ok) {
-                        const errorText = await response.text()
-                        throw new Error(
-                            `ModelScope API error (${response.status}): ${errorText}`,
-                        )
-                    }
-                    const contentType =
-                        response.headers.get("content-type") || ""
-                    const isValidStreamingResponse =
-                        response.status === 200 &&
-                        (contentType.includes("text/event-stream") ||
-                            contentType.includes("application/json"))
-                    if (!isValidStreamingResponse) {
-                        throw new Error(
-                            `Unexpected response format: ${contentType}`,
-                        )
-                    }
-                    const responseTime = Date.now() - startTime
-                    if (response.body) {
-                        response.body.cancel().catch(() => {
-                            /* Ignore cancellation errors */
-                        })
-                    }
-                    return NextResponse.json({
-                        valid: true,
-                        responseTime,
-                        note: "ModelScope model validated (using streaming API)",
-                    })
-                } catch (error) {
-                    console.error(
-                        "[validate-model] ModelScope validation failed:",
-                        error,
-                    )
-                    throw error
-                }
-            }
             default:
                 return NextResponse.json(
                     { valid: false, error: `Unknown provider: ${provider}` },

View File

@@ -347,12 +347,6 @@ export function ChatInput({
             setShowUrlDialog(false)
         } catch (error) {
-            // Remove the URL from the data map on error
-            const newUrlData = urlData
-                ? new Map(urlData)
-                : new Map<string, UrlData>()
-            newUrlData.delete(url)
-            onUrlChange(newUrlData)
             showErrorToast(
                 <span className="text-muted-foreground">
                     {error instanceof Error

View File

@@ -79,7 +79,6 @@ const PROVIDER_LOGO_MAP: Record<string, string> = {
     gateway: "vercel",
     edgeone: "tencent-cloud",
     doubao: "bytedance",
-    modelscope: "modelscope",
 }

 // Provider logo component

View File

@@ -50,7 +50,6 @@ const PROVIDER_LOGO_MAP: Record<string, string> = {
     gateway: "vercel",
     edgeone: "tencent-cloud",
     doubao: "bytedance",
-    modelscope: "modelscope",
 }

 // Group models by providerLabel (handles duplicate providers)

View File

@@ -35,14 +35,14 @@ export function UrlInputDialog({
         setError("")

         if (!url.trim()) {
-            setError(dict.url.enterUrl)
+            setError("Please enter a URL")
             return
         }

         try {
             new URL(url)
         } catch {
-            setError(dict.url.invalidFormat)
+            setError("Invalid URL format")
             return
         }

View File

@@ -37,7 +37,7 @@ https://github.com/user-attachments/assets/b2eef5f3-b335-4e71-a755-dc2e80931979
- [安装](#安装) - [安装](#安装)
- [部署](#部署) - [部署](#部署)
- [部署到腾讯云EdgeOne Pages](#部署到腾讯云edgeone-pages) - [部署到腾讯云EdgeOne Pages](#部署到腾讯云edgeone-pages)
- [部署到Vercel](#部署到vercel) - [部署到Vercel(推荐)](#部署到vercel推荐)
- [部署到Cloudflare Workers](#部署到cloudflare-workers) - [部署到Cloudflare Workers](#部署到cloudflare-workers)
- [多提供商支持](#多提供商支持) - [多提供商支持](#多提供商支持)
- [工作原理](#工作原理) - [工作原理](#工作原理)
@@ -179,7 +179,7 @@ npm run dev
同时通过腾讯云EdgeOne Pages部署也会获得[每日免费的DeepSeek模型额度](https://edgeone.cloud.tencent.com/pages/document/169925463311781888)。 同时通过腾讯云EdgeOne Pages部署也会获得[每日免费的DeepSeek模型额度](https://edgeone.cloud.tencent.com/pages/document/169925463311781888)。
### 部署到Vercel ### 部署到Vercel(推荐)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io)
@@ -204,7 +204,6 @@ npm run dev
- OpenRouter - OpenRouter
- DeepSeek - DeepSeek
- SiliconFlow - SiliconFlow
- ModelScope
- SGLang - SGLang
- Vercel AI Gateway - Vercel AI Gateway

View File

@@ -152,19 +152,6 @@ AI_PROVIDER=ollama
 AI_MODEL=llama3.2
 ```
-
-### ModelScope
-
-```bash
-MODELSCOPE_API_KEY=your_api_key
-AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
-```
-
-可选的自定义端点:
-
-```bash
-MODELSCOPE_BASE_URL=https://your-custom-endpoint
-```

 可选的自定义 URL
 ```bash

View File

@@ -158,19 +158,6 @@ Optional custom URL:
 OLLAMA_BASE_URL=http://localhost:11434
 ```
-
-### ModelScope
-
-```bash
-MODELSCOPE_API_KEY=your_api_key
-AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
-```
-
-Optional custom endpoint:
-
-```bash
-MODELSCOPE_BASE_URL=https://your-custom-endpoint
-```

 ### Vercel AI Gateway

 Vercel AI Gateway provides unified access to multiple AI providers through a single API key. This simplifies authentication and allows you to switch between providers without managing multiple API keys.
@@ -214,7 +201,7 @@ If you only configure **one** provider's API key, the system will automatically
 If you configure **multiple** API keys, you must explicitly set `AI_PROVIDER`:

 ```bash
-AI_PROVIDER=google # or: openai, anthropic, deepseek, siliconflow, doubao, azure, bedrock, openrouter, ollama, gateway, sglang, modelscope
+AI_PROVIDER=google # or: openai, anthropic, deepseek, siliconflow, doubao, azure, bedrock, openrouter, ollama, gateway, sglang
 ```

 ## Model Capability Requirements
## Model Capability Requirements ## Model Capability Requirements

View File

@@ -37,7 +37,7 @@ https://github.com/user-attachments/assets/b2eef5f3-b335-4e71-a755-dc2e80931979
 - [インストール](#インストール)
 - [デプロイ](#デプロイ)
     - [EdgeOne Pagesへのデプロイ](#edgeone-pagesへのデプロイ)
-    - [Vercelへのデプロイ](#vercelへのデプロイ)
+    - [Vercelへのデプロイ(推奨)](#vercelへのデプロイ推奨)
     - [Cloudflare Workersへのデプロイ](#cloudflare-workersへのデプロイ)
 - [マルチプロバイダーサポート](#マルチプロバイダーサポート)
 - [仕組み](#仕組み)
@@ -180,7 +180,7 @@ npm run dev
 また、Tencent EdgeOne Pagesでデプロイすると、[DeepSeekモデルの毎日の無料クォータ](https://pages.edgeone.ai/document/edge-ai)が付与されます。

-### Vercelへのデプロイ
+### Vercelへのデプロイ(推奨)

 [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io)
@@ -205,7 +205,6 @@ Next.jsアプリをデプロイする最も簡単な方法は、Next.jsの作成
 - OpenRouter
 - DeepSeek
 - SiliconFlow
-- ModelScope
 - SGLang
 - Vercel AI Gateway

View File

@@ -158,19 +158,6 @@ AI_MODEL=llama3.2
 OLLAMA_BASE_URL=http://localhost:11434
 ```
-
-### ModelScope
-
-```bash
-MODELSCOPE_API_KEY=your_api_key
-AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
-```
-
-任意のカスタムエンドポイント:
-
-```bash
-MODELSCOPE_BASE_URL=https://your-custom-endpoint
-```

 ### Vercel AI Gateway

 Vercel AI Gateway は、単一の API キーで複数の AI プロバイダーへの統合アクセスを提供します。これにより認証が簡素化され、複数の API キーを管理することなくプロバイダーを切り替えることができます。

View File

@@ -351,10 +351,6 @@ const PROVIDER_ENV_MAP: Record<string, { apiKey: string; baseUrl: string }> = {
         apiKey: "SILICONFLOW_API_KEY",
         baseUrl: "SILICONFLOW_BASE_URL",
     },
-    modelscope: {
-        apiKey: "MODELSCOPE_API_KEY",
-        baseUrl: "MODELSCOPE_BASE_URL",
-    },
     gateway: { apiKey: "AI_GATEWAY_API_KEY", baseUrl: "AI_GATEWAY_BASE_URL" },
     // bedrock and ollama don't use API keys in the same way
     bedrock: { apiKey: "", baseUrl: "" },

View File

@@ -55,7 +55,6 @@
                 <option value="openrouter">OpenRouter</option>
                 <option value="deepseek">DeepSeek</option>
                 <option value="siliconflow">SiliconFlow</option>
-                <option value="modelscope">ModelScope</option>
                 <option value="ollama">Ollama (Local)</option>
             </select>
         </div>

View File

@@ -288,7 +288,6 @@ function getProviderLabel(provider) {
         openrouter: "OpenRouter",
         deepseek: "DeepSeek",
         siliconflow: "SiliconFlow",
-        modelscope: "ModelScope",
         ollama: "Ollama",
     }
     return labels[provider] || provider

View File

@@ -72,10 +72,6 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
 # SGLANG_API_KEY=your-sglang-api-key
 # SGLANG_BASE_URL=http://127.0.0.1:8000/v1 # Your SGLang endpoint

-# ModelScope Configuration
-# MODELSCOPE_API_KEY=ms-...
-# MODELSCOPE_BASE_URL=https://api-inference.modelscope.cn/v1 # Optional: Custom endpoint
-
 # ByteDance Doubao Configuration (via Volcengine)
 # DOUBAO_API_KEY=your-doubao-api-key
 # DOUBAO_BASE_URL=https://ark.cn-beijing.volces.com/api/v3 # ByteDance Volcengine endpoint

View File

@@ -23,7 +23,6 @@ export type ProviderName =
     | "gateway"
     | "edgeone"
     | "doubao"
-    | "modelscope"

 interface ModelConfig {
     model: any
@@ -60,7 +59,6 @@ const ALLOWED_CLIENT_PROVIDERS: ProviderName[] = [
     "gateway",
     "edgeone",
     "doubao",
-    "modelscope",
 ]

 // Bedrock provider options for Anthropic beta features
@@ -355,7 +353,6 @@ function buildProviderOptions(
         case "siliconflow":
         case "sglang":
         case "gateway":
-        case "modelscope":
         case "doubao": {
             // These providers don't have reasoning configs in AI SDK yet
             // Gateway passes through to underlying providers which handle their own configs
@@ -384,7 +381,6 @@ const PROVIDER_ENV_VARS: Record<ProviderName, string | null> = {
     gateway: "AI_GATEWAY_API_KEY",
     edgeone: null, // No credentials needed - uses EdgeOne Edge AI
     doubao: "DOUBAO_API_KEY",
-    modelscope: "MODELSCOPE_API_KEY",
 }

 /**
@@ -449,7 +445,7 @@ function validateProviderCredentials(provider: ProviderName): void {
  * Get the AI model based on environment variables
  *
  * Environment variables:
- * - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, modelscope)
+ * - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway)
  * - AI_MODEL: The model ID/name for the selected provider
  *
  * Provider-specific env vars:
@@ -467,8 +463,6 @@ function validateProviderCredentials(provider: ProviderName): void {
  * - SILICONFLOW_BASE_URL: SiliconFlow endpoint (optional, defaults to https://api.siliconflow.com/v1)
  * - SGLANG_API_KEY: SGLang API key
  * - SGLANG_BASE_URL: SGLang endpoint (optional)
- * - MODELSCOPE_API_KEY: ModelScope API key
- * - MODELSCOPE_BASE_URL: ModelScope endpoint (optional)
  */
 export function getAIModel(overrides?: ClientOverrides): ModelConfig {
     // SECURITY: Prevent SSRF attacks (GHSA-9qf7-mprq-9qgm)
@@ -543,7 +537,6 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
             `- AZURE_API_KEY for Azure\n` +
             `- SILICONFLOW_API_KEY for SiliconFlow\n` +
             `- SGLANG_API_KEY for SGLang\n` +
-            `- MODELSCOPE_API_KEY for ModelScope\n` +
             `Or set AI_PROVIDER=ollama for local Ollama.`,
         )
     } else {
@@ -899,23 +892,9 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
             break
         }
-        case "modelscope": {
-            const apiKey = overrides?.apiKey || process.env.MODELSCOPE_API_KEY
-            const baseURL =
-                overrides?.baseUrl ||
-                process.env.MODELSCOPE_BASE_URL ||
-                "https://api-inference.modelscope.cn/v1"
-            const modelscopeProvider = createOpenAI({
-                apiKey,
-                baseURL,
-            })
-            model = modelscopeProvider.chat(modelId)
-            break
-        }
         default:
             throw new Error(
-                `Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao, modelscope`,
+                `Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao`,
             )
     }

View File

@@ -28,8 +28,7 @@
         "azure": "Azure OpenAI",
         "openrouter": "OpenRouter",
         "deepseek": "DeepSeek",
-        "siliconflow": "SiliconFlow",
-        "modelscope": "ModelScope"
+        "siliconflow": "SiliconFlow"
     },
     "chat": {
         "placeholder": "Describe your diagram or upload a file...",
@@ -193,9 +192,7 @@
         "description": "Paste a URL to extract and analyze its content",
         "Extracting": "Extracting...",
         "extract": "Extract",
-        "Cancel": "Cancel",
-        "enterUrl": "Please enter a URL",
-        "invalidFormat": "Invalid URL format"
+        "Cancel": "Cancel"
     },
     "reasoning": {
         "thinking": "Thinking...",

View File

@@ -28,8 +28,7 @@
         "azure": "Azure OpenAI",
         "openrouter": "OpenRouter",
         "deepseek": "DeepSeek",
-        "siliconflow": "SiliconFlow",
-        "modelscope": "ModelScope"
+        "siliconflow": "SiliconFlow"
     },
     "chat": {
         "placeholder": "ダイアグラムを説明するか、ファイルをアップロード...",
@@ -193,9 +192,7 @@
         "description": "URLを貼り付けてそのコンテンツを抽出および分析します",
         "Extracting": "抽出中...",
         "extract": "抽出",
-        "Cancel": "キャンセル",
-        "enterUrl": "URLを入力してください",
-        "invalidFormat": "無効なURL形式です"
+        "Cancel": "キャンセル"
     },
     "reasoning": {
         "thinking": "考え中...",

View File

@@ -28,8 +28,7 @@
         "azure": "Azure OpenAI",
         "openrouter": "OpenRouter",
         "deepseek": "DeepSeek",
-        "siliconflow": "SiliconFlow",
-        "modelscope": "ModelScope"
+        "siliconflow": "SiliconFlow"
     },
     "chat": {
         "placeholder": "描述您的图表或上传文件...",
@@ -193,9 +192,7 @@
         "description": "粘贴 URL 以提取和分析其内容",
         "Extracting": "提取中...",
         "extract": "提取",
-        "Cancel": "取消",
-        "enterUrl": "请输入 URL",
-        "invalidFormat": "URL 格式无效"
+        "Cancel": "取消"
     },
     "reasoning": {
         "thinking": "思考中...",

View File

@@ -13,7 +13,6 @@ export type ProviderName =
     | "gateway"
     | "edgeone"
     | "doubao"
-    | "modelscope"

 // Individual model configuration
 export interface ModelConfig {
@@ -92,10 +91,6 @@ export const PROVIDER_INFO: Record<
         label: "Doubao (ByteDance)",
         defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3",
     },
-    modelscope: {
-        label: "ModelScope",
-        defaultBaseUrl: "https://api-inference.modelscope.cn/v1",
-    },
 }

 // Suggested models per provider for quick add
@@ -236,17 +231,6 @@ export const SUGGESTED_MODELS: Record<ProviderName, string[]> = {
         "doubao-pro-32k-241215",
         "doubao-pro-256k-241215",
     ],
-    modelscope: [
-        // Qwen
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-32B-Instruct",
-        "Qwen/Qwen3-235B-A22B-Instruct-2507",
-        "Qwen/Qwen3-VL-235B-A22B-Instruct",
-        "Qwen/Qwen3-32B",
-        // DeepSeek
-        "deepseek-ai/DeepSeek-R1-0528",
-        "deepseek-ai/DeepSeek-V3.2",
-    ],
 }

 // Helper to generate UUID

View File

@@ -24,7 +24,6 @@
         "dist": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml",
         "dist:mac": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --mac",
         "dist:win": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --win",
-        "dist:win:build": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --win --publish never",
         "dist:linux": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --linux",
         "dist:all": "npm run electron:build && npm run electron:prepare && npx electron-builder --config electron/electron-builder.yml --mac --win --linux",
         "test": "vitest",
@@ -73,7 +72,6 @@
         "jsonrepair": "^3.13.1",
         "lucide-react": "^0.562.0",
         "motion": "^12.23.25",
-        "nanoid": "^3.3.11",
         "negotiator": "^1.0.0",
         "next": "^16.0.7",
         "ollama-ai-provider-v2": "^2.0.0",

View File

@@ -1,18 +1,18 @@
 {
   "name": "@next-ai-drawio/mcp-server",
-  "version": "0.1.12",
+  "version": "0.1.6",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@next-ai-drawio/mcp-server",
-      "version": "0.1.12",
+      "version": "0.1.6",
       "license": "Apache-2.0",
       "dependencies": {
         "@modelcontextprotocol/sdk": "^1.0.4",
         "linkedom": "^0.18.0",
         "open": "^11.0.0",
-        "zod": "^4.0.0"
+        "zod": "^3.24.0"
       },
       "bin": {
         "next-ai-drawio-mcp": "dist/index.js"
@@ -2051,9 +2051,9 @@
       }
     },
     "node_modules/zod": {
-      "version": "4.3.5",
-      "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.5.tgz",
-      "integrity": "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==",
+      "version": "3.25.76",
+      "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
+      "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
       "license": "MIT",
       "peer": true,
       "funding": {

View File

@@ -1,6 +1,6 @@
 {
   "name": "@next-ai-drawio/mcp-server",
-  "version": "0.1.12",
+  "version": "0.1.11",
   "description": "MCP server for Next AI Draw.io - AI-powered diagram generation with real-time browser preview",
   "type": "module",
   "main": "dist/index.js",
@@ -21,16 +21,16 @@
     "claude",
     "model-context-protocol"
   ],
-  "author": "DayuanJiang",
+  "author": "Biki-dev",
   "license": "Apache-2.0",
   "repository": {
     "type": "git",
-    "url": "https://github.com/DayuanJiang/next-ai-draw-io",
+    "url": "https://github.com/Biki-dev/next-ai-draw-io",
     "directory": "packages/mcp-server"
   },
   "homepage": "https://next-ai-drawio.jiang.jp",
   "bugs": {
-    "url": "https://github.com/DayuanJiang/next-ai-draw-io/issues"
+    "url": "https://github.com/Biki-dev/next-ai-draw-io/issues"
   },
   "publishConfig": {
     "access": "public"
@@ -39,7 +39,7 @@
     "@modelcontextprotocol/sdk": "^1.0.4",
     "linkedom": "^0.18.0",
     "open": "^11.0.0",
-    "zod": "^4.0.0"
+    "zod": "^3.24.0"
   },
   "devDependencies": {
     "@types/node": "^24.0.0",