Feat/add modelscope support (#521)

* add ModelScope API support

* update some documentation

* modify some details
This commit is contained in:
yrk111222
2026-01-06 18:41:25 +08:00
committed by GitHub
parent ffcb241383
commit 54fd48506d
21 changed files with 172 additions and 6 deletions

View File

@@ -211,6 +211,7 @@ See the [Next.js deployment documentation](https://nextjs.org/docs/app/building-
- OpenRouter - OpenRouter
- DeepSeek - DeepSeek
- SiliconFlow - SiliconFlow
- ModelScope
- SGLang - SGLang
- Vercel AI Gateway - Vercel AI Gateway

View File

@@ -297,6 +297,7 @@ export default function AboutCN() {
<li>OpenRouter</li> <li>OpenRouter</li>
<li>DeepSeek</li> <li>DeepSeek</li>
<li>SiliconFlow</li> <li>SiliconFlow</li>
<li>ModelScope</li>
</ul> </ul>
<p className="text-gray-700 mt-4"> <p className="text-gray-700 mt-4">
<code>claude-sonnet-4-5</code>{" "} <code>claude-sonnet-4-5</code>{" "}

View File

@@ -312,6 +312,7 @@ export default function AboutJA() {
<li>OpenRouter</li> <li>OpenRouter</li>
<li>DeepSeek</li> <li>DeepSeek</li>
<li>SiliconFlow</li> <li>SiliconFlow</li>
<li>ModelScope</li>
</ul> </ul>
<p className="text-gray-700 mt-4"> <p className="text-gray-700 mt-4">
<code>claude-sonnet-4-5</code> <code>claude-sonnet-4-5</code>

View File

@@ -331,6 +331,7 @@ export default function About() {
<li>OpenRouter</li> <li>OpenRouter</li>
<li>DeepSeek</li> <li>DeepSeek</li>
<li>SiliconFlow</li> <li>SiliconFlow</li>
<li>ModelScope</li>
</ul> </ul>
<p className="text-gray-700 mt-4"> <p className="text-gray-700 mt-4">
Note that <code>claude-sonnet-4-5</code> has trained on Note that <code>claude-sonnet-4-5</code> has trained on

View File

@@ -274,6 +274,75 @@ export async function POST(req: Request) {
break break
} }
case "modelscope": {
  // ModelScope exposes an OpenAI-compatible endpoint; fall back to the
  // public inference URL when no custom base URL is supplied.
  const baseURL =
    baseUrl || "https://api-inference.modelscope.cn/v1"
  const startTime = Date.now()
  try {
    // Initiate a streaming request (required for QwQ-32B and certain
    // Qwen3 models, which only accept stream mode on this endpoint).
    const response = await fetch(
      `${baseURL}/chat/completions`,
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
          model: modelId,
          messages: [
            { role: "user", content: "Say 'OK'" },
          ],
          max_tokens: 20,
          stream: true,
          enable_thinking: false,
        }),
      },
    )
    if (!response.ok) {
      const errorText = await response.text()
      throw new Error(
        `ModelScope API error (${response.status}): ${errorText}`,
      )
    }
    // response.ok above already guarantees a 2xx status, so only the
    // content type needs validating here. (The previous additional
    // `status === 200` check rejected other successful statuses with a
    // misleading "Unexpected response format" error.)
    const contentType =
      response.headers.get("content-type") || ""
    const isValidStreamingResponse =
      contentType.includes("text/event-stream") ||
      contentType.includes("application/json")
    if (!isValidStreamingResponse) {
      throw new Error(
        `Unexpected response format: ${contentType}`,
      )
    }
    const responseTime = Date.now() - startTime
    // We only needed to confirm the model responds; cancel the stream
    // immediately and ignore any cancellation failure.
    if (response.body) {
      response.body.cancel().catch(() => {
        /* Ignore cancellation errors */
      })
    }
    return NextResponse.json({
      valid: true,
      responseTime,
      note: "ModelScope model validated (using streaming API)",
    })
  } catch (error) {
    // Log with context, then rethrow so the route's shared error
    // handling converts this into an error response.
    console.error(
      "[validate-model] ModelScope validation failed:",
      error,
    )
    throw error
  }
}
default: default:
return NextResponse.json( return NextResponse.json(
{ valid: false, error: `Unknown provider: ${provider}` }, { valid: false, error: `Unknown provider: ${provider}` },

View File

@@ -79,6 +79,7 @@ const PROVIDER_LOGO_MAP: Record<string, string> = {
gateway: "vercel", gateway: "vercel",
edgeone: "tencent-cloud", edgeone: "tencent-cloud",
doubao: "bytedance", doubao: "bytedance",
modelscope: "modelscope",
} }
// Provider logo component // Provider logo component

View File

@@ -50,6 +50,7 @@ const PROVIDER_LOGO_MAP: Record<string, string> = {
gateway: "vercel", gateway: "vercel",
edgeone: "tencent-cloud", edgeone: "tencent-cloud",
doubao: "bytedance", doubao: "bytedance",
modelscope: "modelscope",
} }
// Group models by providerLabel (handles duplicate providers) // Group models by providerLabel (handles duplicate providers)

View File

@@ -204,6 +204,7 @@ npm run dev
- OpenRouter - OpenRouter
- DeepSeek - DeepSeek
- SiliconFlow - SiliconFlow
- ModelScope
- SGLang - SGLang
- Vercel AI Gateway - Vercel AI Gateway

View File

@@ -152,6 +152,19 @@ AI_PROVIDER=ollama
AI_MODEL=llama3.2 AI_MODEL=llama3.2
``` ```
### ModelScope
```bash
MODELSCOPE_API_KEY=your_api_key
AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
```
可选的自定义端点:
```bash
MODELSCOPE_BASE_URL=https://your-custom-endpoint
```
可选的自定义 URL 可选的自定义 URL
```bash ```bash

View File

@@ -158,6 +158,19 @@ Optional custom URL:
OLLAMA_BASE_URL=http://localhost:11434 OLLAMA_BASE_URL=http://localhost:11434
``` ```
### ModelScope
```bash
MODELSCOPE_API_KEY=your_api_key
AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
```
Optional custom endpoint:
```bash
MODELSCOPE_BASE_URL=https://your-custom-endpoint
```
### Vercel AI Gateway ### Vercel AI Gateway
Vercel AI Gateway provides unified access to multiple AI providers through a single API key. This simplifies authentication and allows you to switch between providers without managing multiple API keys. Vercel AI Gateway provides unified access to multiple AI providers through a single API key. This simplifies authentication and allows you to switch between providers without managing multiple API keys.
@@ -201,7 +214,7 @@ If you only configure **one** provider's API key, the system will automatically
If you configure **multiple** API keys, you must explicitly set `AI_PROVIDER`: If you configure **multiple** API keys, you must explicitly set `AI_PROVIDER`:
```bash ```bash
AI_PROVIDER=google # or: openai, anthropic, deepseek, siliconflow, doubao, azure, bedrock, openrouter, ollama, gateway, sglang AI_PROVIDER=google # or: openai, anthropic, deepseek, siliconflow, doubao, azure, bedrock, openrouter, ollama, gateway, sglang, modelscope
``` ```
## Model Capability Requirements ## Model Capability Requirements

View File

@@ -205,6 +205,7 @@ Next.jsアプリをデプロイする最も簡単な方法は、Next.jsの作成
- OpenRouter - OpenRouter
- DeepSeek - DeepSeek
- SiliconFlow - SiliconFlow
- ModelScope
- SGLang - SGLang
- Vercel AI Gateway - Vercel AI Gateway

View File

@@ -158,6 +158,19 @@ AI_MODEL=llama3.2
OLLAMA_BASE_URL=http://localhost:11434 OLLAMA_BASE_URL=http://localhost:11434
``` ```
### ModelScope
```bash
MODELSCOPE_API_KEY=your_api_key
AI_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
```
任意のカスタムエンドポイント:
```bash
MODELSCOPE_BASE_URL=https://your-custom-endpoint
```
### Vercel AI Gateway ### Vercel AI Gateway
Vercel AI Gateway は、単一の API キーで複数の AI プロバイダーへの統合アクセスを提供します。これにより認証が簡素化され、複数の API キーを管理することなくプロバイダーを切り替えることができます。 Vercel AI Gateway は、単一の API キーで複数の AI プロバイダーへの統合アクセスを提供します。これにより認証が簡素化され、複数の API キーを管理することなくプロバイダーを切り替えることができます。

View File

@@ -351,6 +351,10 @@ const PROVIDER_ENV_MAP: Record<string, { apiKey: string; baseUrl: string }> = {
apiKey: "SILICONFLOW_API_KEY", apiKey: "SILICONFLOW_API_KEY",
baseUrl: "SILICONFLOW_BASE_URL", baseUrl: "SILICONFLOW_BASE_URL",
}, },
modelscope: {
apiKey: "MODELSCOPE_API_KEY",
baseUrl: "MODELSCOPE_BASE_URL",
},
gateway: { apiKey: "AI_GATEWAY_API_KEY", baseUrl: "AI_GATEWAY_BASE_URL" }, gateway: { apiKey: "AI_GATEWAY_API_KEY", baseUrl: "AI_GATEWAY_BASE_URL" },
// bedrock and ollama don't use API keys in the same way // bedrock and ollama don't use API keys in the same way
bedrock: { apiKey: "", baseUrl: "" }, bedrock: { apiKey: "", baseUrl: "" },

View File

@@ -55,6 +55,7 @@
<option value="openrouter">OpenRouter</option> <option value="openrouter">OpenRouter</option>
<option value="deepseek">DeepSeek</option> <option value="deepseek">DeepSeek</option>
<option value="siliconflow">SiliconFlow</option> <option value="siliconflow">SiliconFlow</option>
<option value="modelscope">ModelScope</option>
<option value="ollama">Ollama (Local)</option> <option value="ollama">Ollama (Local)</option>
</select> </select>
</div> </div>

View File

@@ -288,6 +288,7 @@ function getProviderLabel(provider) {
openrouter: "OpenRouter", openrouter: "OpenRouter",
deepseek: "DeepSeek", deepseek: "DeepSeek",
siliconflow: "SiliconFlow", siliconflow: "SiliconFlow",
modelscope: "ModelScope",
ollama: "Ollama", ollama: "Ollama",
} }
return labels[provider] || provider return labels[provider] || provider

View File

@@ -72,6 +72,10 @@ AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
# SGLANG_API_KEY=your-sglang-api-key # SGLANG_API_KEY=your-sglang-api-key
# SGLANG_BASE_URL=http://127.0.0.1:8000/v1 # Your SGLang endpoint # SGLANG_BASE_URL=http://127.0.0.1:8000/v1 # Your SGLang endpoint
# ModelScope Configuration
# MODELSCOPE_API_KEY=ms-...
# MODELSCOPE_BASE_URL=https://api-inference.modelscope.cn/v1 # Optional: Custom endpoint
# ByteDance Doubao Configuration (via Volcengine) # ByteDance Doubao Configuration (via Volcengine)
# DOUBAO_API_KEY=your-doubao-api-key # DOUBAO_API_KEY=your-doubao-api-key
# DOUBAO_BASE_URL=https://ark.cn-beijing.volces.com/api/v3 # ByteDance Volcengine endpoint # DOUBAO_BASE_URL=https://ark.cn-beijing.volces.com/api/v3 # ByteDance Volcengine endpoint

View File

@@ -23,6 +23,7 @@ export type ProviderName =
| "gateway" | "gateway"
| "edgeone" | "edgeone"
| "doubao" | "doubao"
| "modelscope"
interface ModelConfig { interface ModelConfig {
model: any model: any
@@ -59,6 +60,7 @@ const ALLOWED_CLIENT_PROVIDERS: ProviderName[] = [
"gateway", "gateway",
"edgeone", "edgeone",
"doubao", "doubao",
"modelscope",
] ]
// Bedrock provider options for Anthropic beta features // Bedrock provider options for Anthropic beta features
@@ -353,6 +355,7 @@ function buildProviderOptions(
case "siliconflow": case "siliconflow":
case "sglang": case "sglang":
case "gateway": case "gateway":
case "modelscope":
case "doubao": { case "doubao": {
// These providers don't have reasoning configs in AI SDK yet // These providers don't have reasoning configs in AI SDK yet
// Gateway passes through to underlying providers which handle their own configs // Gateway passes through to underlying providers which handle their own configs
@@ -381,6 +384,7 @@ const PROVIDER_ENV_VARS: Record<ProviderName, string | null> = {
gateway: "AI_GATEWAY_API_KEY", gateway: "AI_GATEWAY_API_KEY",
edgeone: null, // No credentials needed - uses EdgeOne Edge AI edgeone: null, // No credentials needed - uses EdgeOne Edge AI
doubao: "DOUBAO_API_KEY", doubao: "DOUBAO_API_KEY",
modelscope: "MODELSCOPE_API_KEY",
} }
/** /**
@@ -445,7 +449,7 @@ function validateProviderCredentials(provider: ProviderName): void {
* Get the AI model based on environment variables * Get the AI model based on environment variables
* *
* Environment variables: * Environment variables:
* - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway) * - AI_PROVIDER: The provider to use (bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, modelscope)
* - AI_MODEL: The model ID/name for the selected provider * - AI_MODEL: The model ID/name for the selected provider
* *
* Provider-specific env vars: * Provider-specific env vars:
@@ -463,6 +467,8 @@ function validateProviderCredentials(provider: ProviderName): void {
* - SILICONFLOW_BASE_URL: SiliconFlow endpoint (optional, defaults to https://api.siliconflow.com/v1) * - SILICONFLOW_BASE_URL: SiliconFlow endpoint (optional, defaults to https://api.siliconflow.com/v1)
* - SGLANG_API_KEY: SGLang API key * - SGLANG_API_KEY: SGLang API key
* - SGLANG_BASE_URL: SGLang endpoint (optional) * - SGLANG_BASE_URL: SGLang endpoint (optional)
* - MODELSCOPE_API_KEY: ModelScope API key
* - MODELSCOPE_BASE_URL: ModelScope endpoint (optional)
*/ */
export function getAIModel(overrides?: ClientOverrides): ModelConfig { export function getAIModel(overrides?: ClientOverrides): ModelConfig {
// SECURITY: Prevent SSRF attacks (GHSA-9qf7-mprq-9qgm) // SECURITY: Prevent SSRF attacks (GHSA-9qf7-mprq-9qgm)
@@ -537,6 +543,7 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
`- AZURE_API_KEY for Azure\n` + `- AZURE_API_KEY for Azure\n` +
`- SILICONFLOW_API_KEY for SiliconFlow\n` + `- SILICONFLOW_API_KEY for SiliconFlow\n` +
`- SGLANG_API_KEY for SGLang\n` + `- SGLANG_API_KEY for SGLang\n` +
`- MODELSCOPE_API_KEY for ModelScope\n` +
`Or set AI_PROVIDER=ollama for local Ollama.`, `Or set AI_PROVIDER=ollama for local Ollama.`,
) )
} else { } else {
@@ -892,9 +899,23 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig {
break break
} }
case "modelscope": {
  // ModelScope serves an OpenAI-compatible API, so reuse the OpenAI
  // provider with ModelScope credentials. Client overrides take
  // precedence over environment configuration; the public inference
  // endpoint is the final fallback.
  const resolvedKey =
    overrides?.apiKey || process.env.MODELSCOPE_API_KEY
  const resolvedBase =
    overrides?.baseUrl ||
    process.env.MODELSCOPE_BASE_URL ||
    "https://api-inference.modelscope.cn/v1"
  model = createOpenAI({
    apiKey: resolvedKey,
    baseURL: resolvedBase,
  }).chat(modelId)
  break
}
default: default:
throw new Error( throw new Error(
`Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao`, `Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao, modelscope`,
) )
} }

View File

@@ -28,7 +28,8 @@
"azure": "Azure OpenAI", "azure": "Azure OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"deepseek": "DeepSeek", "deepseek": "DeepSeek",
"siliconflow": "SiliconFlow" "siliconflow": "SiliconFlow",
"modelscope": "ModelScope"
}, },
"chat": { "chat": {
"placeholder": "Describe your diagram or upload a file...", "placeholder": "Describe your diagram or upload a file...",

View File

@@ -28,7 +28,8 @@
"azure": "Azure OpenAI", "azure": "Azure OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"deepseek": "DeepSeek", "deepseek": "DeepSeek",
"siliconflow": "SiliconFlow" "siliconflow": "SiliconFlow",
"modelscope": "ModelScope"
}, },
"chat": { "chat": {
"placeholder": "ダイアグラムを説明するか、ファイルをアップロード...", "placeholder": "ダイアグラムを説明するか、ファイルをアップロード...",

View File

@@ -28,7 +28,8 @@
"azure": "Azure OpenAI", "azure": "Azure OpenAI",
"openrouter": "OpenRouter", "openrouter": "OpenRouter",
"deepseek": "DeepSeek", "deepseek": "DeepSeek",
"siliconflow": "SiliconFlow" "siliconflow": "SiliconFlow",
"modelscope": "ModelScope"
}, },
"chat": { "chat": {
"placeholder": "描述您的图表或上传文件...", "placeholder": "描述您的图表或上传文件...",

View File

@@ -13,6 +13,7 @@ export type ProviderName =
| "gateway" | "gateway"
| "edgeone" | "edgeone"
| "doubao" | "doubao"
| "modelscope"
// Individual model configuration // Individual model configuration
export interface ModelConfig { export interface ModelConfig {
@@ -91,6 +92,10 @@ export const PROVIDER_INFO: Record<
label: "Doubao (ByteDance)", label: "Doubao (ByteDance)",
defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3", defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3",
}, },
modelscope: {
label: "ModelScope",
defaultBaseUrl: "https://api-inference.modelscope.cn/v1",
},
} }
// Suggested models per provider for quick add // Suggested models per provider for quick add
@@ -231,6 +236,17 @@ export const SUGGESTED_MODELS: Record<ProviderName, string[]> = {
"doubao-pro-32k-241215", "doubao-pro-32k-241215",
"doubao-pro-256k-241215", "doubao-pro-256k-241215",
], ],
modelscope: [
// Qwen
"Qwen/Qwen2.5-72B-Instruct",
"Qwen/Qwen2.5-32B-Instruct",
"Qwen/Qwen3-235B-A22B-Instruct-2507",
"Qwen/Qwen3-VL-235B-A22B-Instruct",
"Qwen/Qwen3-32B",
// DeepSeek
"deepseek-ai/DeepSeek-R1-0528",
"deepseek-ai/DeepSeek-V3.2",
],
} }
// Helper to generate UUID // Helper to generate UUID