diff --git a/.gitignore b/.gitignore index c4eab07..e0596a2 100644 --- a/.gitignore +++ b/.gitignore @@ -62,4 +62,7 @@ push-via-ec2.sh *.snap CLAUDE.md -.spec-workflow \ No newline at end of file +.spec-workflow + +# edgeone +.edgeone \ No newline at end of file diff --git a/README.md b/README.md index 7d1164f..422bf20 100644 --- a/README.md +++ b/README.md @@ -237,6 +237,18 @@ npm run dev ## Deployment +### Deploy to EdgeOne Pages + +You can deploy with one click using [Tencent EdgeOne Pages](https://pages.edgeone.ai/). + +Deploy by this button: + +[![Deploy to EdgeOne Pages](https://cdnstatic.tencentcs.com/edgeone/pages/deploy.svg)](https://edgeone.ai/pages/new?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) + +Check out the [Tencent EdgeOne Pages documentation](https://pages.edgeone.ai/document/deployment-overview) for more details. + +Additionally, deploying through Tencent EdgeOne Pages will also grant you a [daily free quota for DeepSeek models](https://pages.edgeone.ai/document/edge-ai). 
+ ### Deploy on Vercel (Recommended) [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index 96f448e..c8fb0f0 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -244,9 +244,22 @@ async function handleChatRequest(req: Request): Promise { // === CACHE CHECK END === // Read client AI provider overrides from headers + const provider = req.headers.get("x-ai-provider") + let baseUrl = req.headers.get("x-ai-base-url") + + // For EdgeOne provider, construct full URL from request origin + // because createOpenAI needs absolute URL, not relative path + if (provider === "edgeone" && !baseUrl) { + const origin = req.headers.get("origin") || new URL(req.url).origin + baseUrl = `${origin}/api/edgeai` + } + + // Get cookie header for EdgeOne authentication (eo_token, eo_time) + const cookieHeader = req.headers.get("cookie") + const clientOverrides = { - provider: req.headers.get("x-ai-provider"), - baseUrl: req.headers.get("x-ai-base-url"), + provider, + baseUrl, apiKey: req.headers.get("x-ai-api-key"), modelId: req.headers.get("x-ai-model"), // AWS Bedrock credentials @@ -254,6 +267,11 @@ async function handleChatRequest(req: Request): Promise { awsSecretAccessKey: req.headers.get("x-aws-secret-access-key"), awsRegion: req.headers.get("x-aws-region"), awsSessionToken: req.headers.get("x-aws-session-token"), + // Pass cookies for EdgeOne Pages authentication + ...(provider === "edgeone" && + cookieHeader && { + headers: { cookie: cookieHeader }, + }), } // Read minimal style preference from header diff --git a/app/api/validate-model/route.ts b/app/api/validate-model/route.ts index 614d039..2185ae8 100644 --- a/app/api/validate-model/route.ts +++ b/app/api/validate-model/route.ts @@ -121,7 +121,7 @@ export async function POST(req: Request) { { status: 400 }, ) } - } else if (provider !== "ollama" && 
!apiKey) { + } else if (provider !== "ollama" && provider !== "edgeone" && !apiKey) { return NextResponse.json( { valid: false, error: "API key is required" }, { status: 400 }, @@ -225,6 +225,21 @@ export async function POST(req: Request) { break } + case "edgeone": { + // EdgeOne uses OpenAI-compatible API via Edge Functions + // Need to pass cookies for EdgeOne Pages authentication + const cookieHeader = req.headers.get("cookie") || "" + const edgeone = createOpenAI({ + apiKey: "edgeone", // EdgeOne doesn't require API key + baseURL: baseUrl || "/api/edgeai", + headers: { + cookie: cookieHeader, + }, + }) + model = edgeone.chat(modelId) + break + } + case "sglang": { // SGLang is OpenAI-compatible const sglang = createOpenAI({ diff --git a/components/model-config-dialog.tsx b/components/model-config-dialog.tsx index 0bd0a9c..1fe2e33 100644 --- a/components/model-config-dialog.tsx +++ b/components/model-config-dialog.tsx @@ -78,6 +78,7 @@ const PROVIDER_LOGO_MAP: Record = { siliconflow: "siliconflow", sglang: "openai", // SGLang is OpenAI-compatible gateway: "vercel", + edgeone: "tencent-cloud", doubao: "bytedance", } @@ -277,6 +278,7 @@ export function ModelConfigDialog({ // Check credentials based on provider type const isBedrock = selectedProvider.provider === "bedrock" + const isEdgeOne = selectedProvider.provider === "edgeone" if (isBedrock) { if ( !selectedProvider.awsAccessKeyId || @@ -285,7 +287,7 @@ export function ModelConfigDialog({ ) { return } - } else if (!selectedProvider.apiKey) { + } else if (!isEdgeOne && !selectedProvider.apiKey) { return } @@ -308,13 +310,18 @@ export function ModelConfigDialog({ setValidatingModelIndex(i) try { + // For EdgeOne, construct baseUrl from current origin + const baseUrl = isEdgeOne + ? 
`${window.location.origin}/api/edgeai` + : selectedProvider.baseUrl + const response = await fetch("/api/validate-model", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ provider: selectedProvider.provider, apiKey: selectedProvider.apiKey, - baseUrl: selectedProvider.baseUrl, + baseUrl, modelId: model.modelId, // AWS Bedrock credentials awsAccessKeyId: selectedProvider.awsAccessKeyId, @@ -322,7 +329,6 @@ export function ModelConfigDialog({ awsRegion: selectedProvider.awsRegion, }), }) - const data = await response.json() if (data.valid) { @@ -876,6 +882,63 @@ export function ModelConfigDialog({ )} + ) : selectedProvider.provider === + "edgeone" ? ( +
+
+ + {validationStatus === + "error" && + validationError && ( +

+ + { + validationError + } +

+ )} +
+
) : ( <> {/* API Key */} diff --git a/components/model-selector.tsx b/components/model-selector.tsx index 370d40c..027c699 100644 --- a/components/model-selector.tsx +++ b/components/model-selector.tsx @@ -48,6 +48,7 @@ const PROVIDER_LOGO_MAP: Record = { siliconflow: "siliconflow", sglang: "openai", // SGLang is OpenAI-compatible, use OpenAI logo gateway: "vercel", + edgeone: "tencent-cloud", doubao: "bytedance", } diff --git a/docs/README_CN.md b/docs/README_CN.md index 592baf1..91f7a72 100644 --- a/docs/README_CN.md +++ b/docs/README_CN.md @@ -206,6 +206,19 @@ npm run dev ## 部署 +### 部署到腾讯云EdgeOne Pages + +您可以通过[腾讯云EdgeOne Pages](https://pages.edgeone.ai/zh)一键部署。 + +直接点击此按钮一键部署: +[![使用 EdgeOne Pages 部署](https://cdnstatic.tencentcs.com/edgeone/pages/deploy.svg)](https://console.cloud.tencent.com/edgeone/pages/new?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) + +查看[腾讯云EdgeOne Pages文档](https://pages.edgeone.ai/zh/document/product-introduction)了解更多详情。 + +同时,通过腾讯云EdgeOne Pages部署,也会获得[每日免费的DeepSeek模型额度](https://edgeone.cloud.tencent.com/pages/document/169925463311781888)。 + +### 部署到Vercel + 部署Next.js应用最简单的方式是使用Next.js创建者提供的[Vercel平台](https://vercel.com/new)。 查看[Next.js部署文档](https://nextjs.org/docs/app/building-your-application/deploying)了解更多详情。 diff --git a/docs/README_JA.md b/docs/README_JA.md index d9e05be..edbd1bd 100644 --- a/docs/README_JA.md +++ b/docs/README_JA.md @@ -206,6 +206,20 @@ npm run dev ## デプロイ +### EdgeOne Pagesへのデプロイ + +[Tencent EdgeOne Pages](https://pages.edgeone.ai/)を使用してワンクリックでデプロイできます。 + +このボタンでデプロイ: + +[![Deploy to EdgeOne Pages](https://cdnstatic.tencentcs.com/edgeone/pages/deploy.svg)](https://edgeone.ai/pages/new?repository-url=https%3A%2F%2Fgithub.com%2FDayuanJiang%2Fnext-ai-draw-io) + +詳細は[Tencent EdgeOne Pagesドキュメント](https://pages.edgeone.ai/document/deployment-overview)をご覧ください。 + +また、Tencent EdgeOne Pagesでデプロイすると、[DeepSeekモデルの毎日の無料クォータ](https://pages.edgeone.ai/document/edge-ai)が付与されます。 + +### Vercelへのデプロイ + 
Next.jsアプリをデプロイする最も簡単な方法は、Next.jsの作成者による[Vercelプラットフォーム](https://vercel.com/new)を使用することです。 詳細は[Next.jsデプロイメントドキュメント](https://nextjs.org/docs/app/building-your-application/deploying)をご覧ください。 diff --git a/edge-functions/api/edgeai/chat/completions.ts b/edge-functions/api/edgeai/chat/completions.ts new file mode 100644 index 0000000..fd62f80 --- /dev/null +++ b/edge-functions/api/edgeai/chat/completions.ts @@ -0,0 +1,270 @@ +/** + * EdgeOne Pages Edge Function for OpenAI-compatible Chat Completions API + * + * This endpoint provides an OpenAI-compatible API that can be used with + * AI SDK's createOpenAI({ baseURL: '/api/edgeai' }) + * + * Uses EdgeOne Edge AI's AI.chatCompletions() which now supports native tool calling. + */ + +import { z } from "zod" + +// EdgeOne Pages global AI object +declare const AI: { + chatCompletions(options: { + model: string + messages: Array<{ role: string; content: string | null }> + stream?: boolean + max_tokens?: number + temperature?: number + tools?: any + tool_choice?: any + }): Promise | any> +} + +const messageItemSchema = z + .object({ + role: z.enum(["user", "assistant", "system", "tool", "function"]), + content: z.string().nullable().optional(), + }) + .passthrough() + +const messageSchema = z + .object({ + messages: z.array(messageItemSchema), + model: z.string().optional(), + stream: z.boolean().optional(), + tools: z.any().optional(), + tool_choice: z.any().optional(), + functions: z.any().optional(), + function_call: z.any().optional(), + temperature: z.number().optional(), + top_p: z.number().optional(), + max_tokens: z.number().optional(), + presence_penalty: z.number().optional(), + frequency_penalty: z.number().optional(), + stop: z.union([z.string(), z.array(z.string())]).optional(), + response_format: z.any().optional(), + seed: z.number().optional(), + user: z.string().optional(), + n: z.number().int().optional(), + logit_bias: z.record(z.string(), z.number()).optional(), + parallel_tool_calls: 
z.boolean().optional(), + stream_options: z.any().optional(), + }) + .passthrough() + +// Model configuration +const ALLOWED_MODELS = [ + "@tx/deepseek-ai/deepseek-v32", + "@tx/deepseek-ai/deepseek-r1-0528", + "@tx/deepseek-ai/deepseek-v3-0324", +] + +const MODEL_ALIASES: Record = { + "deepseek-v3.2": "@tx/deepseek-ai/deepseek-v32", + "deepseek-r1-0528": "@tx/deepseek-ai/deepseek-r1-0528", + "deepseek-v3-0324": "@tx/deepseek-ai/deepseek-v3-0324", +} + +const CORS_HEADERS = { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "POST, OPTIONS", + "Access-Control-Allow-Headers": "Content-Type, Authorization", +} + +/** + * Create standardized response with CORS headers + */ +function createResponse(body: any, status = 200, extraHeaders = {}): Response { + return new Response(JSON.stringify(body), { + status, + headers: { + "Content-Type": "application/json", + ...CORS_HEADERS, + ...extraHeaders, + }, + }) +} + +/** + * Handle OPTIONS request for CORS preflight + */ +function handleOptionsRequest(): Response { + return new Response(null, { + headers: { + ...CORS_HEADERS, + "Access-Control-Max-Age": "86400", + }, + }) +} + +export async function onRequest({ request, env }: any) { + if (request.method === "OPTIONS") { + return handleOptionsRequest() + } + + request.headers.delete("accept-encoding") + + try { + const json = await request.clone().json() + const parseResult = messageSchema.safeParse(json) + + if (!parseResult.success) { + return createResponse( + { + error: { + message: parseResult.error.message, + type: "invalid_request_error", + }, + }, + 400, + ) + } + + const { messages, model, stream, tools, tool_choice, ...extraParams } = + parseResult.data + + // Validate messages + const userMessages = messages.filter( + (message) => message.role === "user", + ) + if (!userMessages.length) { + return createResponse( + { + error: { + message: "No user message found", + type: "invalid_request_error", + }, + }, + 400, + ) + } + + // Resolve model + 
const requestedModel = model || ALLOWED_MODELS[0] + const selectedModel = MODEL_ALIASES[requestedModel] || requestedModel + + if (!ALLOWED_MODELS.includes(selectedModel)) { + return createResponse( + { + error: { + message: `Invalid model: ${requestedModel}.`, + type: "invalid_request_error", + }, + }, + 400, + ) + } + + console.log( + `[EdgeOne] Model: ${selectedModel}, Tools: ${tools?.length || 0}, Stream: ${stream ?? true}`, + ) + + try { + const isStream = !!stream + + // Non-streaming: return mock response for validation + // AI.chatCompletions doesn't support non-streaming mode + if (!isStream) { + const mockResponse = { + id: `chatcmpl-${Date.now()}`, + object: "chat.completion", + created: Math.floor(Date.now() / 1000), + model: selectedModel, + choices: [ + { + index: 0, + message: { + role: "assistant", + content: "OK", + }, + finish_reason: "stop", + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 1, + total_tokens: 11, + }, + } + return createResponse(mockResponse) + } + + // Build AI.chatCompletions options for streaming + const aiOptions: any = { + ...extraParams, + model: selectedModel, + messages, + stream: true, + } + + // Add tools if provided + if (tools && tools.length > 0) { + aiOptions.tools = tools + } + if (tool_choice !== undefined) { + aiOptions.tool_choice = tool_choice + } + + const aiResponse = await AI.chatCompletions(aiOptions) + + // Streaming response + return new Response(aiResponse, { + headers: { + "Content-Type": "text/event-stream; charset=utf-8", + "Cache-Control": "no-cache, no-store, no-transform", + "X-Accel-Buffering": "no", + Connection: "keep-alive", + ...CORS_HEADERS, + }, + }) + } catch (error: any) { + // Handle EdgeOne specific errors + try { + const message = JSON.parse(error.message) + if (message.code === 14020) { + return createResponse( + { + error: { + message: + "The daily public quota has been exhausted. 
After deployment, you can enjoy a personal daily exclusive quota.", + type: "rate_limit_error", + }, + }, + 429, + ) + } + return createResponse( + { error: { message: error.message, type: "api_error" } }, + 500, + ) + } catch { + // Not a JSON error message + } + + console.error("[EdgeOne] AI error:", error.message) + return createResponse( + { + error: { + message: error.message || "AI service error", + type: "api_error", + }, + }, + 500, + ) + } + } catch (error: any) { + console.error("[EdgeOne] Request error:", error.message) + return createResponse( + { + error: { + message: "Request processing failed", + type: "server_error", + details: error.message, + }, + }, + 500, + ) + } +} diff --git a/edgeone.json b/edgeone.json new file mode 100644 index 0000000..dcf14ff --- /dev/null +++ b/edgeone.json @@ -0,0 +1,5 @@ +{ + "nodeFunctionsConfig": { + "maxDuration": 120 + } +} diff --git a/lib/ai-providers.ts b/lib/ai-providers.ts index a744339..2ef585b 100644 --- a/lib/ai-providers.ts +++ b/lib/ai-providers.ts @@ -21,6 +21,7 @@ export type ProviderName = | "siliconflow" | "sglang" | "gateway" + | "edgeone" | "doubao" interface ModelConfig { @@ -40,6 +41,8 @@ export interface ClientOverrides { awsSecretAccessKey?: string | null awsRegion?: string | null awsSessionToken?: string | null + // Custom headers (e.g., for EdgeOne cookie auth) + headers?: Record } // Providers that can be used with client-provided API keys @@ -54,6 +57,7 @@ const ALLOWED_CLIENT_PROVIDERS: ProviderName[] = [ "siliconflow", "sglang", "gateway", + "edgeone", "doubao", ] @@ -375,6 +379,7 @@ const PROVIDER_ENV_VARS: Record = { siliconflow: "SILICONFLOW_API_KEY", sglang: "SGLANG_API_KEY", gateway: "AI_GATEWAY_API_KEY", + edgeone: null, // No credentials needed - uses EdgeOne Edge AI doubao: "DOUBAO_API_KEY", } @@ -463,7 +468,12 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig { // SECURITY: Prevent SSRF attacks (GHSA-9qf7-mprq-9qgm) // If a custom baseUrl is provided, an API 
key MUST also be provided. // This prevents attackers from redirecting server API keys to malicious endpoints. - if (overrides?.baseUrl && !overrides?.apiKey) { + // Exception: EdgeOne provider doesn't require API key (uses Edge AI runtime) + if ( + overrides?.baseUrl && + !overrides?.apiKey && + overrides?.provider !== "edgeone" + ) { throw new Error( `API key is required when using a custom base URL. ` + `Please provide your own API key in Settings.`, @@ -840,6 +850,21 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig { break } + case "edgeone": { + // EdgeOne Pages Edge AI - uses OpenAI-compatible API + // AI SDK appends /chat/completions to baseURL + // /api/edgeai + /chat/completions = /api/edgeai/chat/completions + const baseURL = overrides?.baseUrl || "/api/edgeai" + const edgeoneProvider = createOpenAI({ + apiKey: "edgeone", // Dummy key - EdgeOne doesn't require API key + baseURL, + // Pass cookies for EdgeOne Pages authentication (eo_token, eo_time) + ...(overrides?.headers && { headers: overrides.headers }), + }) + model = edgeoneProvider.chat(modelId) + break + } + case "doubao": { const apiKey = overrides?.apiKey || process.env.DOUBAO_API_KEY const baseURL = @@ -856,7 +881,7 @@ export function getAIModel(overrides?: ClientOverrides): ModelConfig { default: throw new Error( - `Unknown AI provider: ${provider}. Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, doubao`, + `Unknown AI provider: ${provider}. 
Supported providers: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, gateway, edgeone, doubao`, ) } diff --git a/lib/types/model-config.ts b/lib/types/model-config.ts index 4452278..6ea73da 100644 --- a/lib/types/model-config.ts +++ b/lib/types/model-config.ts @@ -11,6 +11,7 @@ export type ProviderName = | "siliconflow" | "sglang" | "gateway" + | "edgeone" | "doubao" // Individual model configuration @@ -85,6 +86,7 @@ export const PROVIDER_INFO: Record< defaultBaseUrl: "http://127.0.0.1:8000/v1", }, gateway: { label: "AI Gateway" }, + edgeone: { label: "EdgeOne Pages" }, doubao: { label: "Doubao (ByteDance)", defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3", @@ -219,6 +221,7 @@ export const SUGGESTED_MODELS: Record = { "anthropic/claude-3-5-sonnet", "google/gemini-2.0-flash", ], + edgeone: ["@tx/deepseek-ai/deepseek-v32"], doubao: [ // ByteDance Doubao models "doubao-1.5-thinking-pro-250415", diff --git a/package.json b/package.json index b85e39f..e336d73 100644 --- a/package.json +++ b/package.json @@ -88,6 +88,11 @@ "unpdf": "^1.4.0", "zod": "^4.1.12" }, + "optionalDependencies": { + "lightningcss": "^1.30.2", + "lightningcss-linux-x64-gnu": "^1.30.2", + "@tailwindcss/oxide-linux-x64-gnu": "^4.1.18" + }, "lint-staged": { "*.{js,ts,jsx,tsx,json,css}": [ "biome check --write --no-errors-on-unmatched",