# AI Provider Configuration
# AI_PROVIDER: Which provider to use
# Options: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow, sglang, doubao, gateway
# Default: bedrock
AI_PROVIDER=bedrock

# AI_MODEL: The model ID for your chosen provider (REQUIRED)
AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
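
# Example (illustrative): to use a different provider, change both values together,
# e.g. for OpenAI (use any model ID your account can access):
# AI_PROVIDER=openai
# AI_MODEL=gpt-4o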

# AWS Bedrock Configuration
# AWS_REGION=us-east-1
# AWS_ACCESS_KEY_ID=your-access-key-id
# AWS_SECRET_ACCESS_KEY=your-secret-access-key
# Note: Claude and Nova models support reasoning/extended thinking
# BEDROCK_REASONING_BUDGET_TOKENS=12000 # Optional: Claude reasoning budget in tokens (1024-64000)
# BEDROCK_REASONING_EFFORT=medium # Optional: Nova reasoning effort (low/medium/high)

# OpenAI Configuration
# OPENAI_API_KEY=sk-...
# OPENAI_BASE_URL=https://api.openai.com/v1 # Optional: Custom OpenAI-compatible endpoint
# OPENAI_ORGANIZATION=org-... # Optional
# OPENAI_PROJECT=proj_... # Optional
# Note: o1/o3/gpt-5 models automatically enable reasoning summary (default: detailed)
# OPENAI_REASONING_EFFORT=low # Optional: Reasoning effort (minimal/low/medium/high) - for o1/o3/gpt-5
# OPENAI_REASONING_SUMMARY=detailed # Optional: Override reasoning summary (none/brief/detailed)
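
# Example (illustrative): pair a reasoning model with an explicit effort level:
# AI_MODEL=o3
# OPENAI_REASONING_EFFORT=low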

# Anthropic (Direct) Configuration
# ANTHROPIC_API_KEY=sk-ant-...
# ANTHROPIC_BASE_URL=https://your-custom-anthropic/v1
# ANTHROPIC_THINKING_TYPE=enabled # Optional: Anthropic extended thinking (enabled)
# ANTHROPIC_THINKING_BUDGET_TOKENS=12000 # Optional: Budget for extended thinking in tokens
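
# Example (illustrative): extended thinking is enabled by setting the switch together with a token budget:
# ANTHROPIC_THINKING_TYPE=enabled
# ANTHROPIC_THINKING_BUDGET_TOKENS=12000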

# Google Generative AI Configuration
# GOOGLE_GENERATIVE_AI_API_KEY=...
# GOOGLE_BASE_URL=https://generativelanguage.googleapis.com/v1beta # Optional: Custom endpoint
# GOOGLE_CANDIDATE_COUNT=1 # Optional: Number of candidates to generate
# GOOGLE_TOP_K=40 # Optional: Top K sampling parameter
# GOOGLE_TOP_P=0.95 # Optional: Nucleus sampling parameter
# Note: Gemini 2.5/3 models automatically enable reasoning display (includeThoughts: true)
# GOOGLE_THINKING_BUDGET=8192 # Optional: Gemini 2.5 thinking budget in tokens (higher = more thinking)
# GOOGLE_THINKING_LEVEL=high # Optional: Gemini 3 thinking level (low/high)
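
# Example (illustrative): a Gemini 2.5 model with an explicit thinking budget:
# AI_MODEL=gemini-2.5-flash
# GOOGLE_THINKING_BUDGET=8192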

# Azure OpenAI Configuration
# Configure the endpoint using ONE of these methods:
# 1. AZURE_RESOURCE_NAME - SDK constructs: https://{name}.openai.azure.com/openai/v1{path}
# 2. AZURE_BASE_URL - SDK appends /v1{path} to your URL
# If both are set, AZURE_BASE_URL takes precedence.
# AZURE_RESOURCE_NAME=your-resource-name
# AZURE_API_KEY=...
# AZURE_BASE_URL=https://your-resource.openai.azure.com/openai # Alternative: Custom endpoint
# AZURE_REASONING_EFFORT=low # Optional: Azure reasoning effort (low/medium/high)
# AZURE_REASONING_SUMMARY=detailed # Optional: Reasoning summary
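
# Worked example of method 1 (hypothetical resource name "contoso"):
# AZURE_RESOURCE_NAME=contoso # requests go to https://contoso.openai.azure.com/openai/v1{path}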

# Ollama (Local) Configuration
# OLLAMA_BASE_URL=http://localhost:11434/api # Optional, defaults to localhost
# OLLAMA_ENABLE_THINKING=true # Optional: Enable thinking for models that support it (e.g., qwen3)
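
# Example (illustrative): a local thinking-capable model such as qwen3:
# AI_PROVIDER=ollama
# AI_MODEL=qwen3
# OLLAMA_ENABLE_THINKING=true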

# OpenRouter Configuration
# OPENROUTER_API_KEY=sk-or-v1-...
# OPENROUTER_BASE_URL=https://openrouter.ai/api/v1 # Optional: Custom endpoint

# DeepSeek Configuration
# DEEPSEEK_API_KEY=sk-...
# DEEPSEEK_BASE_URL=https://api.deepseek.com/v1 # Optional: Custom endpoint

# SiliconFlow Configuration (OpenAI-compatible)
# Base domain can be .com or .cn; defaults to https://api.siliconflow.com/v1
# SILICONFLOW_API_KEY=sk-...
# SILICONFLOW_BASE_URL=https://api.siliconflow.com/v1 # Optional: switch to https://api.siliconflow.cn/v1 if needed

# SGLang Configuration (OpenAI-compatible)
# SGLANG_API_KEY=your-sglang-api-key
# SGLANG_BASE_URL=http://127.0.0.1:8000/v1 # Your SGLang endpoint

# ByteDance Doubao Configuration (via Volcengine)
# DOUBAO_API_KEY=your-doubao-api-key
# DOUBAO_BASE_URL=https://ark.cn-beijing.volces.com/api/v3 # ByteDance Volcengine endpoint

# Vercel AI Gateway Configuration
# Get your API key from: https://vercel.com/ai-gateway
# Model format: "provider/model", e.g., "openai/gpt-4o", "anthropic/claude-sonnet-4-5"
# AI_GATEWAY_API_KEY=...
# AI_GATEWAY_BASE_URL=https://your-custom-gateway.com/v1/ai # Optional: Custom Gateway URL (for local dev or self-hosted Gateway)
# If not set, uses the Vercel default: https://ai-gateway.vercel.sh/v1/ai
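
# Example: route through the Gateway using the "provider/model" format shown above:
# AI_PROVIDER=gateway
# AI_MODEL=openai/gpt-4o
# AI_GATEWAY_API_KEY=...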

# Langfuse Observability (Optional)
# Enable LLM tracing and analytics - https://langfuse.com
# LANGFUSE_PUBLIC_KEY=pk-lf-...
# LANGFUSE_SECRET_KEY=sk-lf-...
# LANGFUSE_BASEURL=https://cloud.langfuse.com # EU region; use https://us.cloud.langfuse.com for US

# Temperature (Optional)
# Controls randomness in AI responses. Lower = more deterministic.
# Leave unset for models that don't support temperature (e.g., GPT-5.1 reasoning models)
# TEMPERATURE=0

# Access Control (Optional)
# ACCESS_CODE_LIST=your-secret-code,another-code

# Draw.io Configuration (Optional)
# NEXT_PUBLIC_DRAWIO_BASE_URL=https://embed.diagrams.net # Default: https://embed.diagrams.net
# Use this to point to a self-hosted draw.io instance

# Subdirectory Deployment (Optional)
# For deploying to a subdirectory (e.g., https://example.com/nextaidrawio)
# Set this to your subdirectory path with a leading slash (e.g., /nextaidrawio)
# Leave empty for root deployment (default)
# NEXT_PUBLIC_BASE_PATH=/nextaidrawio

# PDF Input Feature (Optional)
# Enable PDF file upload to extract text and generate diagrams
# Enabled by default. Set to "false" to disable.
# ENABLE_PDF_INPUT=true
# NEXT_PUBLIC_MAX_EXTRACTED_CHARS=150000 # Max characters for PDF/text extraction (default: 150000)