# AI Provider Configuration
# AI_PROVIDER: Which provider to use
# Options: bedrock, openai, anthropic, google, azure, ollama, openrouter, deepseek, siliconflow
# Default: bedrock
AI_PROVIDER=bedrock

# AI_MODEL: The model ID for your chosen provider (REQUIRED)
AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0

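# Example (illustrative values): to use OpenAI instead, change both variables together,
# since AI_MODEL must be an ID that the chosen provider recognizes:
# AI_PROVIDER=openai
# AI_MODEL=gpt-4o
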
# AWS Bedrock Configuration
# AWS_REGION=us-east-1
# AWS_ACCESS_KEY_ID=your-access-key-id
# AWS_SECRET_ACCESS_KEY=your-secret-access-key
# Note: Claude and Nova models support reasoning/extended thinking
# BEDROCK_REASONING_BUDGET_TOKENS=12000 # Optional: Claude reasoning budget in tokens (1024-64000)
# BEDROCK_REASONING_EFFORT=medium # Optional: Nova reasoning effort (low/medium/high)

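# Example (illustrative values): a Bedrock setup with explicit credentials and
# a reasoning budget for a Claude model:
# AI_PROVIDER=bedrock
# AI_MODEL=global.anthropic.claude-sonnet-4-5-20250929-v1:0
# AWS_REGION=us-east-1
# AWS_ACCESS_KEY_ID=AKIA...
# AWS_SECRET_ACCESS_KEY=...
# BEDROCK_REASONING_BUDGET_TOKENS=8000
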
# OpenAI Configuration
# OPENAI_API_KEY=sk-...
# OPENAI_BASE_URL=https://api.openai.com/v1 # Optional: Custom OpenAI-compatible endpoint
# OPENAI_ORGANIZATION=org-... # Optional
# OPENAI_PROJECT=proj_... # Optional
# Note: o1/o3/gpt-5 models automatically enable reasoning summary (default: detailed)
# OPENAI_REASONING_EFFORT=low # Optional: Reasoning effort (minimal/low/medium/high) - for o1/o3/gpt-5
# OPENAI_REASONING_SUMMARY=detailed # Optional: Override reasoning summary (none/brief/detailed)

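# Example (hypothetical endpoint): OPENAI_BASE_URL can point at any OpenAI-compatible
# server, e.g. a local or self-hosted gateway:
# AI_PROVIDER=openai
# AI_MODEL=your-model-id
# OPENAI_API_KEY=sk-... # whatever key your endpoint expects
# OPENAI_BASE_URL=http://localhost:8000/v1
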
# Anthropic (Direct) Configuration
# ANTHROPIC_API_KEY=sk-ant-...
# ANTHROPIC_BASE_URL=https://your-custom-anthropic/v1
# ANTHROPIC_THINKING_TYPE=enabled # Optional: set to "enabled" to turn on extended thinking
# ANTHROPIC_THINKING_BUDGET_TOKENS=12000 # Optional: Budget for extended thinking in tokens

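# Example (illustrative model ID): direct Anthropic access with extended thinking:
# AI_PROVIDER=anthropic
# AI_MODEL=claude-sonnet-4-5
# ANTHROPIC_API_KEY=sk-ant-...
# ANTHROPIC_THINKING_TYPE=enabled
# ANTHROPIC_THINKING_BUDGET_TOKENS=12000
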
# Google Generative AI Configuration
# GOOGLE_GENERATIVE_AI_API_KEY=...
# GOOGLE_BASE_URL=https://generativelanguage.googleapis.com/v1beta # Optional: Custom endpoint
# GOOGLE_CANDIDATE_COUNT=1 # Optional: Number of candidates to generate
# GOOGLE_TOP_K=40 # Optional: Top K sampling parameter
# GOOGLE_TOP_P=0.95 # Optional: Nucleus sampling parameter
# Note: Gemini 2.5/3 models automatically enable reasoning display (includeThoughts: true)
# GOOGLE_THINKING_BUDGET=8192 # Optional: Gemini 2.5 thinking budget in tokens (larger budget = more thinking)
# GOOGLE_THINKING_LEVEL=high # Optional: Gemini 3 thinking level (low/high)

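# Example (illustrative model ID): a Gemini 2.5 model with a custom thinking budget
# (GOOGLE_THINKING_BUDGET applies to Gemini 2.5 models, GOOGLE_THINKING_LEVEL to Gemini 3):
# AI_PROVIDER=google
# AI_MODEL=gemini-2.5-flash
# GOOGLE_GENERATIVE_AI_API_KEY=...
# GOOGLE_THINKING_BUDGET=4096
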
# Azure OpenAI Configuration
# Configure endpoint using ONE of these methods:
# 1. AZURE_RESOURCE_NAME - SDK constructs: https://{name}.openai.azure.com/openai/v1{path}
# 2. AZURE_BASE_URL - SDK appends /v1{path} to your URL
# If both are set, AZURE_BASE_URL takes precedence.
# AZURE_RESOURCE_NAME=your-resource-name
# AZURE_API_KEY=...
# AZURE_BASE_URL=https://your-resource.openai.azure.com/openai # Alternative: Custom endpoint
# AZURE_REASONING_EFFORT=low # Optional: Azure reasoning effort (low/medium/high)
# AZURE_REASONING_SUMMARY=detailed

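# Example (illustrative values): the same setup configured with either endpoint method.
# Method 1 - resource name only:
# AI_PROVIDER=azure
# AI_MODEL=my-gpt-deployment # assumption: the Azure deployment name is used as the model ID
# AZURE_RESOURCE_NAME=my-resource
# AZURE_API_KEY=...
# Method 2 - full base URL (takes precedence over AZURE_RESOURCE_NAME if both are set):
# AZURE_BASE_URL=https://my-resource.openai.azure.com/openai
# AZURE_API_KEY=...
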
# Ollama (Local) Configuration
# OLLAMA_BASE_URL=http://localhost:11434/api # Optional, defaults to localhost
# OLLAMA_ENABLE_THINKING=true # Optional: Enable thinking for models that support it (e.g., qwen3)

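# Example (illustrative model tag): a fully local setup with a thinking-capable model:
# AI_PROVIDER=ollama
# AI_MODEL=qwen3
# OLLAMA_BASE_URL=http://localhost:11434/api
# OLLAMA_ENABLE_THINKING=true
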
# OpenRouter Configuration
# OPENROUTER_API_KEY=sk-or-v1-...
# OPENROUTER_BASE_URL=https://openrouter.ai/api/v1 # Optional: Custom endpoint

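# Example (illustrative model slug): OpenRouter model IDs use the "vendor/model" form:
# AI_PROVIDER=openrouter
# AI_MODEL=openai/gpt-4o
# OPENROUTER_API_KEY=sk-or-v1-...
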
# DeepSeek Configuration
# DEEPSEEK_API_KEY=sk-...
# DEEPSEEK_BASE_URL=https://api.deepseek.com/v1 # Optional: Custom endpoint

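# Example (illustrative model IDs): DeepSeek exposes chat and reasoning models:
# AI_PROVIDER=deepseek
# AI_MODEL=deepseek-chat # or deepseek-reasoner for the reasoning model
# DEEPSEEK_API_KEY=sk-...
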
# SiliconFlow Configuration (OpenAI-compatible)
# Base domain can be .com or .cn, defaults to https://api.siliconflow.com/v1
# SILICONFLOW_API_KEY=sk-...
# SILICONFLOW_BASE_URL=https://api.siliconflow.com/v1 # Optional: switch to https://api.siliconflow.cn/v1 if needed

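# Example (illustrative values): using the mainland-China endpoint:
# AI_PROVIDER=siliconflow
# AI_MODEL=your-model-id
# SILICONFLOW_API_KEY=sk-...
# SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1
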
# Langfuse Observability (Optional)
# Enable LLM tracing and analytics - https://langfuse.com
# LANGFUSE_PUBLIC_KEY=pk-lf-...
# LANGFUSE_SECRET_KEY=sk-lf-...
# LANGFUSE_BASEURL=https://cloud.langfuse.com # EU region, use https://us.cloud.langfuse.com for US

# Temperature (Optional)
# Controls randomness in AI responses. Lower = more deterministic.
# Leave unset for models that don't support temperature (e.g., GPT-5.1 reasoning models)
# TEMPERATURE=0

# Access Control (Optional)
# ACCESS_CODE_LIST=your-secret-code,another-code

# Draw.io Configuration (Optional)
# NEXT_PUBLIC_DRAWIO_BASE_URL=https://embed.diagrams.net # Default: https://embed.diagrams.net
# Use this to point to a self-hosted draw.io instance

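# Example (hypothetical host): point the embedded editor at a self-hosted instance:
# NEXT_PUBLIC_DRAWIO_BASE_URL=https://drawio.internal.example.com
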
# PDF Input Feature (Optional)
# Enable PDF file upload to extract text and generate diagrams
# Enabled by default. Set to "false" to disable.
# ENABLE_PDF_INPUT=true
# NEXT_PUBLIC_MAX_EXTRACTED_CHARS=150000 # Max characters for PDF/text extraction (default: 150000)
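
# Example (illustrative values): disable PDF upload, or keep it on with a smaller extraction cap:
# ENABLE_PDF_INPUT=false
# NEXT_PUBLIC_MAX_EXTRACTED_CHARS=50000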