4 Commits

Author SHA1 Message Date
fawney19
b89a4af0cf refactor: unify HTTP client timeout configuration
Replace the hard-coded timeout values in HTTPClientPool with configurable environment variables, improving the system's flexibility and maintainability.

- Add an HTTP_READ_TIMEOUT environment variable (default 300 seconds)
- Unify all HTTP client creation paths around the configurable timeouts
- Improve variable naming clarity (config -> default_config or client_config)
2025-12-30 15:06:55 +08:00
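An editor's sketch (not part of the commit) of how the new settings are consumed: the variable names and defaults below match the config change shown later in this diff, while the override value is invented for illustration.

import os
import httpx

# Illustrative override; unset variables fall back to the defaults from the config diff.
os.environ["HTTP_READ_TIMEOUT"] = "600"  # e.g. allow longer streaming reads

# Mirrors how HTTPClientPool now builds its timeout from the configurable values.
timeout = httpx.Timeout(
    connect=float(os.getenv("HTTP_CONNECT_TIMEOUT", "10.0")),
    read=float(os.getenv("HTTP_READ_TIMEOUT", "300.0")),
    write=float(os.getenv("HTTP_WRITE_TIMEOUT", "60.0")),
    pool=float(os.getenv("HTTP_POOL_TIMEOUT", "10.0")),
)
client = httpx.AsyncClient(timeout=timeout, http2=False, verify=True)
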
fawney19
a56854af43 feat: add an API for querying a GlobalModel's associated providers
Add a new API endpoint, GET /api/admin/models/global/{global_model_id}/providers, which returns all providers associated with a GlobalModel (including inactive ones).

- Backend: implement the AdminGetGlobalModelProvidersAdapter adapter
- Frontend: replace the previous ModelCatalog-based lookup with the new API
- Database: improve error messages and connection-failure handling during initialization
2025-12-30 14:47:35 +08:00
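For reference, a minimal sketch of calling the new endpoint from a script: the path and response fields come from this commit's backend changes further down, while the base URL, auth header, and example id (taken from the mock data) are assumptions.

import httpx

BASE_URL = "http://localhost:8000"   # assumed deployment URL
ADMIN_TOKEN = "your-admin-token"     # assumed admin credential and auth scheme
global_model_id = "gm-001"           # example id borrowed from the mock data

resp = httpx.get(
    f"{BASE_URL}/api/admin/models/global/{global_model_id}/providers",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
data = resp.json()
# The response carries `providers` (active and inactive) plus a `total` count.
for p in data["providers"]:
    print(p["provider_name"], p["target_model"], p["is_active"])
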
fawney19
4a35d78c8d fix: correct the image file extension in the README 2025-12-30 09:44:45 +08:00
fawney19
26b281271e docs: update the license terms, README, and mock data; bump model versions to the latest 2025-12-30 09:41:03 +08:00
15 changed files with 259 additions and 101 deletions

LICENSE

@@ -5,12 +5,17 @@ Aether Non-Commercial Open Source License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and its associated documentation files (the "Software") to use,
copy, modify, merge, publish, and distribute the Software, subject to the following conditions:
1. Non-Commercial Use Only
The Software may not be used for commercial purposes. Commercial purposes include, but are not limited to:
1. Non-Profit Use Only
The Software may not be used for profit-making purposes. Profit-making purposes include, but are not limited to:
- Selling the Software or any derivative works
- Using the Software to provide paid services
- Using the Software in commercial products or services
- Using the Software in any activity intended to obtain commercial advantage or monetary compensation
- Using the Software in commercial products or services operated for profit
The following uses are explicitly permitted:
- Personal learning and research
- Teaching and research at educational institutions
- Internal use by non-profit organizations
- Internal non-profit use within companies (e.g. internal tools, test environments)
2. Attribution Requirement
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
@@ -22,7 +27,7 @@ Aether Non-Commercial Open Source License
You may not sublicense the Software to others under different terms.
5. Commercial Licensing
For commercial use, please contact the copyright holder to obtain a separate commercial license.
To use the Software for profit-making purposes, please contact the copyright holder to obtain a separate commercial license.
The Software is provided "as is", without warranty of any kind, express or implied, including but not limited to warranties of merchantability,
fitness for a particular purpose, and non-infringement. In no event shall the authors or copyright holders be liable for any


@@ -143,7 +143,7 @@ cd frontend && npm install && npm run dev
- **Model level**: enable the 1H cache policy for a specific model in model management
- **Key level**: apply the 1H cache policy to a specific key in key management
> **Note**: if a key is forced to use the 1H cache, it can only use models that support the 1H cache
> **Note**: if a key is forced to use the 1H cache, it can only use models that support the 1H cache and will only match provider keys that support it, which means the key can no longer be shared across Claude Code, Codex, and GeminiCLI; enabling the 1H cache at the model level is the recommended approach instead.
### Q: How do I configure load balancing?
@@ -162,4 +162,16 @@ cd frontend && npm install && npm run dev
## License
This project is licensed under the [Aether Non-Commercial Open Source License](LICENSE).
This project is licensed under the [Aether Non-Commercial Open Source License](LICENSE). Personal learning, educational teaching and research, non-profit organizations, and internal non-profit use within companies are permitted; use for profit-making purposes is prohibited. For commercial use, please contact us for a commercial license.
## Contact the Author
<p align="center">
<img src="docs/author/qq_qrcode.jpg" width="200" alt="QQ二维码">
</p>
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=fawney19/Aether&type=Date)](https://star-history.com/#fawney19/Aether&Date)

BIN docs/author/qq_qrcode.jpg (new binary file, 266 KiB)
BIN (second new binary file, 113 KiB; filename not shown in this view)


@@ -4,7 +4,8 @@ import type {
GlobalModelUpdate,
GlobalModelResponse,
GlobalModelWithStats,
GlobalModelListResponse
GlobalModelListResponse,
ModelCatalogProviderDetail,
} from './types'
/**
@@ -83,3 +84,16 @@ export async function batchAssignToProviders(
)
return response.data
}
/**
 * Fetch all providers associated with a GlobalModel (including inactive ones)
*/
export async function getGlobalModelProviders(globalModelId: string): Promise<{
providers: ModelCatalogProviderDetail[]
total: number
}> {
const response = await client.get(
`/api/admin/models/global/${globalModelId}/providers`
)
return response.data
}


@@ -20,4 +20,5 @@ export {
updateGlobalModel,
deleteGlobalModel,
batchAssignToProviders,
getGlobalModelProviders,
} from './endpoints/global-models'


@@ -5,7 +5,7 @@
import type { User, LoginResponse } from '@/api/auth'
import type { DashboardStatsResponse, RecentRequest, ProviderStatus, DailyStatsResponse } from '@/api/dashboard'
import type { User as AdminUser, ApiKey } from '@/api/users'
import type { User as AdminUser } from '@/api/users'
import type { AdminApiKeysResponse } from '@/api/admin'
import type { Profile, UsageResponse } from '@/api/me'
import type { ProviderWithEndpointsSummary, GlobalModelResponse } from '@/api/endpoints/types'
@@ -185,18 +185,20 @@ export const MOCK_DASHBOARD_STATS: DashboardStatsResponse = {
output: 700000,
cache_creation: 50000,
cache_read: 200000
}
},
// Field used only for regular users
monthly_cost: 45.67
}
export const MOCK_RECENT_REQUESTS: RecentRequest[] = [
{ id: 'req-001', user: 'alice', model: 'claude-sonnet-4-20250514', tokens: 15234, time: '2 分钟前' },
{ id: 'req-002', user: 'bob', model: 'gpt-4o', tokens: 8765, time: '5 分钟前' },
{ id: 'req-003', user: 'charlie', model: 'claude-opus-4-20250514', tokens: 32100, time: '8 分钟前' },
{ id: 'req-004', user: 'diana', model: 'gemini-2.0-flash', tokens: 4521, time: '12 分钟前' },
{ id: 'req-005', user: 'eve', model: 'claude-sonnet-4-20250514', tokens: 9876, time: '15 分钟前' },
{ id: 'req-006', user: 'frank', model: 'gpt-4o-mini', tokens: 2345, time: '18 分钟前' },
{ id: 'req-007', user: 'grace', model: 'claude-haiku-3-5-20241022', tokens: 6789, time: '22 分钟前' },
{ id: 'req-008', user: 'henry', model: 'gemini-2.5-pro', tokens: 12345, time: '25 分钟前' }
{ id: 'req-001', user: 'alice', model: 'claude-sonnet-4-5-20250929', tokens: 15234, time: '2 分钟前' },
{ id: 'req-002', user: 'bob', model: 'gpt-5.1', tokens: 8765, time: '5 分钟前' },
{ id: 'req-003', user: 'charlie', model: 'claude-opus-4-5-20251101', tokens: 32100, time: '8 分钟前' },
{ id: 'req-004', user: 'diana', model: 'gemini-3-pro-preview', tokens: 4521, time: '12 分钟前' },
{ id: 'req-005', user: 'eve', model: 'claude-sonnet-4-5-20250929', tokens: 9876, time: '15 分钟前' },
{ id: 'req-006', user: 'frank', model: 'gpt-5.1-codex-mini', tokens: 2345, time: '18 分钟前' },
{ id: 'req-007', user: 'grace', model: 'claude-haiku-4-5-20251001', tokens: 6789, time: '22 分钟前' },
{ id: 'req-008', user: 'henry', model: 'gemini-3-pro-preview', tokens: 12345, time: '25 分钟前' }
]
export const MOCK_PROVIDER_STATUS: ProviderStatus[] = [
@@ -231,11 +233,11 @@ function generateDailyStats(): DailyStatsResponse {
unique_models: 8 + Math.floor(Math.random() * 5),
unique_providers: 4 + Math.floor(Math.random() * 3),
model_breakdown: [
{ model: 'claude-sonnet-4-20250514', requests: Math.floor(baseRequests * 0.35), tokens: Math.floor(baseTokens * 0.35), cost: Number((baseCost * 0.35).toFixed(2)) },
{ model: 'gpt-4o', requests: Math.floor(baseRequests * 0.25), tokens: Math.floor(baseTokens * 0.25), cost: Number((baseCost * 0.25).toFixed(2)) },
{ model: 'claude-opus-4-20250514', requests: Math.floor(baseRequests * 0.15), tokens: Math.floor(baseTokens * 0.15), cost: Number((baseCost * 0.20).toFixed(2)) },
{ model: 'gemini-2.0-flash', requests: Math.floor(baseRequests * 0.15), tokens: Math.floor(baseTokens * 0.15), cost: Number((baseCost * 0.10).toFixed(2)) },
{ model: 'claude-haiku-3-5-20241022', requests: Math.floor(baseRequests * 0.10), tokens: Math.floor(baseTokens * 0.10), cost: Number((baseCost * 0.10).toFixed(2)) }
{ model: 'claude-sonnet-4-5-20250929', requests: Math.floor(baseRequests * 0.35), tokens: Math.floor(baseTokens * 0.35), cost: Number((baseCost * 0.35).toFixed(2)) },
{ model: 'gpt-5.1', requests: Math.floor(baseRequests * 0.25), tokens: Math.floor(baseTokens * 0.25), cost: Number((baseCost * 0.25).toFixed(2)) },
{ model: 'claude-opus-4-5-20251101', requests: Math.floor(baseRequests * 0.15), tokens: Math.floor(baseTokens * 0.15), cost: Number((baseCost * 0.20).toFixed(2)) },
{ model: 'gemini-3-pro-preview', requests: Math.floor(baseRequests * 0.15), tokens: Math.floor(baseTokens * 0.15), cost: Number((baseCost * 0.10).toFixed(2)) },
{ model: 'claude-haiku-4-5-20251001', requests: Math.floor(baseRequests * 0.10), tokens: Math.floor(baseTokens * 0.10), cost: Number((baseCost * 0.10).toFixed(2)) }
]
})
}
@@ -243,11 +245,11 @@ function generateDailyStats(): DailyStatsResponse {
return {
daily_stats: dailyStats,
model_summary: [
{ model: 'claude-sonnet-4-20250514', requests: 2456, tokens: 8500000, cost: 125.45, avg_response_time: 1.2, cost_per_request: 0.051, tokens_per_request: 3461 },
{ model: 'gpt-4o', requests: 1823, tokens: 6200000, cost: 98.32, avg_response_time: 0.9, cost_per_request: 0.054, tokens_per_request: 3401 },
{ model: 'claude-opus-4-20250514', requests: 987, tokens: 4100000, cost: 156.78, avg_response_time: 2.1, cost_per_request: 0.159, tokens_per_request: 4154 },
{ model: 'gemini-2.0-flash', requests: 1234, tokens: 3800000, cost: 28.56, avg_response_time: 0.6, cost_per_request: 0.023, tokens_per_request: 3079 },
{ model: 'claude-haiku-3-5-20241022', requests: 2100, tokens: 5200000, cost: 32.10, avg_response_time: 0.5, cost_per_request: 0.015, tokens_per_request: 2476 }
{ model: 'claude-sonnet-4-5-20250929', requests: 2456, tokens: 8500000, cost: 125.45, avg_response_time: 1.2, cost_per_request: 0.051, tokens_per_request: 3461 },
{ model: 'gpt-5.1', requests: 1823, tokens: 6200000, cost: 98.32, avg_response_time: 0.9, cost_per_request: 0.054, tokens_per_request: 3401 },
{ model: 'claude-opus-4-5-20251101', requests: 987, tokens: 4100000, cost: 156.78, avg_response_time: 2.1, cost_per_request: 0.159, tokens_per_request: 4154 },
{ model: 'gemini-3-pro-preview', requests: 1234, tokens: 3800000, cost: 28.56, avg_response_time: 0.6, cost_per_request: 0.023, tokens_per_request: 3079 },
{ model: 'claude-haiku-4-5-20251001', requests: 2100, tokens: 5200000, cost: 32.10, avg_response_time: 0.5, cost_per_request: 0.015, tokens_per_request: 2476 }
],
period: {
start_date: dailyStats[0].date,
@@ -336,7 +338,7 @@ export const MOCK_ALL_USERS: AdminUser[] = [
// ========== API Key data ==========
export const MOCK_USER_API_KEYS: ApiKey[] = [
export const MOCK_USER_API_KEYS = [
{
id: 'key-uuid-001',
key_display: 'sk-ae...x7f9',
@@ -346,7 +348,8 @@ export const MOCK_USER_API_KEYS: ApiKey[] = [
is_active: true,
is_standalone: false,
total_requests: 1234,
total_cost_usd: 45.67
total_cost_usd: 45.67,
force_capabilities: null
},
{
id: 'key-uuid-002',
@@ -357,7 +360,8 @@ export const MOCK_USER_API_KEYS: ApiKey[] = [
is_active: true,
is_standalone: false,
total_requests: 5678,
total_cost_usd: 123.45
total_cost_usd: 123.45,
force_capabilities: { cache_1h: true }
},
{
id: 'key-uuid-003',
@@ -367,7 +371,8 @@ export const MOCK_USER_API_KEYS: ApiKey[] = [
is_active: false,
is_standalone: false,
total_requests: 100,
total_cost_usd: 2.34
total_cost_usd: 2.34,
force_capabilities: null
}
]
@@ -813,16 +818,16 @@ export const MOCK_USAGE_RESPONSE: UsageResponse = {
quota_usd: 100,
used_usd: 45.32,
summary_by_model: [
{ model: 'claude-sonnet-4-20250514', requests: 456, input_tokens: 650000, output_tokens: 250000, total_tokens: 900000, total_cost_usd: 18.50, actual_total_cost_usd: 13.50 },
{ model: 'gpt-4o', requests: 312, input_tokens: 480000, output_tokens: 180000, total_tokens: 660000, total_cost_usd: 12.30, actual_total_cost_usd: 9.20 },
{ model: 'claude-haiku-3-5-20241022', requests: 289, input_tokens: 420000, output_tokens: 170000, total_tokens: 590000, total_cost_usd: 8.50, actual_total_cost_usd: 6.30 },
{ model: 'gemini-2.0-flash', requests: 177, input_tokens: 250000, output_tokens: 100000, total_tokens: 350000, total_cost_usd: 6.37, actual_total_cost_usd: 4.33 }
{ model: 'claude-sonnet-4-5-20250929', requests: 456, input_tokens: 650000, output_tokens: 250000, total_tokens: 900000, total_cost_usd: 18.50, actual_total_cost_usd: 13.50 },
{ model: 'gpt-5.1', requests: 312, input_tokens: 480000, output_tokens: 180000, total_tokens: 660000, total_cost_usd: 12.30, actual_total_cost_usd: 9.20 },
{ model: 'claude-haiku-4-5-20251001', requests: 289, input_tokens: 420000, output_tokens: 170000, total_tokens: 590000, total_cost_usd: 8.50, actual_total_cost_usd: 6.30 },
{ model: 'gemini-3-pro-preview', requests: 177, input_tokens: 250000, output_tokens: 100000, total_tokens: 350000, total_cost_usd: 6.37, actual_total_cost_usd: 4.33 }
],
records: [
{
id: 'usage-001',
provider: 'anthropic',
model: 'claude-sonnet-4-20250514',
model: 'claude-sonnet-4-5-20250929',
input_tokens: 1500,
output_tokens: 800,
total_tokens: 2300,
@@ -837,7 +842,7 @@ export const MOCK_USAGE_RESPONSE: UsageResponse = {
{
id: 'usage-002',
provider: 'openai',
model: 'gpt-4o',
model: 'gpt-5.1',
input_tokens: 2000,
output_tokens: 500,
total_tokens: 2500,


@@ -405,10 +405,10 @@ function getUsageRecords() {
// Mock mapping data
const MOCK_ALIASES = [
{ id: 'alias-001', source_model: 'claude-4-sonnet', target_global_model_id: 'gm-001', target_global_model_name: 'claude-sonnet-4-20250514', target_global_model_display_name: 'Claude Sonnet 4', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' },
{ id: 'alias-002', source_model: 'claude-4-opus', target_global_model_id: 'gm-002', target_global_model_name: 'claude-opus-4-20250514', target_global_model_display_name: 'Claude Opus 4', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' },
{ id: 'alias-003', source_model: 'gpt4o', target_global_model_id: 'gm-004', target_global_model_name: 'gpt-4o', target_global_model_display_name: 'GPT-4o', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' },
{ id: 'alias-004', source_model: 'gemini-flash', target_global_model_id: 'gm-005', target_global_model_name: 'gemini-2.0-flash', target_global_model_display_name: 'Gemini 2.0 Flash', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' }
{ id: 'alias-001', source_model: 'claude-4-sonnet', target_global_model_id: 'gm-003', target_global_model_name: 'claude-sonnet-4-5-20250929', target_global_model_display_name: 'Claude Sonnet 4.5', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' },
{ id: 'alias-002', source_model: 'claude-4-opus', target_global_model_id: 'gm-002', target_global_model_name: 'claude-opus-4-5-20251101', target_global_model_display_name: 'Claude Opus 4.5', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' },
{ id: 'alias-003', source_model: 'gpt5', target_global_model_id: 'gm-006', target_global_model_name: 'gpt-5.1', target_global_model_display_name: 'GPT-5.1', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' },
{ id: 'alias-004', source_model: 'gemini-pro', target_global_model_id: 'gm-005', target_global_model_name: 'gemini-3-pro-preview', target_global_model_display_name: 'Gemini 3 Pro Preview', provider_id: null, provider_name: null, scope: 'global', mapping_type: 'alias', is_active: true, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z' }
]
// Mock Endpoint Keys
@@ -2172,10 +2172,10 @@ function generateIntervalTimelineData(
// Model list (used to color-code by model)
const models = [
'claude-sonnet-4-20250514',
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-20241022',
'claude-opus-4-20250514'
'claude-sonnet-4-5-20250929',
'claude-haiku-4-5-20251001',
'claude-opus-4-5-20251101',
'gpt-5.1'
]
// Generate simulated request-interval data


@@ -737,6 +737,7 @@ import {
updateGlobalModel,
deleteGlobalModel,
batchAssignToProviders,
getGlobalModelProviders,
type GlobalModelResponse,
} from '@/api/global-models'
import { log } from '@/utils/logger'
@@ -1080,42 +1081,32 @@ async function selectModel(model: GlobalModelResponse) {
async function loadModelProviders(_globalModelId: string) {
loadingModelProviders.value = true
try {
// Use the ModelCatalog API to fetch detailed provider-association info
const { getModelCatalog } = await import('@/api/endpoints')
const catalogResponse = await getModelCatalog()
// Use the new API to fetch all associated providers (including inactive ones)
const response = await getGlobalModelProviders(_globalModelId)
// Find the catalog item matching the current GlobalModel
const catalogItem = catalogResponse.models.find(
m => m.global_model_name === selectedModel.value?.name
)
if (catalogItem) {
// Convert to display format, including full model-implementation info
selectedModelProviders.value = catalogItem.providers.map(p => ({
id: p.provider_id,
model_id: p.model_id,
display_name: p.provider_display_name || p.provider_name,
identifier: p.provider_name,
provider_type: 'API',
target_model: p.target_model,
is_active: p.is_active,
// Pricing info
input_price_per_1m: p.input_price_per_1m,
output_price_per_1m: p.output_price_per_1m,
cache_creation_price_per_1m: p.cache_creation_price_per_1m,
cache_read_price_per_1m: p.cache_read_price_per_1m,
cache_1h_creation_price_per_1m: p.cache_1h_creation_price_per_1m,
price_per_request: p.price_per_request,
effective_tiered_pricing: p.effective_tiered_pricing,
tier_count: p.tier_count,
// Capability info
supports_vision: p.supports_vision,
supports_function_calling: p.supports_function_calling,
supports_streaming: p.supports_streaming
}))
} else {
selectedModelProviders.value = []
}
// Convert to display format
selectedModelProviders.value = response.providers.map(p => ({
id: p.provider_id,
model_id: p.model_id,
display_name: p.provider_display_name || p.provider_name,
identifier: p.provider_name,
provider_type: 'API',
target_model: p.target_model,
is_active: p.is_active,
// Pricing info
input_price_per_1m: p.input_price_per_1m,
output_price_per_1m: p.output_price_per_1m,
cache_creation_price_per_1m: p.cache_creation_price_per_1m,
cache_read_price_per_1m: p.cache_read_price_per_1m,
cache_1h_creation_price_per_1m: p.cache_1h_creation_price_per_1m,
price_per_request: p.price_per_request,
effective_tiered_pricing: p.effective_tiered_pricing,
tier_count: p.tier_count,
// Capability info
supports_vision: p.supports_vision,
supports_function_calling: p.supports_function_calling,
supports_streaming: p.supports_streaming
}))
} catch (err: any) {
log.error('加载关联提供商失败:', err)
showError(parseApiError(err, '加载关联提供商失败'), '错误')


@@ -13,7 +13,7 @@ authors = [
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"License :: Other/Proprietary License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",


@@ -5,7 +5,7 @@ GlobalModel Admin API
"""
from dataclasses import dataclass
from typing import List, Optional
from typing import Optional
from fastapi import APIRouter, Depends, Query, Request
from sqlalchemy.orm import Session
@@ -19,9 +19,11 @@ from src.models.pydantic_models import (
BatchAssignToProvidersResponse,
GlobalModelCreate,
GlobalModelListResponse,
GlobalModelProvidersResponse,
GlobalModelResponse,
GlobalModelUpdate,
GlobalModelWithStats,
ModelCatalogProviderDetail,
)
from src.services.model.global_model import GlobalModelService
@@ -108,6 +110,17 @@ async def batch_assign_to_providers(
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/{global_model_id}/providers", response_model=GlobalModelProvidersResponse)
async def get_global_model_providers(
request: Request,
global_model_id: str,
db: Session = Depends(get_db),
) -> GlobalModelProvidersResponse:
"""获取 GlobalModel 的所有关联提供商(包括非活跃的)"""
adapter = AdminGetGlobalModelProvidersAdapter(global_model_id=global_model_id)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
# ========== Adapters ==========
@@ -275,3 +288,61 @@ class AdminBatchAssignToProvidersAdapter(AdminApiAdapter):
logger.info(f"批量为 Provider 添加 GlobalModel: global_model_id={self.global_model_id} success={len(result['success'])} errors={len(result['errors'])}")
return BatchAssignToProvidersResponse(**result)
@dataclass
class AdminGetGlobalModelProvidersAdapter(AdminApiAdapter):
"""获取 GlobalModel 的所有关联提供商(包括非活跃的)"""
global_model_id: str
async def handle(self, context): # type: ignore[override]
from sqlalchemy.orm import joinedload
from src.models.database import Model
global_model = GlobalModelService.get_global_model(context.db, self.global_model_id)
# Fetch all associated Models (including inactive ones)
models = (
context.db.query(Model)
.options(joinedload(Model.provider), joinedload(Model.global_model))
.filter(Model.global_model_id == global_model.id)
.all()
)
provider_entries = []
for model in models:
provider = model.provider
if not provider:
continue
effective_tiered = model.get_effective_tiered_pricing()
tier_count = len(effective_tiered.get("tiers", [])) if effective_tiered else 1
provider_entries.append(
ModelCatalogProviderDetail(
provider_id=provider.id,
provider_name=provider.name,
provider_display_name=provider.display_name,
model_id=model.id,
target_model=model.provider_model_name,
input_price_per_1m=model.get_effective_input_price(),
output_price_per_1m=model.get_effective_output_price(),
cache_creation_price_per_1m=model.get_effective_cache_creation_price(),
cache_read_price_per_1m=model.get_effective_cache_read_price(),
cache_1h_creation_price_per_1m=model.get_effective_1h_cache_creation_price(),
price_per_request=model.get_effective_price_per_request(),
effective_tiered_pricing=effective_tiered,
tier_count=tier_count,
supports_vision=model.get_effective_supports_vision(),
supports_function_calling=model.get_effective_supports_function_calling(),
supports_streaming=model.get_effective_supports_streaming(),
is_active=bool(model.is_active),
)
)
return GlobalModelProvidersResponse(
providers=provider_entries,
total=len(provider_entries),
)
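To make the new payload concrete, here is a hedged sketch of what one entry in the endpoint's response might look like. The field names follow ModelCatalogProviderDetail as populated by the adapter above; every value is invented for illustration.

# Illustrative response shape only; none of these values come from a real deployment.
example_response = {
    "providers": [
        {
            "provider_id": "prov-001",
            "provider_name": "anthropic",
            "provider_display_name": "Anthropic",
            "model_id": "model-001",
            "target_model": "claude-sonnet-4-5-20250929",
            "input_price_per_1m": 3.0,
            "output_price_per_1m": 15.0,
            "cache_creation_price_per_1m": 3.75,
            "cache_read_price_per_1m": 0.3,
            "cache_1h_creation_price_per_1m": 6.0,
            "price_per_request": None,
            "effective_tiered_pricing": None,
            "tier_count": 1,
            "supports_vision": True,
            "supports_function_calling": True,
            "supports_streaming": True,
            "is_active": False,  # inactive associations are returned as well
        }
    ],
    "total": 1,
}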


@@ -9,6 +9,7 @@ from urllib.parse import quote, urlparse
import httpx
from src.config import config
from src.core.logger import logger
@@ -83,10 +84,10 @@ class HTTPClientPool:
http2=False, # temporarily disable HTTP/2 for better compatibility
verify=True, # enable SSL verification
timeout=httpx.Timeout(
connect=10.0, # connect timeout
read=300.0, # read timeout (5 minutes, suited to streaming responses)
write=60.0, # write timeout (60 seconds, supports large request bodies)
pool=5.0, # connection pool timeout
connect=config.http_connect_timeout,
read=config.http_read_timeout,
write=config.http_write_timeout,
pool=config.http_pool_timeout,
),
limits=httpx.Limits(
max_connections=100, # maximum number of connections
@@ -111,15 +112,20 @@ class HTTPClientPool:
"""
if name not in cls._clients:
# Merge the default configuration with custom overrides
config = {
default_config = {
"http2": False,
"verify": True,
"timeout": httpx.Timeout(10.0, read=300.0),
"timeout": httpx.Timeout(
connect=config.http_connect_timeout,
read=config.http_read_timeout,
write=config.http_write_timeout,
pool=config.http_pool_timeout,
),
"follow_redirects": True,
}
config.update(kwargs)
default_config.update(kwargs)
cls._clients[name] = httpx.AsyncClient(**config)
cls._clients[name] = httpx.AsyncClient(**default_config)
logger.debug(f"创建命名HTTP客户端: {name}")
return cls._clients[name]
@@ -151,14 +157,19 @@ class HTTPClientPool:
async with HTTPClientPool.get_temp_client() as client:
response = await client.get('https://example.com')
"""
config = {
default_config = {
"http2": False,
"verify": True,
"timeout": httpx.Timeout(10.0),
"timeout": httpx.Timeout(
connect=config.http_connect_timeout,
read=config.http_read_timeout,
write=config.http_write_timeout,
pool=config.http_pool_timeout,
),
}
config.update(kwargs)
default_config.update(kwargs)
client = httpx.AsyncClient(**config)
client = httpx.AsyncClient(**default_config)
try:
yield client
finally:
@@ -182,25 +193,30 @@ class HTTPClientPool:
Returns:
A configured httpx.AsyncClient instance
"""
config: Dict[str, Any] = {
client_config: Dict[str, Any] = {
"http2": False,
"verify": True,
"follow_redirects": True,
}
if timeout:
config["timeout"] = timeout
client_config["timeout"] = timeout
else:
config["timeout"] = httpx.Timeout(10.0, read=300.0)
client_config["timeout"] = httpx.Timeout(
connect=config.http_connect_timeout,
read=config.http_read_timeout,
write=config.http_write_timeout,
pool=config.http_pool_timeout,
)
# Add proxy configuration
proxy_url = build_proxy_url(proxy_config) if proxy_config else None
if proxy_url:
config["proxy"] = proxy_url
client_config["proxy"] = proxy_url
logger.debug(f"创建带代理的HTTP客户端: {proxy_config.get('url', 'unknown')}")
config.update(kwargs)
return httpx.AsyncClient(**config)
client_config.update(kwargs)
return httpx.AsyncClient(**client_config)
# Convenience accessor functions


@@ -148,6 +148,7 @@ class Config:
# HTTP request timeout configuration (seconds)
self.http_connect_timeout = float(os.getenv("HTTP_CONNECT_TIMEOUT", "10.0"))
self.http_read_timeout = float(os.getenv("HTTP_READ_TIMEOUT", "300.0"))
self.http_write_timeout = float(os.getenv("HTTP_WRITE_TIMEOUT", "60.0"))
self.http_pool_timeout = float(os.getenv("HTTP_POOL_TIMEOUT", "10.0"))


@@ -360,6 +360,9 @@ def init_db():
Note: the database schema is managed by Alembic; run ./migrate.sh when deploying.
"""
import sys
from sqlalchemy.exc import OperationalError
logger.info("初始化数据库...")
# Make sure the engine has been created
@@ -382,6 +385,38 @@ def init_db():
db.commit()
logger.info("数据库初始化完成")
except OperationalError as e:
db.rollback()
# Extract database connection info for the error message
db_url = config.database_url
# Hide the password; show only host:port/database
if "@" in db_url:
db_info = db_url.split("@")[-1]
else:
db_info = db_url
import os
# Print directly to stderr to make sure the message is shown
print("", file=sys.stderr)
print("=" * 60, file=sys.stderr)
print("数据库连接失败", file=sys.stderr)
print("=" * 60, file=sys.stderr)
print("", file=sys.stderr)
print(f"无法连接到数据库: {db_info}", file=sys.stderr)
print("", file=sys.stderr)
print("请检查以下事项:", file=sys.stderr)
print(" 1. PostgreSQL 服务是否正在运行", file=sys.stderr)
print(" 2. 数据库连接配置是否正确 (DATABASE_URL)", file=sys.stderr)
print(" 3. 数据库用户名和密码是否正确", file=sys.stderr)
print("", file=sys.stderr)
print("如果使用 Docker请先运行:", file=sys.stderr)
print(" docker-compose up -d postgres redis", file=sys.stderr)
print("", file=sys.stderr)
print("=" * 60, file=sys.stderr)
# Exit via os._exit so uvicorn does not catch and print a traceback
os._exit(1)
except Exception as e:
logger.error(f"数据库初始化失败: {e}")
db.rollback()


@@ -274,6 +274,13 @@ class GlobalModelListResponse(BaseModel):
total: int
class GlobalModelProvidersResponse(BaseModel):
"""GlobalModel 关联提供商列表响应"""
providers: List[ModelCatalogProviderDetail]
total: int
class BatchAssignToProvidersRequest(BaseModel):
"""批量为 Provider 添加 GlobalModel 实现"""