debug: add logging for model support checking and refactor cache resolution priority

- 在 aware_scheduler.py 中添加调试日志,用于跟踪模型支持检查过程
- 重构 model_cache.py 的别名解析逻辑:调整优先级为 alias > provider_model_name > direct_match
- 优化缓存命中路径,将直接匹配逻辑移到别名匹配失败后执行
This commit is contained in:
fawney19
2025-12-15 18:52:34 +08:00
parent 84d4db0f8d
commit a7bfab1475
2 changed files with 84 additions and 65 deletions

View File

@@ -802,6 +802,11 @@ class CacheAwareScheduler:
# 查询该 Provider 是否有实现这个 GlobalModel
for model in provider.models:
if model.global_model_id == global_model.id and model.is_active:
logger.debug(
f"[_check_model_support_for_global_model] Provider={provider.name}, "
f"GlobalModel={global_model.name}, "
f"provider_model_name={model.provider_model_name}"
)
# 检查流式支持
if is_stream:
supports_streaming = model.get_effective_supports_streaming()

View File

@@ -273,24 +273,7 @@ class ModelCacheService:
logger.debug(f"GlobalModel 缓存命中(别名解析): {normalized_name}")
return ModelCacheService._dict_to_global_model(cached_data)
# 2. 直接通过 GlobalModel.name 查找 # 2. 优先通过 provider_model_name 和别名匹配(Provider 配置的别名,优先级最高)
global_model = (
db.query(GlobalModel)
.filter(GlobalModel.name == normalized_name, GlobalModel.is_active == True)
.first()
)
if global_model:
resolution_method = "direct_match"
# 缓存结果
global_model_dict = ModelCacheService._global_model_to_dict(global_model)
await CacheService.set(
cache_key, global_model_dict, ttl_seconds=ModelCacheService.CACHE_TTL
)
logger.debug(f"GlobalModel 已缓存(别名解析-直接匹配): {normalized_name}")
return global_model
# 3. 通过别名匹配(优化:精确查询,避免加载所有 Model)
from sqlalchemy import or_
from src.models.database import Provider
@@ -337,72 +320,103 @@ class ModelCacheService:
.all()
)
# 用于检测别名冲突 # 用于存储匹配结果:{(model_id, global_model_id): (GlobalModel, match_type, priority)}
matched_global_models = [] # 使用字典去重,同一个 Model 只保留优先级最高的匹配
matched_models_dict = {}
# 遍历查询结果进行匹配 # 遍历查询结果进行匹配
for model, gm in models_with_global: for model, gm in models_with_global:
# 检查 provider_model_name 是否匹配 key = (model.id, gm.id)
if model.provider_model_name == normalized_name:
matched_global_models.append((gm, "provider_model_name"))
logger.debug(
f"模型名称 '{normalized_name}' 通过 provider_model_name 匹配到 "
f"GlobalModel: {gm.name}"
)
# 检查 provider_model_aliases 是否匹配 # 检查 provider_model_aliases 是否匹配(优先级更高)
if model.provider_model_aliases: if model.provider_model_aliases:
for alias_entry in model.provider_model_aliases: for alias_entry in model.provider_model_aliases:
if isinstance(alias_entry, dict): if isinstance(alias_entry, dict):
alias_name = alias_entry.get("name", "").strip() alias_name = alias_entry.get("name", "").strip()
if alias_name == normalized_name: if alias_name == normalized_name:
matched_global_models.append((gm, "alias")) # alias 优先级为 0最高覆盖任何已存在的匹配
matched_models_dict[key] = (gm, "alias", 0)
logger.debug( logger.debug(
f"模型名称 '{normalized_name}' 通过别名匹配到 " f"模型名称 '{normalized_name}' 通过别名匹配到 "
f"GlobalModel: {gm.name}" f"GlobalModel: {gm.name} (Model: {model.id[:8]}...)"
) )
break break
# 处理匹配结果 # 如果还没有匹配(或只有 provider_model_name 匹配),检查 provider_model_name
if not matched_global_models: if key not in matched_models_dict or matched_models_dict[key][1] != "alias":
resolution_method = "not_found" if model.provider_model_name == normalized_name:
# 未找到匹配,缓存负结果 # provider_model_name 优先级为 1兜底只在没有 alias 匹配时使用
await CacheService.set( if key not in matched_models_dict:
cache_key, "NOT_FOUND", ttl_seconds=ModelCacheService.CACHE_TTL matched_models_dict[key] = (gm, "provider_model_name", 1)
) logger.debug(
logger.debug(f"GlobalModel 未找到(别名解析): {normalized_name}") f"模型名称 '{normalized_name}' 通过 provider_model_name 匹配到 "
return None f"GlobalModel: {gm.name} (Model: {model.id[:8]}...)"
)
# 优先使用 provider_model_name 的直接匹配,其次才是 aliases同级别按名称排序保证确定性 # 如果通过 provider_model_name/alias 找到了,直接返回
matched_global_models.sort( if matched_models_dict:
key=lambda item: (0 if item[1] == "provider_model_name" else 1, item[0].name) # 转换为列表并排序:按 priorityalias=0 优先)、然后按 GlobalModel.name
) matched_global_models = [
(gm, match_type) for gm, match_type, priority in matched_models_dict.values()
# 记录解析方式 ]
resolution_method = matched_global_models[0][1] matched_global_models.sort(
key=lambda item: (
if len(matched_global_models) > 1: 0 if item[1] == "alias" else 1, # alias 优先
# 检测到别名冲突 item[0].name # 同优先级按名称排序(确定性)
unique_models = {gm.id: gm for gm, _ in matched_global_models}
if len(unique_models) > 1:
model_names = [gm.name for gm in unique_models.values()]
logger.warning(
f"别名冲突: 模型名称 '{normalized_name}' 匹配到多个不同的 GlobalModel: "
f"{', '.join(model_names)},使用第一个匹配结果"
) )
# 记录别名冲突指标 )
model_alias_conflict_total.inc()
# 返回第一个匹配的 GlobalModel # 记录解析方式
result_global_model: GlobalModel = matched_global_models[0][0] resolution_method = matched_global_models[0][1]
global_model_dict = ModelCacheService._global_model_to_dict(result_global_model)
if len(matched_global_models) > 1:
# 检测到冲突
unique_models = {gm.id: gm for gm, _ in matched_global_models}
if len(unique_models) > 1:
model_names = [gm.name for gm in unique_models.values()]
logger.warning(
f"模型冲突: 名称 '{normalized_name}' 匹配到多个不同的 GlobalModel: "
f"{', '.join(model_names)},使用第一个匹配结果(别名优先)"
)
# 记录冲突指标
model_alias_conflict_total.inc()
# 返回第一个匹配的 GlobalModel
result_global_model: GlobalModel = matched_global_models[0][0]
global_model_dict = ModelCacheService._global_model_to_dict(result_global_model)
await CacheService.set(
cache_key, global_model_dict, ttl_seconds=ModelCacheService.CACHE_TTL
)
logger.debug(
f"GlobalModel 已缓存(别名解析-{resolution_method}): {normalized_name} -> {result_global_model.name}"
)
return result_global_model
# 3. 如果通过 provider 别名没找到,最后尝试直接通过 GlobalModel.name 查找
global_model = (
db.query(GlobalModel)
.filter(GlobalModel.name == normalized_name, GlobalModel.is_active == True)
.first()
)
if global_model:
resolution_method = "direct_match"
# 缓存结果
global_model_dict = ModelCacheService._global_model_to_dict(global_model)
await CacheService.set(
cache_key, global_model_dict, ttl_seconds=ModelCacheService.CACHE_TTL
)
logger.debug(f"GlobalModel 已缓存(别名解析-直接匹配): {normalized_name}")
return global_model
# 4. 完全未找到
resolution_method = "not_found"
# 未找到匹配,缓存负结果
await CacheService.set( await CacheService.set(
cache_key, global_model_dict, ttl_seconds=ModelCacheService.CACHE_TTL cache_key, "NOT_FOUND", ttl_seconds=ModelCacheService.CACHE_TTL
) )
logger.debug( logger.debug(f"GlobalModel 未找到(别名解析): {normalized_name}")
f"GlobalModel 已缓存(别名解析): {normalized_name} -> {result_global_model.name}" return None
)
return result_global_model
finally:
# 记录监控指标