Initial commit

fawney19
2025-12-10 20:52:44 +08:00
commit f784106826
485 changed files with 110993 additions and 0 deletions

View File

@@ -0,0 +1,14 @@
"""Admin monitoring router合集。"""
from fastapi import APIRouter
from .audit import router as audit_router
from .cache import router as cache_router
from .trace import router as trace_router
router = APIRouter()
router.include_router(audit_router)
router.include_router(cache_router)
router.include_router(trace_router)
__all__ = ["router"]

View File

@@ -0,0 +1,399 @@
"""管理员监控与审计端点。"""
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from sqlalchemy import func
from sqlalchemy.orm import Session
from src.api.base.admin_adapter import AdminApiAdapter
from src.api.base.pagination import PaginationMeta, build_pagination_payload, paginate_query
from src.api.base.pipeline import ApiRequestPipeline
from src.core.logger import logger
from src.database import get_db
from src.models.database import (
ApiKey,
AuditEventType,
AuditLog,
Provider,
Usage,
)
from src.models.database import User as DBUser
from src.services.health.monitor import HealthMonitor
from src.services.system.audit import audit_service
router = APIRouter(prefix="/api/admin/monitoring", tags=["Admin - Monitoring"])
pipeline = ApiRequestPipeline()
@router.get("/audit-logs")
async def get_audit_logs(
request: Request,
user_id: Optional[str] = Query(None, description="Filter by user ID (UUID supported)"),
event_type: Optional[str] = Query(None, description="Filter by event type"),
days: int = Query(7, description="Number of days to query"),
limit: int = Query(100, description="Maximum number of results"),
offset: int = Query(0, description="Offset"),
db: Session = Depends(get_db),
):
adapter = AdminGetAuditLogsAdapter(
user_id=user_id,
event_type=event_type,
days=days,
limit=limit,
offset=offset,
)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/system-status")
async def get_system_status(request: Request, db: Session = Depends(get_db)):
adapter = AdminSystemStatusAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/suspicious-activities")
async def get_suspicious_activities(
request: Request,
hours: int = Query(24, description="Time range in hours"),
db: Session = Depends(get_db),
):
adapter = AdminSuspiciousActivitiesAdapter(hours=hours)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/user-behavior/{user_id}")
async def analyze_user_behavior(
user_id: str,
request: Request,
days: int = Query(30, description="Number of days to analyze"),
db: Session = Depends(get_db),
):
adapter = AdminUserBehaviorAdapter(user_id=user_id, days=days)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/resilience-status")
async def get_resilience_status(request: Request, db: Session = Depends(get_db)):
adapter = AdminResilienceStatusAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.delete("/resilience/error-stats")
async def reset_error_stats(request: Request, db: Session = Depends(get_db)):
"""Reset resilience error statistics"""
adapter = AdminResetErrorStatsAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/resilience/circuit-history")
async def get_circuit_history(
request: Request,
limit: int = Query(50, ge=1, le=200),
db: Session = Depends(get_db),
):
adapter = AdminCircuitHistoryAdapter(limit=limit)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@dataclass
class AdminGetAuditLogsAdapter(AdminApiAdapter):
user_id: Optional[str]
event_type: Optional[str]
days: int
limit: int
offset: int
async def handle(self, context): # type: ignore[override]
db = context.db
cutoff_time = datetime.now(timezone.utc) - timedelta(days=self.days)
base_query = (
db.query(AuditLog, DBUser)
.outerjoin(DBUser, AuditLog.user_id == DBUser.id)
.filter(AuditLog.created_at >= cutoff_time)
)
if self.user_id:
base_query = base_query.filter(AuditLog.user_id == self.user_id)
if self.event_type:
base_query = base_query.filter(AuditLog.event_type == self.event_type)
ordered_query = base_query.order_by(AuditLog.created_at.desc())
total, logs_with_users = paginate_query(ordered_query, self.limit, self.offset)
items = [
{
"id": log.id,
"event_type": log.event_type,
"user_id": log.user_id,
"user_email": user.email if user else None,
"user_username": user.username if user else None,
"description": log.description,
"ip_address": log.ip_address,
"status_code": log.status_code,
"error_message": log.error_message,
"metadata": log.event_metadata,
"created_at": log.created_at.isoformat() if log.created_at else None,
}
for log, user in logs_with_users
]
meta = PaginationMeta(
total=total,
limit=self.limit,
offset=self.offset,
count=len(items),
)
payload = build_pagination_payload(
items,
meta,
filters={
"user_id": self.user_id,
"event_type": self.event_type,
"days": self.days,
},
)
context.add_audit_metadata(
action="monitor_audit_logs",
filter_user_id=self.user_id,
filter_event_type=self.event_type,
days=self.days,
limit=self.limit,
offset=self.offset,
total=total,
result_count=meta.count,
)
return payload
class AdminSystemStatusAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
db = context.db
total_users = db.query(func.count(DBUser.id)).scalar()
active_users = db.query(func.count(DBUser.id)).filter(DBUser.is_active.is_(True)).scalar()
total_providers = db.query(func.count(Provider.id)).scalar()
active_providers = (
db.query(func.count(Provider.id)).filter(Provider.is_active.is_(True)).scalar()
)
total_api_keys = db.query(func.count(ApiKey.id)).scalar()
active_api_keys = (
db.query(func.count(ApiKey.id)).filter(ApiKey.is_active.is_(True)).scalar()
)
today_start = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
today_requests = (
db.query(func.count(Usage.id)).filter(Usage.created_at >= today_start).scalar()
)
today_tokens = (
db.query(func.sum(Usage.total_tokens)).filter(Usage.created_at >= today_start).scalar()
or 0
)
today_cost = (
db.query(func.sum(Usage.total_cost_usd))
.filter(Usage.created_at >= today_start)
.scalar()
or 0
)
recent_errors = (
db.query(AuditLog)
.filter(
AuditLog.event_type.in_(
[
AuditEventType.REQUEST_FAILED.value,
AuditEventType.SUSPICIOUS_ACTIVITY.value,
]
),
AuditLog.created_at >= datetime.now(timezone.utc) - timedelta(hours=1),
)
.count()
)
context.add_audit_metadata(
action="system_status_snapshot",
total_users=int(total_users or 0),
active_users=int(active_users or 0),
total_providers=int(total_providers or 0),
active_providers=int(active_providers or 0),
total_api_keys=int(total_api_keys or 0),
active_api_keys=int(active_api_keys or 0),
today_requests=int(today_requests or 0),
today_tokens=int(today_tokens or 0),
today_cost=float(today_cost or 0.0),
recent_errors=int(recent_errors or 0),
)
return {
"timestamp": datetime.now(timezone.utc).isoformat(),
"users": {"total": total_users, "active": active_users},
"providers": {"total": total_providers, "active": active_providers},
"api_keys": {"total": total_api_keys, "active": active_api_keys},
"today_stats": {
"requests": today_requests,
"tokens": today_tokens,
"cost_usd": f"${today_cost:.4f}",
},
"recent_errors": recent_errors,
}
@dataclass
class AdminSuspiciousActivitiesAdapter(AdminApiAdapter):
hours: int
async def handle(self, context): # type: ignore[override]
db = context.db
activities = audit_service.get_suspicious_activities(db=db, hours=self.hours, limit=100)
response = {
"activities": [
{
"id": activity.id,
"event_type": activity.event_type,
"user_id": activity.user_id,
"description": activity.description,
"ip_address": activity.ip_address,
"metadata": activity.event_metadata,
"created_at": activity.created_at.isoformat() if activity.created_at else None,
}
for activity in activities
],
"count": len(activities),
"time_range_hours": self.hours,
}
context.add_audit_metadata(
action="monitor_suspicious_activity",
hours=self.hours,
result_count=len(activities),
)
return response
@dataclass
class AdminUserBehaviorAdapter(AdminApiAdapter):
user_id: str
days: int
async def handle(self, context): # type: ignore[override]
result = audit_service.analyze_user_behavior(
db=context.db,
user_id=self.user_id,
days=self.days,
)
context.add_audit_metadata(
action="monitor_user_behavior",
target_user_id=self.user_id,
days=self.days,
contains_summary=bool(result),
)
return result
class AdminResilienceStatusAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
try:
from src.core.resilience import resilience_manager
except ImportError as exc:
raise HTTPException(status_code=503, detail="Resilience management system is not enabled") from exc
error_stats = resilience_manager.get_error_stats()
recent_errors = [
{
"error_id": info["error_id"],
"error_type": info["error_type"],
"operation": info["operation"],
"timestamp": info["timestamp"].isoformat(),
"context": info.get("context", {}),
}
for info in resilience_manager.last_errors[-10:]
]
total_errors = error_stats.get("total_errors", 0)
circuit_breakers = error_stats.get("circuit_breakers", {})
circuit_breakers_open = sum(
1 for status in circuit_breakers.values() if status.get("state") == "open"
)
health_score = max(0, 100 - (total_errors * 2) - (circuit_breakers_open * 20))
response = {
"timestamp": datetime.now(timezone.utc).isoformat(),
"health_score": health_score,
"status": (
"healthy" if health_score > 80 else "degraded" if health_score > 50 else "critical"
),
"error_statistics": error_stats,
"recent_errors": recent_errors,
"recommendations": _get_health_recommendations(error_stats, health_score),
}
context.add_audit_metadata(
action="resilience_status",
health_score=health_score,
error_total=error_stats.get("total_errors") if isinstance(error_stats, dict) else None,
open_circuit_breakers=circuit_breakers_open,
)
return response
class AdminResetErrorStatsAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
try:
from src.core.resilience import resilience_manager
except ImportError as exc:
raise HTTPException(status_code=503, detail="Resilience management system is not enabled") from exc
old_stats = resilience_manager.get_error_stats()
resilience_manager.error_stats.clear()
resilience_manager.last_errors.clear()
logger.info(f"管理员 {context.user.email if context.user else 'unknown'} 重置了错误统计")
context.add_audit_metadata(
action="reset_error_stats",
previous_total_errors=(
old_stats.get("total_errors") if isinstance(old_stats, dict) else None
),
)
return {
"message": "错误统计已重置",
"previous_stats": old_stats,
"reset_by": context.user.email if context.user else None,
"reset_at": datetime.now(timezone.utc).isoformat(),
}
class AdminCircuitHistoryAdapter(AdminApiAdapter):
def __init__(self, limit: int = 50):
super().__init__()
self.limit = limit
async def handle(self, context): # type: ignore[override]
history = HealthMonitor.get_circuit_history(self.limit)
context.add_audit_metadata(
action="circuit_history",
limit=self.limit,
result_count=len(history),
)
return {"items": history, "count": len(history)}
def _get_health_recommendations(error_stats: dict, health_score: int) -> List[str]:
recommendations: List[str] = []
if health_score < 50:
recommendations.append("系统健康状况严重,请立即检查错误日志")
if error_stats.get("total_errors", 0) > 100:
recommendations.append("错误频率过高,建议检查系统配置和外部依赖")
circuit_breakers = error_stats.get("circuit_breakers", {})
open_breakers = [k for k, v in circuit_breakers.items() if v.get("state") == "open"]
if open_breakers:
recommendations.append(f"以下服务熔断器已打开:{', '.join(open_breakers)}")
if health_score > 90:
recommendations.append("系统运行良好")
return recommendations
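
A quick illustration of how the health score formula and the recommendations combine, using made-up error statistics (all values below are hypothetical):

# Hypothetical stats to exercise _get_health_recommendations; not real output.
sample_stats = {
    "total_errors": 12,
    "circuit_breakers": {"openai": {"state": "open"}, "anthropic": {"state": "closed"}},
}
open_count = sum(1 for s in sample_stats["circuit_breakers"].values() if s.get("state") == "open")
score = max(0, 100 - sample_stats["total_errors"] * 2 - open_count * 20)  # 100 - 24 - 20 = 56
print(_get_health_recommendations(sample_stats, score))
# -> ["Circuit breakers are open for: openai"]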

View File

@@ -0,0 +1,871 @@
"""
缓存监控端点
提供缓存亲和性统计、管理和监控功能
"""
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import PlainTextResponse
from sqlalchemy.orm import Session
from src.api.base.admin_adapter import AdminApiAdapter
from src.api.base.pagination import PaginationMeta, build_pagination_payload, paginate_sequence
from src.api.base.pipeline import ApiRequestPipeline
from src.clients.redis_client import get_redis_client_sync
from src.core.crypto import crypto_service
from src.core.logger import logger
from src.database import get_db
from src.models.database import ApiKey, User
from src.services.cache.affinity_manager import get_affinity_manager
from src.services.cache.aware_scheduler import get_cache_aware_scheduler
router = APIRouter(prefix="/api/admin/monitoring/cache", tags=["Admin - Monitoring: Cache"])
pipeline = ApiRequestPipeline()
def mask_api_key(api_key: Optional[str], prefix_len: int = 8, suffix_len: int = 4) -> Optional[str]:
"""
脱敏 API Key显示前缀 + 星号 + 后缀
例如: sk-jhiId-xxxxxxxxxxxAABB -> sk-jhiId-********AABB
Args:
api_key: 原始 API Key
prefix_len: 显示的前缀长度,默认 8
suffix_len: 显示的后缀长度,默认 4
"""
if not api_key:
return None
total_visible = prefix_len + suffix_len
if len(api_key) <= total_visible:
# Key is too short; return the visible prefix plus asterisks
return api_key[:prefix_len] + "********"
return f"{api_key[:prefix_len]}********{api_key[-suffix_len:]}"
def decrypt_and_mask(encrypted_key: Optional[str], prefix_len: int = 8) -> Optional[str]:
"""
解密 API Key 后脱敏显示
Args:
encrypted_key: 加密后的 API Key
prefix_len: 显示的前缀长度
"""
if not encrypted_key:
return None
try:
decrypted = crypto_service.decrypt(encrypted_key)
return mask_api_key(decrypted, prefix_len)
except Exception:
# Return None if decryption fails
return None
def resolve_user_identifier(db: Session, identifier: str) -> Optional[str]:
"""
将用户标识符username/email/user_id/api_key_id解析为 user_id
支持的输入格式:
1. User UUID (36位带横杠)
2. Username (用户名)
3. Email (邮箱)
4. API Key ID (36位UUID)
返回:
- user_id (UUID字符串) 或 None
"""
identifier = identifier.strip()
# 1. Try it as a User UUID first
user = db.query(User).filter(User.id == identifier).first()
if user:
logger.debug(f"通过User ID解析: {identifier[:8]}... -> {user.username}")
return user.id
# 2. Try it as a username
user = db.query(User).filter(User.username == identifier).first()
if user:
logger.debug(f"通过Username解析: {identifier} -> {user.id[:8]}...")
return user.id
# 3. Try it as an email
user = db.query(User).filter(User.email == identifier).first()
if user:
logger.debug(f"通过Email解析: {identifier} -> {user.id[:8]}...")
return user.id
# 4. Try it as an API Key ID
api_key = db.query(ApiKey).filter(ApiKey.id == identifier).first()
if api_key:
logger.debug(f"通过API Key ID解析: {identifier[:8]}... -> User ID: {api_key.user_id[:8]}...")
return api_key.user_id
# Not recognized
logger.debug(f"Unrecognized user identifier: {identifier}")
return None
@router.get("/stats")
async def get_cache_stats(
request: Request,
db: Session = Depends(get_db),
):
"""
获取缓存亲和性统计信息
返回:
- 缓存命中率
- 缓存用户数
- Provider切换次数
- Key切换次数
- 缓存预留配置
"""
adapter = AdminCacheStatsAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/affinity/{user_identifier}")
async def get_user_affinity(
user_identifier: str,
request: Request,
db: Session = Depends(get_db),
):
"""
查询指定用户的所有缓存亲和性
参数:
- user_identifier: 用户标识符,支持以下格式:
* 用户名 (username),如: yuanhonghu
* 邮箱 (email),如: user@example.com
* 用户UUID (user_id),如: 550e8400-e29b-41d4-a716-446655440000
* API Key ID如: 660e8400-e29b-41d4-a716-446655440000
返回:
- 用户信息
- 所有端点的缓存亲和性列表(每个端点一条记录)
"""
adapter = AdminGetUserAffinityAdapter(user_identifier=user_identifier)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/affinities")
async def list_affinities(
request: Request,
keyword: Optional[str] = None,
limit: int = Query(100, ge=1, le=1000, description="Maximum number of results"),
offset: int = Query(0, ge=0, description="Offset"),
db: Session = Depends(get_db),
):
"""
获取所有缓存亲和性列表,可选按关键词过滤
参数:
- keyword: 可选,支持用户名/邮箱/User ID/API Key ID 或模糊匹配
"""
adapter = AdminListAffinitiesAdapter(keyword=keyword, limit=limit, offset=offset)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.delete("/users/{user_identifier}")
async def clear_user_cache(
user_identifier: str,
request: Request,
db: Session = Depends(get_db),
):
"""
Clear cache affinity for a specific user
Parameters:
- user_identifier: User identifier (username, email, user_id, or API Key ID)
"""
adapter = AdminClearUserCacheAdapter(user_identifier=user_identifier)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.delete("")
async def clear_all_cache(
request: Request,
db: Session = Depends(get_db),
):
"""
Clear all cache affinities
Warning: This affects all users, use with caution
"""
adapter = AdminClearAllCacheAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.delete("/providers/{provider_id}")
async def clear_provider_cache(
provider_id: str,
request: Request,
db: Session = Depends(get_db),
):
"""
Clear cache affinities for a specific provider
Parameters:
- provider_id: Provider ID
"""
adapter = AdminClearProviderCacheAdapter(provider_id=provider_id)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/config")
async def get_cache_config(
request: Request,
db: Session = Depends(get_db),
):
"""
获取缓存相关配置
返回:
- 缓存TTL
- 缓存预留比例
"""
adapter = AdminCacheConfigAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/metrics", response_class=PlainTextResponse)
async def get_cache_metrics(
request: Request,
db: Session = Depends(get_db),
):
"""
以 Prometheus 文本格式暴露缓存调度指标,方便接入 Grafana。
"""
adapter = AdminCacheMetricsAdapter()
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
# -------- Cache monitoring adapters --------
class AdminCacheStatsAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
try:
redis_client = get_redis_client_sync()
scheduler = await get_cache_aware_scheduler(redis_client)
stats = await scheduler.get_stats()
logger.info("缓存统计信息查询成功")
context.add_audit_metadata(
action="cache_stats",
scheduler=stats.get("scheduler"),
total_affinities=stats.get("total_affinities"),
cache_hit_rate=stats.get("cache_hit_rate"),
provider_switches=stats.get("provider_switches"),
)
return {"status": "ok", "data": stats}
except Exception as exc:
logger.exception(f"获取缓存统计信息失败: {exc}")
raise HTTPException(status_code=500, detail=f"获取缓存统计失败: {exc}")
class AdminCacheMetricsAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
try:
redis_client = get_redis_client_sync()
scheduler = await get_cache_aware_scheduler(redis_client)
stats = await scheduler.get_stats()
payload = self._format_prometheus(stats)
context.add_audit_metadata(
action="cache_metrics_export",
scheduler=stats.get("scheduler"),
metrics_lines=payload.count("\n"),
)
return PlainTextResponse(payload)
except Exception as exc:
logger.exception(f"导出缓存指标失败: {exc}")
raise HTTPException(status_code=500, detail=f"导出缓存指标失败: {exc}")
def _format_prometheus(self, stats: Dict[str, Any]) -> str:
"""
将 scheduler/affinity 指标转换为 Prometheus 文本格式。
"""
scheduler_metrics = stats.get("scheduler_metrics", {})
affinity_stats = stats.get("affinity_stats", {})
metric_map: List[Tuple[str, str, float]] = [
(
"cache_scheduler_total_batches",
"Total batches pulled from provider list",
float(scheduler_metrics.get("total_batches", 0)),
),
(
"cache_scheduler_last_batch_size",
"Size of the latest candidate batch",
float(scheduler_metrics.get("last_batch_size", 0)),
),
(
"cache_scheduler_total_candidates",
"Total candidates enumerated by scheduler",
float(scheduler_metrics.get("total_candidates", 0)),
),
(
"cache_scheduler_last_candidate_count",
"Number of candidates in the most recent batch",
float(scheduler_metrics.get("last_candidate_count", 0)),
),
(
"cache_scheduler_cache_hits",
"Cache hits counted during scheduling",
float(scheduler_metrics.get("cache_hits", 0)),
),
(
"cache_scheduler_cache_misses",
"Cache misses counted during scheduling",
float(scheduler_metrics.get("cache_misses", 0)),
),
(
"cache_scheduler_cache_hit_rate",
"Cache hit rate during scheduling",
float(scheduler_metrics.get("cache_hit_rate", 0.0)),
),
(
"cache_scheduler_concurrency_denied",
"Times candidate rejected due to concurrency limits",
float(scheduler_metrics.get("concurrency_denied", 0)),
),
(
"cache_scheduler_avg_candidates_per_batch",
"Average candidates per batch",
float(scheduler_metrics.get("avg_candidates_per_batch", 0.0)),
),
]
affinity_map: List[Tuple[str, str, float]] = [
(
"cache_affinity_total",
"Total cache affinities stored",
float(affinity_stats.get("total_affinities", 0)),
),
(
"cache_affinity_hits",
"Affinity cache hits",
float(affinity_stats.get("cache_hits", 0)),
),
(
"cache_affinity_misses",
"Affinity cache misses",
float(affinity_stats.get("cache_misses", 0)),
),
(
"cache_affinity_hit_rate",
"Affinity cache hit rate",
float(affinity_stats.get("cache_hit_rate", 0.0)),
),
(
"cache_affinity_invalidations",
"Affinity invalidations",
float(affinity_stats.get("cache_invalidations", 0)),
),
(
"cache_affinity_provider_switches",
"Affinity provider switches",
float(affinity_stats.get("provider_switches", 0)),
),
(
"cache_affinity_key_switches",
"Affinity key switches",
float(affinity_stats.get("key_switches", 0)),
),
]
lines = []
for name, help_text, value in metric_map + affinity_map:
lines.append(f"# HELP {name} {help_text}")
lines.append(f"# TYPE {name} gauge")
lines.append(f"{name} {value}")
scheduler_name = stats.get("scheduler", "cache_aware")
lines.append(f'cache_scheduler_info{{scheduler="{scheduler_name}"}} 1')
return "\n".join(lines) + "\n"
@dataclass
class AdminGetUserAffinityAdapter(AdminApiAdapter):
user_identifier: str
async def handle(self, context): # type: ignore[override]
db = context.db
try:
user_id = resolve_user_identifier(db, self.user_identifier)
if not user_id:
raise HTTPException(
status_code=404,
detail=f"无法识别的用户标识符: {self.user_identifier}。支持用户名、邮箱、User ID或API Key ID",
)
user = db.query(User).filter(User.id == user_id).first()
redis_client = get_redis_client_sync()
affinity_mgr = await get_affinity_manager(redis_client)
# Fetch all cache affinities for this user
all_affinities = await affinity_mgr.list_affinities()
user_affinities = [aff for aff in all_affinities if aff.get("user_id") == user_id]
if not user_affinities:
response = {
"status": "not_found",
"message": f"用户 {user.username} ({user.email}) 没有缓存亲和性",
"user_info": {
"user_id": user_id,
"username": user.username,
"email": user.email,
},
"affinities": [],
}
context.add_audit_metadata(
action="cache_user_affinity",
user_identifier=self.user_identifier,
resolved_user_id=user_id,
affinity_count=0,
status="not_found",
)
return response
response = {
"status": "ok",
"user_info": {
"user_id": user_id,
"username": user.username,
"email": user.email,
},
"affinities": [
{
"provider_id": aff["provider_id"],
"endpoint_id": aff["endpoint_id"],
"key_id": aff["key_id"],
"api_format": aff.get("api_format"),
"model_name": aff.get("model_name"),
"created_at": aff["created_at"],
"expire_at": aff["expire_at"],
"request_count": aff["request_count"],
}
for aff in user_affinities
],
"total_endpoints": len(user_affinities),
}
context.add_audit_metadata(
action="cache_user_affinity",
user_identifier=self.user_identifier,
resolved_user_id=user_id,
affinity_count=len(user_affinities),
status="ok",
)
return response
except HTTPException:
raise
except Exception as exc:
logger.exception(f"查询用户缓存亲和性失败: {exc}")
raise HTTPException(status_code=500, detail=f"查询失败: {exc}")
@dataclass
class AdminListAffinitiesAdapter(AdminApiAdapter):
keyword: Optional[str]
limit: int
offset: int
async def handle(self, context): # type: ignore[override]
db = context.db
redis_client = get_redis_client_sync()
if not redis_client:
raise HTTPException(status_code=503, detail="Redis未初始化无法获取缓存亲和性")
affinity_mgr = await get_affinity_manager(redis_client)
matched_user_id = None
matched_api_key_id = None
raw_affinities: List[Dict[str, Any]] = []
if self.keyword:
# First check whether the keyword is an API Key ID (the affinity_key)
api_key = db.query(ApiKey).filter(ApiKey.id == self.keyword).first()
if api_key:
# Filter directly by affinity_key
matched_api_key_id = str(api_key.id)
matched_user_id = str(api_key.user_id)
all_affinities = await affinity_mgr.list_affinities()
raw_affinities = [
aff for aff in all_affinities if aff.get("affinity_key") == matched_api_key_id
]
else:
# Try to resolve it as a user identifier
user_id = resolve_user_identifier(db, self.keyword)
if user_id:
matched_user_id = user_id
# Collect all of the user's API key IDs
user_api_keys = db.query(ApiKey).filter(ApiKey.user_id == user_id).all()
user_api_key_ids = {str(k.id) for k in user_api_keys}
# Keep only affinities belonging to the user's API keys
all_affinities = await affinity_mgr.list_affinities()
raw_affinities = [
aff for aff in all_affinities if aff.get("affinity_key") in user_api_key_ids
]
else:
# The keyword is not a recognized identifier; list everything (fuzzy matching happens below)
raw_affinities = await affinity_mgr.list_affinities()
else:
raw_affinities = await affinity_mgr.list_affinities()
# Collect all affinity_keys (API Key IDs)
affinity_keys = {
item.get("affinity_key") for item in raw_affinities if item.get("affinity_key")
}
# Batch-load user API key records
user_api_key_map: Dict[str, ApiKey] = {}
if affinity_keys:
user_api_keys = db.query(ApiKey).filter(ApiKey.id.in_(list(affinity_keys))).all()
user_api_key_map = {str(k.id): k for k in user_api_keys}
# Collect all user_ids
user_ids = {str(k.user_id) for k in user_api_key_map.values()}
user_map: Dict[str, User] = {}
if user_ids:
users = db.query(User).filter(User.id.in_(list(user_ids))).all()
user_map = {str(user.id): user for user in users}
# Collect all provider_ids, endpoint_ids, and key_ids
provider_ids = {
item.get("provider_id") for item in raw_affinities if item.get("provider_id")
}
endpoint_ids = {
item.get("endpoint_id") for item in raw_affinities if item.get("endpoint_id")
}
key_ids = {item.get("key_id") for item in raw_affinities if item.get("key_id")}
# Batch-load Provider, Endpoint, and Key records
from src.models.database import Provider, ProviderAPIKey, ProviderEndpoint
provider_map = {}
if provider_ids:
providers = db.query(Provider).filter(Provider.id.in_(list(provider_ids))).all()
provider_map = {p.id: p for p in providers}
endpoint_map = {}
if endpoint_ids:
endpoints = (
db.query(ProviderEndpoint).filter(ProviderEndpoint.id.in_(list(endpoint_ids))).all()
)
endpoint_map = {e.id: e for e in endpoints}
key_map = {}
if key_ids:
keys = db.query(ProviderAPIKey).filter(ProviderAPIKey.id.in_(list(key_ids))).all()
key_map = {k.id: k for k in keys}
# Collect all model_names (these actually store global_model_id) and batch-load GlobalModels
from src.models.database import GlobalModel
global_model_ids = {
item.get("model_name") for item in raw_affinities if item.get("model_name")
}
global_model_map: Dict[str, GlobalModel] = {}
if global_model_ids:
# model_name may be a UUID-format global_model_id or a raw model name
global_models = db.query(GlobalModel).filter(
GlobalModel.id.in_(list(global_model_ids))
).all()
global_model_map = {str(gm.id): gm for gm in global_models}
keyword_lower = self.keyword.lower() if self.keyword else None
items = []
for affinity in raw_affinities:
affinity_key = affinity.get("affinity_key")
if not affinity_key:
continue
# Use affinity_key (the API Key ID) to find the user API key and the user
user_api_key = user_api_key_map.get(affinity_key)
user = user_map.get(str(user_api_key.user_id)) if user_api_key else None
user_id = str(user_api_key.user_id) if user_api_key else None
provider_id = affinity.get("provider_id")
endpoint_id = affinity.get("endpoint_id")
key_id = affinity.get("key_id")
provider = provider_map.get(provider_id)
endpoint = endpoint_map.get(endpoint_id)
key = key_map.get(key_id)
# Masked display of the user API key (decrypt key_encrypted, then mask)
user_api_key_masked = None
if user_api_key and user_api_key.key_encrypted:
user_api_key_masked = decrypt_and_mask(user_api_key.key_encrypted)
# Masked display of the provider key (decrypt api_key, then mask)
provider_key_masked = None
if key and key.api_key:
provider_key_masked = decrypt_and_mask(key.api_key)
item = {
"affinity_key": affinity_key,
"user_api_key_name": user_api_key.name if user_api_key else None,
"user_api_key_prefix": user_api_key_masked,
"is_standalone": user_api_key.is_standalone if user_api_key else False,
"user_id": user_id,
"username": user.username if user else None,
"email": user.email if user else None,
"provider_id": provider_id,
"provider_name": provider.display_name if provider else None,
"endpoint_id": endpoint_id,
"endpoint_api_format": (
endpoint.api_format if endpoint and endpoint.api_format else None
),
"endpoint_url": endpoint.base_url if endpoint else None,
"key_id": key_id,
"key_name": key.name if key else None,
"key_prefix": provider_key_masked,
"rate_multiplier": key.rate_multiplier if key else 1.0,
"model_name": (
global_model_map.get(affinity.get("model_name")).name
if affinity.get("model_name") and global_model_map.get(affinity.get("model_name"))
else affinity.get("model_name") # 如果找不到 GlobalModel显示原始值
),
"model_display_name": (
global_model_map.get(affinity.get("model_name")).display_name
if affinity.get("model_name") and global_model_map.get(affinity.get("model_name"))
else None
),
"api_format": affinity.get("api_format"),
"created_at": affinity.get("created_at"),
"expire_at": affinity.get("expire_at"),
"request_count": affinity.get("request_count", 0),
}
if keyword_lower and not matched_user_id and not matched_api_key_id:
searchable = [
item["affinity_key"],
item["user_api_key_name"] or "",
item["user_id"] or "",
item["username"] or "",
item["email"] or "",
item["provider_id"] or "",
item["key_id"] or "",
]
if not any(keyword_lower in str(value).lower() for value in searchable if value):
continue
items.append(item)
items.sort(key=lambda x: x.get("expire_at") or 0, reverse=True)
paged_items, meta = paginate_sequence(items, self.limit, self.offset)
payload = build_pagination_payload(
paged_items,
meta,
matched_user_id=matched_user_id,
)
response = {
"status": "ok",
"data": payload,
}
result_count = meta.count if hasattr(meta, "count") else len(paged_items)
context.add_audit_metadata(
action="cache_affinity_list",
keyword=self.keyword,
matched_user_id=matched_user_id,
matched_api_key_id=matched_api_key_id,
limit=self.limit,
offset=self.offset,
result_count=result_count,
)
return response
@dataclass
class AdminClearUserCacheAdapter(AdminApiAdapter):
user_identifier: str
async def handle(self, context): # type: ignore[override]
db = context.db
try:
redis_client = get_redis_client_sync()
affinity_mgr = await get_affinity_manager(redis_client)
# First check whether the identifier is directly an API Key ID (affinity_key)
api_key = db.query(ApiKey).filter(ApiKey.id == self.user_identifier).first()
if api_key:
# Clear directly by affinity_key
affinity_key = str(api_key.id)
user = db.query(User).filter(User.id == api_key.user_id).first()
all_affinities = await affinity_mgr.list_affinities()
target_affinities = [
aff for aff in all_affinities if aff.get("affinity_key") == affinity_key
]
count = 0
for aff in target_affinities:
api_format = aff.get("api_format")
model_name = aff.get("model_name")
endpoint_id = aff.get("endpoint_id")
if api_format and model_name:
await affinity_mgr.invalidate_affinity(
affinity_key, api_format, model_name, endpoint_id=endpoint_id
)
count += 1
logger.info(f"已清除API Key缓存亲和性: api_key_name={api_key.name}, affinity_key={affinity_key[:8]}..., 清除数量={count}")
response = {
"status": "ok",
"message": f"已清除 API Key {api_key.name} 的缓存亲和性",
"user_info": {
"user_id": str(api_key.user_id),
"username": user.username if user else None,
"email": user.email if user else None,
"api_key_id": affinity_key,
"api_key_name": api_key.name,
},
}
context.add_audit_metadata(
action="cache_clear_api_key",
user_identifier=self.user_identifier,
resolved_api_key_id=affinity_key,
cleared_count=count,
)
return response
# Not an API Key ID; try to resolve it as a user identifier
user_id = resolve_user_identifier(db, self.user_identifier)
if not user_id:
raise HTTPException(
status_code=404,
detail=f"无法识别的标识符: {self.user_identifier}。支持用户名、邮箱、User ID或API Key ID",
)
user = db.query(User).filter(User.id == user_id).first()
# Fetch all of the user's API keys
user_api_keys = db.query(ApiKey).filter(ApiKey.user_id == user_id).all()
user_api_key_ids = {str(k.id) for k in user_api_keys}
# Fetch the cache affinities for all of the user's API keys and invalidate them one by one
all_affinities = await affinity_mgr.list_affinities()
user_affinities = [
aff for aff in all_affinities if aff.get("affinity_key") in user_api_key_ids
]
count = 0
for aff in user_affinities:
affinity_key = aff.get("affinity_key")
api_format = aff.get("api_format")
model_name = aff.get("model_name")
endpoint_id = aff.get("endpoint_id")
if affinity_key and api_format and model_name:
await affinity_mgr.invalidate_affinity(
affinity_key, api_format, model_name, endpoint_id=endpoint_id
)
count += 1
logger.info(f"已清除用户缓存亲和性: username={user.username}, user_id={user_id[:8]}..., 清除数量={count}")
response = {
"status": "ok",
"message": f"已清除用户 {user.username} 的所有缓存亲和性",
"user_info": {"user_id": user_id, "username": user.username, "email": user.email},
}
context.add_audit_metadata(
action="cache_clear_user",
user_identifier=self.user_identifier,
resolved_user_id=user_id,
cleared_count=count,
)
return response
except HTTPException:
raise
except Exception as exc:
logger.exception(f"清除用户缓存亲和性失败: {exc}")
raise HTTPException(status_code=500, detail=f"清除失败: {exc}")
class AdminClearAllCacheAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
try:
redis_client = get_redis_client_sync()
affinity_mgr = await get_affinity_manager(redis_client)
count = await affinity_mgr.clear_all()
logger.warning(f"已清除所有缓存亲和性(管理员操作): {count}")
context.add_audit_metadata(
action="cache_clear_all",
cleared_count=count,
)
return {"status": "ok", "message": "已清除所有缓存亲和性", "count": count}
except Exception as exc:
logger.exception(f"清除所有缓存亲和性失败: {exc}")
raise HTTPException(status_code=500, detail=f"清除失败: {exc}")
@dataclass
class AdminClearProviderCacheAdapter(AdminApiAdapter):
provider_id: str
async def handle(self, context): # type: ignore[override]
try:
redis_client = get_redis_client_sync()
affinity_mgr = await get_affinity_manager(redis_client)
count = await affinity_mgr.invalidate_all_for_provider(self.provider_id)
logger.info(f"已清除Provider缓存亲和性: provider_id={self.provider_id[:8]}..., count={count}")
context.add_audit_metadata(
action="cache_clear_provider",
provider_id=self.provider_id,
cleared_count=count,
)
return {
"status": "ok",
"message": "已清除Provider的缓存亲和性",
"provider_id": self.provider_id,
"count": count,
}
except Exception as exc:
logger.exception(f"清除Provider缓存亲和性失败: {exc}")
raise HTTPException(status_code=500, detail=f"清除失败: {exc}")
class AdminCacheConfigAdapter(AdminApiAdapter):
async def handle(self, context): # type: ignore[override]
from src.services.cache.affinity_manager import CacheAffinityManager
from src.services.cache.aware_scheduler import CacheAwareScheduler
from src.services.rate_limit.adaptive_reservation import get_adaptive_reservation_manager
# Read the adaptive reservation manager's configuration
reservation_manager = get_adaptive_reservation_manager()
reservation_stats = reservation_manager.get_stats()
response = {
"status": "ok",
"data": {
"cache_ttl_seconds": CacheAffinityManager.DEFAULT_CACHE_TTL,
"cache_reservation_ratio": CacheAwareScheduler.CACHE_RESERVATION_RATIO,
"dynamic_reservation": {
"enabled": True,
"config": reservation_stats["config"],
"description": {
"probe_phase_requests": "探测阶段请求数阈值",
"probe_reservation": "探测阶段预留比例",
"stable_min_reservation": "稳定阶段最小预留比例",
"stable_max_reservation": "稳定阶段最大预留比例",
"low_load_threshold": "低负载阈值(低于此值使用最小预留)",
"high_load_threshold": "高负载阈值(高于此值根据置信度使用较高预留)",
},
},
"description": {
"cache_ttl": "缓存亲和性有效期(秒)",
"cache_reservation_ratio": "静态预留比例(已被动态预留替代)",
"dynamic_reservation": "动态预留机制配置",
},
},
}
context.add_audit_metadata(
action="cache_config",
cache_ttl_seconds=CacheAffinityManager.DEFAULT_CACHE_TTL,
cache_reservation_ratio=CacheAwareScheduler.CACHE_RESERVATION_RATIO,
dynamic_reservation_enabled=True,
)
return response
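
As a usage sketch, the metrics endpoint can be scraped like any Prometheus target; the base URL and auth header below are assumptions, not part of this commit:

import httpx

# Hypothetical deployment URL and admin token; adjust to the real setup.
resp = httpx.get(
    "http://localhost:8000/api/admin/monitoring/cache/metrics",
    headers={"Authorization": "Bearer <admin-token>"},
)
print(resp.text)  # Prometheus exposition text built by _format_prometheus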

View File

@@ -0,0 +1,280 @@
"""
请求链路追踪 API 端点
"""
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from pydantic import BaseModel
from sqlalchemy.orm import Session
from src.api.base.admin_adapter import AdminApiAdapter
from src.api.base.pipeline import ApiRequestPipeline
from src.database import get_db
from src.models.database import Provider, ProviderEndpoint, ProviderAPIKey
from src.services.request.candidate import RequestCandidateService
router = APIRouter(prefix="/api/admin/monitoring/trace", tags=["Admin - Monitoring: Trace"])
pipeline = ApiRequestPipeline()
class CandidateResponse(BaseModel):
"""候选记录响应"""
id: str
request_id: str
candidate_index: int
retry_index: int = 0  # retry index, starting at 0
provider_id: Optional[str] = None
provider_name: Optional[str] = None
provider_website: Optional[str] = None  # provider website
endpoint_id: Optional[str] = None
endpoint_name: Optional[str] = None  # endpoint display name (api_format)
key_id: Optional[str] = None
key_name: Optional[str] = None  # key name
key_preview: Optional[str] = None  # masked key preview (e.g. sk-***abc)
key_capabilities: Optional[dict] = None  # capabilities supported by the key
required_capabilities: Optional[dict] = None  # capability tags actually required by the request
status: str # 'pending', 'success', 'failed', 'skipped'
skip_reason: Optional[str] = None
is_cached: bool = False
# Execution result fields
status_code: Optional[int] = None
error_type: Optional[str] = None
error_message: Optional[str] = None
latency_ms: Optional[int] = None
concurrent_requests: Optional[int] = None
extra_data: Optional[dict] = None
created_at: datetime
started_at: Optional[datetime] = None
finished_at: Optional[datetime] = None
class Config:
from_attributes = True
class RequestTraceResponse(BaseModel):
"""请求追踪完整响应"""
request_id: str
total_candidates: int
final_status: str # 'success', 'failed', 'streaming', 'pending'
total_latency_ms: int
candidates: List[CandidateResponse]
@router.get("/{request_id}", response_model=RequestTraceResponse)
async def get_request_trace(
request_id: str,
request: Request,
db: Session = Depends(get_db),
):
"""获取特定请求的完整追踪信息"""
adapter = AdminGetRequestTraceAdapter(request_id=request_id)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
@router.get("/stats/provider/{provider_id}")
async def get_provider_failure_rate(
provider_id: str,
request: Request,
limit: int = Query(100, ge=1, le=1000, description="统计最近的尝试数量"),
db: Session = Depends(get_db),
):
"""
获取某个 Provider 的失败率统计
需要管理员权限
"""
adapter = AdminProviderFailureRateAdapter(provider_id=provider_id, limit=limit)
return await pipeline.run(adapter=adapter, http_request=request, db=db, mode=adapter.mode)
# -------- Request tracing adapters --------
@dataclass
class AdminGetRequestTraceAdapter(AdminApiAdapter):
request_id: str
async def handle(self, context): # type: ignore[override]
db = context.db
# Query candidates only
candidates = RequestCandidateService.get_candidates_by_request_id(db, self.request_id)
# Return 404 if there is no data
if not candidates:
raise HTTPException(status_code=404, detail="Request not found")
# Total latency counts only finished candidates (success or failed)
# Use an explicit is-not-None check so fast 0ms responses are not filtered out
total_latency = sum(
c.latency_ms
for c in candidates
if c.status in ("success", "failed") and c.latency_ms is not None
)
# Determine the final status:
# 1. status="success" counts as success regardless of status_code
#    - a streaming request still counts as success even if the client disconnected (499),
#      as long as the provider returned data
# 2. Also treat status_code in the 200-299 range as success, for compatibility with
#    non-streaming requests or legacy data where status was not set correctly
# 3. status="streaming" means a streaming request is still in progress
# 4. status="pending" means the request has not started executing yet
has_success = any(
c.status == "success"
or (c.status_code is not None and 200 <= c.status_code < 300)
for c in candidates
)
has_streaming = any(c.status == "streaming" for c in candidates)
has_pending = any(c.status == "pending" for c in candidates)
if has_success:
final_status = "success"
elif has_streaming:
# A candidate is still streaming
final_status = "streaming"
elif has_pending:
# A candidate is still waiting to execute
final_status = "pending"
else:
final_status = "failed"
# Batch-load provider info to avoid N+1 queries
provider_ids = {c.provider_id for c in candidates if c.provider_id}
provider_map = {}
provider_website_map = {}
if provider_ids:
providers = db.query(Provider).filter(Provider.id.in_(provider_ids)).all()
for p in providers:
provider_map[p.id] = p.name
provider_website_map[p.id] = p.website
# Batch-load endpoint info
endpoint_ids = {c.endpoint_id for c in candidates if c.endpoint_id}
endpoint_map = {}
if endpoint_ids:
endpoints = db.query(ProviderEndpoint).filter(ProviderEndpoint.id.in_(endpoint_ids)).all()
endpoint_map = {e.id: e.api_format for e in endpoints}
# Batch-load key info
key_ids = {c.key_id for c in candidates if c.key_id}
key_map = {}
key_preview_map = {}
key_capabilities_map = {}
if key_ids:
keys = db.query(ProviderAPIKey).filter(ProviderAPIKey.id.in_(key_ids)).all()
for k in keys:
key_map[k.id] = k.name
key_capabilities_map[k.id] = k.capabilities
# Build a masked preview (keep the prefix and the last 4 characters)
api_key = k.api_key or ""
if len(api_key) > 8:
# Detect common key prefix patterns
prefix_end = 0
for prefix in ["sk-", "key-", "api-", "ak-"]:
if api_key.lower().startswith(prefix):
prefix_end = len(prefix)
break
if prefix_end > 0:
key_preview_map[k.id] = f"{api_key[:prefix_end]}***{api_key[-4:]}"
else:
key_preview_map[k.id] = f"{api_key[:3]}***{api_key[-4:]}"
elif len(api_key) > 4:
key_preview_map[k.id] = f"***{api_key[-4:]}"
else:
key_preview_map[k.id] = "***"
# Build the candidate response list
candidate_responses: List[CandidateResponse] = []
for candidate in candidates:
provider_name = (
provider_map.get(candidate.provider_id) if candidate.provider_id else None
)
provider_website = (
provider_website_map.get(candidate.provider_id) if candidate.provider_id else None
)
endpoint_name = (
endpoint_map.get(candidate.endpoint_id) if candidate.endpoint_id else None
)
key_name = (
key_map.get(candidate.key_id) if candidate.key_id else None
)
key_preview = (
key_preview_map.get(candidate.key_id) if candidate.key_id else None
)
key_capabilities = (
key_capabilities_map.get(candidate.key_id) if candidate.key_id else None
)
candidate_responses.append(
CandidateResponse(
id=candidate.id,
request_id=candidate.request_id,
candidate_index=candidate.candidate_index,
retry_index=candidate.retry_index,
provider_id=candidate.provider_id,
provider_name=provider_name,
provider_website=provider_website,
endpoint_id=candidate.endpoint_id,
endpoint_name=endpoint_name,
key_id=candidate.key_id,
key_name=key_name,
key_preview=key_preview,
key_capabilities=key_capabilities,
required_capabilities=candidate.required_capabilities,
status=candidate.status,
skip_reason=candidate.skip_reason,
is_cached=candidate.is_cached,
status_code=candidate.status_code,
error_type=candidate.error_type,
error_message=candidate.error_message,
latency_ms=candidate.latency_ms,
concurrent_requests=candidate.concurrent_requests,
extra_data=candidate.extra_data,
created_at=candidate.created_at,
started_at=candidate.started_at,
finished_at=candidate.finished_at,
)
)
response = RequestTraceResponse(
request_id=self.request_id,
total_candidates=len(candidates),
final_status=final_status,
total_latency_ms=total_latency,
candidates=candidate_responses,
)
context.add_audit_metadata(
action="trace_request_detail",
request_id=self.request_id,
total_candidates=len(candidates),
final_status=final_status,
total_latency_ms=total_latency,
)
return response
@dataclass
class AdminProviderFailureRateAdapter(AdminApiAdapter):
provider_id: str
limit: int
async def handle(self, context): # type: ignore[override]
result = RequestCandidateService.get_candidate_stats_by_provider(
db=context.db,
provider_id=self.provider_id,
limit=self.limit,
)
context.add_audit_metadata(
action="trace_provider_failure_rate",
provider_id=self.provider_id,
limit=self.limit,
total_attempts=result.get("total_attempts"),
failure_rate=result.get("failure_rate"),
)
return result
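
Closing the loop, a minimal sketch of fetching a trace and summarizing candidate outcomes; the request ID, base URL, and token are placeholders, not values from this commit:

import httpx

# Placeholder URL, token, and request ID, for illustration only.
trace = httpx.get(
    "http://localhost:8000/api/admin/monitoring/trace/<request-id>",
    headers={"Authorization": "Bearer <admin-token>"},
).json()
print(trace["final_status"], trace["total_latency_ms"])
for c in trace["candidates"]:
    print(c["candidate_index"], c["status"], c.get("error_type"))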