Mirror of https://github.com/DayuanJiang/next-ai-draw-io.git, synced 2026-01-02 22:32:27 +08:00
fix: add manual token usage reporting to Langfuse for Bedrock streaming
Bedrock streaming responses don't auto-report token usage to OpenTelemetry. This fix manually sets span attributes (ai.usage.promptTokens, gen_ai.usage.input_tokens) from the AI SDK onFinish callback to ensure Langfuse captures token counts.
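The setTraceOutput helper referenced in the diff below is not part of this hunk. As a minimal sketch only, assuming the helper writes the token counts onto the currently active OpenTelemetry span under the attribute names given in the commit message (the repo's actual implementation may differ):

import { trace } from "@opentelemetry/api";

// Hypothetical helper (not from the repo): attach the response text and token
// counts to the active OpenTelemetry span so Langfuse can pick them up.
function setTraceOutput(
    text: string,
    usage?: { promptTokens?: number; completionTokens?: number }
) {
    const span = trace.getActiveSpan();
    if (!span) return;

    span.setAttribute("ai.response.text", text);

    if (usage?.promptTokens != null) {
        // Set both the AI SDK-style and the GenAI semantic-convention
        // attribute names, as the commit message describes.
        span.setAttribute("ai.usage.promptTokens", usage.promptTokens);
        span.setAttribute("gen_ai.usage.input_tokens", usage.promptTokens);
    }
    if (usage?.completionTokens != null) {
        span.setAttribute("ai.usage.completionTokens", usage.completionTokens);
        span.setAttribute("gen_ai.usage.output_tokens", usage.completionTokens);
    }
}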
@@ -182,7 +182,12 @@ ${lastMessageText}
         onFinish: ({ text, usage, providerMetadata }) => {
             console.log('[Cache] Full providerMetadata:', JSON.stringify(providerMetadata, null, 2));
             console.log('[Cache] Usage:', JSON.stringify(usage, null, 2));
-            setTraceOutput(text);
+            // Pass usage to Langfuse (Bedrock streaming doesn't auto-report tokens to telemetry)
+            // AI SDK uses inputTokens/outputTokens, Langfuse expects promptTokens/completionTokens
+            setTraceOutput(text, {
+                promptTokens: usage?.inputTokens,
+                completionTokens: usage?.outputTokens,
+            });
         },
         tools: {
             // Client-side tool that will be executed on the client
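For these span attributes to reach Langfuse at all, the application also needs an OpenTelemetry trace exporter pointed at Langfuse. One common setup with the Vercel AI SDK is the langfuse-vercel exporter registered from instrumentation.ts; this is a sketch of that pattern, not necessarily how this repo is configured, and the serviceName below is a placeholder:

import { registerOTel } from "@vercel/otel";
import { LangfuseExporter } from "langfuse-vercel";

// Next.js instrumentation hook: register an OTel trace exporter that ships
// spans (including the manually set usage attributes) to Langfuse.
export function register() {
    registerOTel({
        serviceName: "next-ai-draw-io",
        traceExporter: new LangfuseExporter(),
    });
}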