Skip to content

Commit b4003af

Browse files
nirga and claude committed
refactor: simplify to single span per chat model call
- Remove redundant task span creation in handleChatModelStart
- Create only LLM completion span like Python implementation
- Update error handling for single span approach
- Update tests to expect single span
- Remove unused imports (trace, context)
- Build and tests passing

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
1 parent 6b4a270 commit b4003af

File tree

2 files changed

+67
-156
lines changed

2 files changed

+67
-156
lines changed

packages/instrumentation-langchain/src/callback_handler.ts

Lines changed: 66 additions & 144 deletions
Original file line numberDiff line numberDiff line change
@@ -21,10 +21,8 @@ import { Serialized } from "@langchain/core/load/serializable";
2121
import { ChainValues } from "@langchain/core/utils/types";
2222
import {
2323
Tracer,
24-
trace,
2524
SpanKind,
2625
SpanStatusCode,
27-
context,
2826
} from "@opentelemetry/api";
2927
import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
3028

@@ -51,54 +49,24 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
5149
llm: Serialized,
5250
messages: BaseMessage[][],
5351
runId: string,
54-
parentRunId?: string,
55-
extraParams?: Record<string, unknown>,
56-
tags?: string[],
57-
metadata?: Record<string, unknown>,
58-
runName?: string,
52+
_parentRunId?: string,
53+
_extraParams?: Record<string, unknown>,
54+
_tags?: string[],
55+
_metadata?: Record<string, unknown>,
56+
_runName?: string,
5957
): Promise<void> {
6058
const className = llm.id?.[llm.id.length - 1] || "unknown";
6159
const modelName = this.extractModelName(llm);
6260
const vendor = this.detectVendor(llm);
6361
const spanBaseName = this.convertClassNameToSpanName(className);
6462

65-
// Create both a task span and an LLM span like Python implementation
66-
const taskSpanName = `${spanBaseName}.task`;
67-
const taskSpan = this.tracer.startSpan(taskSpanName, {
63+
// Create single LLM span like Python implementation
64+
const span = this.tracer.startSpan(`${spanBaseName}.completion`, {
6865
kind: SpanKind.CLIENT,
6966
});
7067

71-
taskSpan.setAttributes({
72-
"traceloop.span.kind": "task",
73-
"traceloop.workflow.name": runName || taskSpanName,
74-
});
75-
76-
if (this.traceContent) {
77-
const flatMessages = messages.flat();
78-
taskSpan.setAttributes({
79-
"traceloop.entity.input": JSON.stringify(
80-
flatMessages.map((m) => ({
81-
role: m._getType(),
82-
content:
83-
typeof m.content === "string"
84-
? m.content
85-
: JSON.stringify(m.content),
86-
})),
87-
),
88-
});
89-
}
90-
91-
// Create LLM span as child of task span
92-
const llmSpan = this.tracer.startSpan(
93-
`${spanBaseName}.completion`,
94-
{
95-
kind: SpanKind.CLIENT,
96-
},
97-
trace.setSpan(context.active(), taskSpan),
98-
);
99-
10068
const flatMessages = messages.flat();
101-
llmSpan.setAttributes({
69+
span.setAttributes({
10270
[SpanAttributes.LLM_SYSTEM]: vendor,
10371
[SpanAttributes.LLM_REQUEST_TYPE]: "completion",
10472
[SpanAttributes.LLM_REQUEST_MODEL]: modelName,
@@ -108,7 +76,7 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
10876
if (this.traceContent && flatMessages.length > 0) {
10977
flatMessages.forEach((message, idx) => {
11078
const role = this.mapMessageTypeToRole(message._getType());
111-
llmSpan.setAttributes({
79+
span.setAttributes({
11280
[`${SpanAttributes.LLM_PROMPTS}.${idx}.role`]: role,
11381
[`${SpanAttributes.LLM_PROMPTS}.${idx}.content`]:
11482
typeof message.content === "string"
@@ -118,8 +86,7 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
11886
});
11987
}
12088

121-
this.spans.set(runId, { span: taskSpan, runId });
122-
this.spans.set(`${runId}_llm`, { span: llmSpan, runId: `${runId}_llm` });
89+
this.spans.set(runId, { span, runId });
12390
}
12491

12592
override async handleLLMStart(
@@ -169,100 +136,66 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
169136
_tags?: string[],
170137
_extraParams?: Record<string, unknown>,
171138
): Promise<void> {
172-
// End both LLM and task spans
173-
const llmSpanData = this.spans.get(`${runId}_llm`);
174-
const taskSpanData = this.spans.get(runId);
175-
176-
if (llmSpanData) {
177-
const { span: llmSpan } = llmSpanData;
178-
179-
if (
180-
this.traceContent &&
181-
output.generations &&
182-
output.generations.length > 0
183-
) {
184-
output.generations.forEach((generation, idx) => {
185-
if (generation && generation.length > 0) {
186-
llmSpan.setAttributes({
187-
[`${SpanAttributes.LLM_COMPLETIONS}.${idx}.role`]: "assistant",
188-
[`${SpanAttributes.LLM_COMPLETIONS}.${idx}.content`]:
189-
generation[0].text,
190-
});
191-
}
192-
});
193-
}
139+
const spanData = this.spans.get(runId);
140+
if (!spanData) return;
194141

195-
// Add usage metrics if available
196-
if (output.llmOutput?.usage) {
197-
const usage = output.llmOutput.usage;
198-
if (usage.input_tokens) {
199-
llmSpan.setAttributes({
200-
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: usage.input_tokens,
201-
});
202-
}
203-
if (usage.output_tokens) {
204-
llmSpan.setAttributes({
205-
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: usage.output_tokens,
206-
});
207-
}
208-
const totalTokens =
209-
(usage.input_tokens || 0) + (usage.output_tokens || 0);
210-
if (totalTokens > 0) {
211-
llmSpan.setAttributes({
212-
[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]: totalTokens,
213-
});
214-
}
215-
}
142+
const { span } = spanData;
216143

217-
// Also check for tokenUsage format (for compatibility)
218-
if (output.llmOutput?.tokenUsage) {
219-
const usage = output.llmOutput.tokenUsage;
220-
if (usage.promptTokens) {
221-
llmSpan.setAttributes({
222-
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: usage.promptTokens,
223-
});
224-
}
225-
if (usage.completionTokens) {
226-
llmSpan.setAttributes({
227-
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]:
228-
usage.completionTokens,
229-
});
230-
}
231-
if (usage.totalTokens) {
232-
llmSpan.setAttributes({
233-
[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]: usage.totalTokens,
144+
if (this.traceContent && output.generations && output.generations.length > 0) {
145+
output.generations.forEach((generation, idx) => {
146+
if (generation && generation.length > 0) {
147+
span.setAttributes({
148+
[`${SpanAttributes.LLM_COMPLETIONS}.${idx}.role`]: "assistant",
149+
[`${SpanAttributes.LLM_COMPLETIONS}.${idx}.content`]: generation[0].text,
234150
});
235151
}
236-
}
237-
238-
llmSpan.setStatus({ code: SpanStatusCode.OK });
239-
llmSpan.end();
240-
this.spans.delete(`${runId}_llm`);
152+
});
241153
}
242154

243-
if (taskSpanData) {
244-
const { span: taskSpan } = taskSpanData;
245-
246-
if (
247-
this.traceContent &&
248-
output.generations &&
249-
output.generations.length > 0
250-
) {
251-
const completions = output.generations.map((generation, _idx) => {
252-
if (generation && generation.length > 0) {
253-
return generation[0].text;
254-
}
255-
return "";
155+
// Add usage metrics if available
156+
if (output.llmOutput?.usage) {
157+
const usage = output.llmOutput.usage;
158+
if (usage.input_tokens) {
159+
span.setAttributes({
160+
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: usage.input_tokens,
256161
});
257-
taskSpan.setAttributes({
258-
"traceloop.entity.output": JSON.stringify(completions),
162+
}
163+
if (usage.output_tokens) {
164+
span.setAttributes({
165+
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: usage.output_tokens,
259166
});
260167
}
168+
const totalTokens = (usage.input_tokens || 0) + (usage.output_tokens || 0);
169+
if (totalTokens > 0) {
170+
span.setAttributes({
171+
[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]: totalTokens,
172+
});
173+
}
174+
}
261175

262-
taskSpan.setStatus({ code: SpanStatusCode.OK });
263-
taskSpan.end();
264-
this.spans.delete(runId);
176+
// Also check for tokenUsage format (for compatibility)
177+
if (output.llmOutput?.tokenUsage) {
178+
const usage = output.llmOutput.tokenUsage;
179+
if (usage.promptTokens) {
180+
span.setAttributes({
181+
[SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: usage.promptTokens,
182+
});
183+
}
184+
if (usage.completionTokens) {
185+
span.setAttributes({
186+
[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: usage.completionTokens,
187+
});
188+
}
189+
if (usage.totalTokens) {
190+
span.setAttributes({
191+
[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]: usage.totalTokens,
192+
});
193+
}
265194
}
195+
196+
span.setStatus({ code: SpanStatusCode.OK });
197+
span.end();
198+
this.spans.delete(runId);
266199
}
267200

268201
async handleChatModelEnd(
@@ -283,25 +216,14 @@ export class TraceloopCallbackHandler extends BaseCallbackHandler {
283216
_tags?: string[],
284217
_extraParams?: Record<string, unknown>,
285218
): Promise<void> {
286-
// End both spans on error
287-
const llmSpanData = this.spans.get(`${runId}_llm`);
288-
const taskSpanData = this.spans.get(runId);
289-
290-
if (llmSpanData) {
291-
const { span: llmSpan } = llmSpanData;
292-
llmSpan.recordException(err);
293-
llmSpan.setStatus({ code: SpanStatusCode.ERROR, message: err.message });
294-
llmSpan.end();
295-
this.spans.delete(`${runId}_llm`);
296-
}
219+
const spanData = this.spans.get(runId);
220+
if (!spanData) return;
297221

298-
if (taskSpanData) {
299-
const { span: taskSpan } = taskSpanData;
300-
taskSpan.recordException(err);
301-
taskSpan.setStatus({ code: SpanStatusCode.ERROR, message: err.message });
302-
taskSpan.end();
303-
this.spans.delete(runId);
304-
}
222+
const { span } = spanData;
223+
span.recordException(err);
224+
span.setStatus({ code: SpanStatusCode.ERROR, message: err.message });
225+
span.end();
226+
this.spans.delete(runId);
305227
}
306228

307229
override async handleChainStart(

packages/instrumentation-langchain/test/instrumentation.test.ts

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -461,27 +461,16 @@ describe("Test Langchain instrumentation", async function () {
461461
assert.ok(attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]);
462462
assert.ok(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS]);
463463
} else {
464-
// Test LangChain callback handler spans
465-
const taskSpan = spans.find((span) => span.name === "bedrock.chat.task");
464+
// Test LangChain callback handler spans - now only creates completion span
466465
const completionSpan = spans.find(
467466
(span) => span.name === "bedrock.chat.completion",
468467
);
469468

470-
assert.ok(
471-
taskSpan,
472-
`No task span found. Available spans: ${spans.map((s) => s.name).join(", ")}`,
473-
);
474469
assert.ok(
475470
completionSpan,
476471
`No completion span found. Available spans: ${spans.map((s) => s.name).join(", ")}`,
477472
);
478473

479-
// Test task span attributes
480-
const taskAttributes = taskSpan.attributes;
481-
assert.strictEqual(taskAttributes["traceloop.span.kind"], "task");
482-
assert.ok(taskAttributes["traceloop.entity.input"]);
483-
assert.ok(taskAttributes["traceloop.entity.output"]);
484-
485474
// Test completion span attributes
486475
const completionAttributes = completionSpan.attributes;
487476
assert.strictEqual(

0 commit comments

Comments (0)