Skip to content

Commit 0d61000

Browse files
committed
Reverted openai sdk package back to 4.38.3, since Polly can't intercept the new version's HTTP requests, causing tests to fail
1 parent 511c570 commit 0d61000

File tree

5 files changed

+175
-55
lines changed

5 files changed

+175
-55
lines changed

packages/traceloop-sdk/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@
119119
"esbuild": "^0.25.7",
120120
"langchain": "^0.3.37",
121121
"llamaindex": "^0.12.1",
122-
"openai": "^6.32.0",
122+
"openai": "4.38.3",
123123
"together-ai": "^0.20.0",
124124
"ts-mocha": "^11.1.0",
125125
"zod": "^3.25.76"

packages/traceloop-sdk/test/agent_decorator.test.ts

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ import FSPersister from "@pollyjs/persister-fs";
2727
import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
2828
import {
2929
ATTR_GEN_AI_AGENT_NAME,
30-
ATTR_GEN_AI_PROMPT,
30+
ATTR_GEN_AI_INPUT_MESSAGES,
3131
ATTR_GEN_AI_REQUEST_MODEL,
3232
} from "@opentelemetry/semantic-conventions/incubating";
3333
import { ChatCompletionMessageParam } from "openai/resources/index.mjs";
@@ -107,7 +107,7 @@ describe("Test Agent Decorator", () => {
107107
const spans = memoryExporter.getFinishedSpans();
108108

109109
const agentSpan = spans.find((span) => span.name === "plan_trip.agent");
110-
const chatSpan = spans.find((span) => span.name === "openai.chat");
110+
const chatSpan = spans.find((span) => span.name.startsWith("chat "));
111111

112112
assert.ok(result);
113113
assert.ok(agentSpan);
@@ -144,17 +144,19 @@ describe("Test Agent Decorator", () => {
144144
chatSpan.attributes[`${ATTR_GEN_AI_AGENT_NAME}`],
145145
"plan_trip",
146146
);
147-
assert.strictEqual(
148-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
149-
"user",
147+
148+
const inputMessages = JSON.parse(
149+
chatSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string,
150150
);
151+
assert.strictEqual(inputMessages[0].role, "user");
151152
assert.strictEqual(
152-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
153+
inputMessages[0].parts[0].content,
153154
"Tell me a joke about OpenTelemetry",
154155
);
155156
});
156157

157-
it("should create spans for agents using decoration syntax", async () => {
158+
it("should create spans for agents using decoration syntax", async function () {
159+
this.timeout(30000);
158160
class TestAgent {
159161
@traceloop.agent({ name: "travel_planner", version: 2 })
160162
async planTrip(destination: string) {
@@ -176,7 +178,7 @@ describe("Test Agent Decorator", () => {
176178
const agentSpan = spans.find(
177179
(span) => span.name === "travel_planner.agent",
178180
);
179-
const chatSpan = spans.find((span) => span.name === "openai.chat");
181+
const chatSpan = spans.find((span) => span.name.startsWith("chat "));
180182

181183
assert.ok(result);
182184
assert.ok(agentSpan);
@@ -205,12 +207,13 @@ describe("Test Agent Decorator", () => {
205207
chatSpan.attributes[`${ATTR_GEN_AI_AGENT_NAME}`],
206208
"travel_planner",
207209
);
208-
assert.strictEqual(
209-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
210-
"user",
210+
211+
const inputMessages = JSON.parse(
212+
chatSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string,
211213
);
214+
assert.strictEqual(inputMessages[0].role, "user");
212215
assert.strictEqual(
213-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
216+
inputMessages[0].parts[0].content,
214217
"Tell me a joke about OpenTelemetry",
215218
);
216219
});
@@ -275,12 +278,13 @@ describe("Test Agent Decorator", () => {
275278
completionSpan.attributes[`${ATTR_GEN_AI_REQUEST_MODEL}`],
276279
"gpt-3.5-turbo",
277280
);
281+
// Manual instrumentation (withLLMCall/reportRequest) uses old indexed format
278282
assert.strictEqual(
279-
completionSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
283+
completionSpan.attributes[`gen_ai.prompt.0.role`],
280284
"user",
281285
);
282286
assert.strictEqual(
283-
completionSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
287+
completionSpan.attributes[`gen_ai.prompt.0.content`],
284288
"Tell me a joke about OpenTelemetry",
285289
);
286290
});

packages/traceloop-sdk/test/decorators.test.ts

Lines changed: 45 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ import FSPersister from "@pollyjs/persister-fs";
3030
import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
3131
import {
3232
ATTR_GEN_AI_COMPLETION,
33+
ATTR_GEN_AI_INPUT_MESSAGES,
34+
ATTR_GEN_AI_OUTPUT_MESSAGES,
3335
ATTR_GEN_AI_PROMPT,
3436
ATTR_GEN_AI_REQUEST_MODEL,
3537
ATTR_GEN_AI_RESPONSE_MODEL,
@@ -117,7 +119,7 @@ describe("Test SDK Decorators", () => {
117119
const workflowSpan = spans.find(
118120
(span) => span.name === "sample_chat.workflow",
119121
);
120-
const chatSpan = spans.find((span) => span.name === "openai.chat");
122+
const chatSpan = spans.find((span) => span.name.startsWith("chat "));
121123

122124
assert.ok(result);
123125
assert.ok(workflowSpan);
@@ -158,12 +160,13 @@ describe("Test SDK Decorators", () => {
158160
chatSpan.attributes[`${SpanAttributes.TRACELOOP_WORKFLOW_NAME}`],
159161
"sample_chat",
160162
);
161-
assert.strictEqual(
162-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
163-
"user",
163+
164+
const inputMessages = JSON.parse(
165+
chatSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string,
164166
);
167+
assert.strictEqual(inputMessages[0].role, "user");
165168
assert.strictEqual(
166-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
169+
inputMessages[0].parts[0].content,
167170
"Tell me a joke about OpenTelemetry",
168171
);
169172
});
@@ -195,7 +198,8 @@ describe("Test SDK Decorators", () => {
195198
assert.strictEqual(spans.length, 0);
196199
});
197200

198-
it("should create spans for workflows using decoration syntax", async () => {
201+
it("should create spans for workflows using decoration syntax", async function () {
202+
this.timeout(30000);
199203
class TestOpenAI {
200204
@traceloop.workflow({ name: "sample_chat", version: 2 })
201205
async chat(things: Map<string, string>) {
@@ -229,7 +233,7 @@ describe("Test SDK Decorators", () => {
229233
const workflowSpan = spans.find(
230234
(span) => span.name === "sample_chat.workflow",
231235
);
232-
const chatSpan = spans.find((span) => span.name === "openai.chat");
236+
const chatSpan = spans.find((span) => span.name.startsWith("chat "));
233237

234238
assert.ok(result);
235239
assert.ok(workflowSpan);
@@ -273,12 +277,13 @@ describe("Test SDK Decorators", () => {
273277
chatSpan.attributes[`${SpanAttributes.TRACELOOP_WORKFLOW_NAME}`],
274278
"sample_chat",
275279
);
276-
assert.strictEqual(
277-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
278-
"user",
280+
281+
const inputMessages = JSON.parse(
282+
chatSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string,
279283
);
284+
assert.strictEqual(inputMessages[0].role, "user");
280285
assert.strictEqual(
281-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
286+
inputMessages[0].parts[0].content,
282287
"Tell me a joke about OpenTelemetry",
283288
);
284289
});
@@ -322,7 +327,7 @@ describe("Test SDK Decorators", () => {
322327
const workflowSpan = spans.find(
323328
(span) => span.name === `${workflowName}.workflow`,
324329
);
325-
const chatSpan = spans.find((span) => span.name === "openai.chat");
330+
const chatSpan = spans.find((span) => span.name.startsWith("chat "));
326331

327332
assert.ok(result);
328333
assert.ok(workflowSpan);
@@ -362,12 +367,13 @@ describe("Test SDK Decorators", () => {
362367
chatSpan.attributes[`${SpanAttributes.TRACELOOP_WORKFLOW_NAME}`],
363368
workflowName,
364369
);
365-
assert.strictEqual(
366-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
367-
"user",
370+
371+
const inputMessages = JSON.parse(
372+
chatSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string,
368373
);
374+
assert.strictEqual(inputMessages[0].role, "user");
369375
assert.strictEqual(
370-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
376+
inputMessages[0].parts[0].content,
371377
"Tell me a joke about OpenTelemetry",
372378
);
373379
});
@@ -394,7 +400,7 @@ describe("Test SDK Decorators", () => {
394400
const workflowSpan = spans.find(
395401
(span) => span.name === "sample_chat.workflow",
396402
);
397-
const chatSpan = spans.find((span) => span.name === "openai.chat");
403+
const chatSpan = spans.find((span) => span.name.startsWith("chat "));
398404

399405
assert.ok(result);
400406
assert.ok(workflowSpan);
@@ -424,20 +430,17 @@ describe("Test SDK Decorators", () => {
424430
"sample_chat",
425431
);
426432
assert.strictEqual(
427-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.role`],
433+
chatSpan.attributes[ATTR_GEN_AI_INPUT_MESSAGES],
428434
undefined,
429435
);
430436
assert.strictEqual(
431-
chatSpan.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`],
432-
undefined,
433-
);
434-
assert.strictEqual(
435-
chatSpan.attributes[`${ATTR_GEN_AI_COMPLETION}.0.content`],
437+
chatSpan.attributes[ATTR_GEN_AI_OUTPUT_MESSAGES],
436438
undefined,
437439
);
438440
});
439441

440-
it("should create spans for manual LLM instrumentation", async () => {
442+
it("should create spans for manual LLM instrumentation", async function () {
443+
this.timeout(30000);
441444
const result = await traceloop.withWorkflow(
442445
{ name: "joke_generator", associationProperties: { userId: "123" } },
443446
() =>
@@ -456,7 +459,14 @@ describe("Test SDK Decorators", () => {
456459
model,
457460
});
458461

459-
span.reportResponse(response);
462+
span.reportResponse({
463+
model: response.model,
464+
usage: response.usage,
465+
completions: response.choices.map((c) => ({
466+
finish_reason: c.finish_reason,
467+
message: c.message,
468+
})),
469+
});
460470

461471
return response;
462472
},
@@ -522,12 +532,11 @@ describe("Test SDK Decorators", () => {
522532
assert.ok(
523533
completionSpan.attributes[`${SpanAttributes.LLM_USAGE_TOTAL_TOKENS}`],
524534
);
525-
assert.equal(
526-
completionSpan.attributes[`${ATTR_GEN_AI_USAGE_PROMPT_TOKENS}`],
527-
"15",
535+
assert.ok(
536+
+completionSpan.attributes[`${ATTR_GEN_AI_USAGE_INPUT_TOKENS}`]! > 0,
528537
);
529538
assert.ok(
530-
+completionSpan.attributes[`${ATTR_GEN_AI_USAGE_COMPLETION_TOKENS}`]! > 0,
539+
+completionSpan.attributes[`${ATTR_GEN_AI_USAGE_OUTPUT_TOKENS}`]! > 0,
531540
);
532541
});
533542

@@ -559,15 +568,15 @@ describe("Test SDK Decorators", () => {
559568
assert.ok(result1);
560569
assert.ok(result2);
561570

562-
const openAI1Span = spans.find(
563-
(span) =>
564-
span.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] ===
571+
const openAI1Span = spans.find((span) =>
572+
(span.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string)?.includes(
565573
"Tell me a joke about OpenTelemetry",
574+
),
566575
);
567-
const openAI2Span = spans.find(
568-
(span) =>
569-
span.attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] ===
576+
const openAI2Span = spans.find((span) =>
577+
(span.attributes[ATTR_GEN_AI_INPUT_MESSAGES] as string)?.includes(
570578
"Tell me a joke about Typescript",
579+
),
571580
);
572581

573582
assert.ok(openAI1Span);
@@ -619,7 +628,7 @@ describe("Test SDK Decorators", () => {
619628
const jokeCreationSpan = spans.find(
620629
(span) => span.name === "joke_creation.task",
621630
);
622-
const openAiChatSpans = spans.find((span) => span.name === "openai.chat");
631+
const openAiChatSpans = spans.find((span) => span.name.startsWith("chat "));
623632

624633
assert.ok(result);
625634
assert.ok(jokeCreationTaskWrapperSpan);

packages/traceloop-sdk/test/test-setup.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,11 @@
1414
* limitations under the License.
1515
*/
1616

17+
// TODO: Bump openai devDependency from 4.x to ^6 once we migrate the test
18+
// HTTP mocking from Polly.js to msw (or undici MockAgent). OpenAI SDK v6 uses
19+
// native fetch/undici which Polly's adapters cannot intercept in replay mode.
20+
// See: openai-semconv-review.md and semantic-conventions-migration.md for context.
21+
1722
import { InMemorySpanExporter } from "@opentelemetry/sdk-trace-base";
1823
import * as traceloop from "../src";
1924

0 commit comments

Comments
 (0)