Commit 0ba0244

fix output type semconv value and run ruff format
1 parent 5db9b7a commit 0ba0244

3 files changed: 28 additions and 70 deletions

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/__init__.py

Lines changed: 1 addition & 3 deletions
@@ -162,9 +162,7 @@ def _instrument(self, **kwargs):
         wrap_function_wrapper(
             module="openai.resources.embeddings",
             name="Embeddings.create",
-            wrapper=embeddings_create(
-                tracer, instruments, latest_experimental_enabled
-            ),
+            wrapper=embeddings_create(tracer, instruments, latest_experimental_enabled),
         )

         wrap_function_wrapper(

instrumentation-genai/opentelemetry-instrumentation-openai-v2/src/opentelemetry/instrumentation/openai_v2/utils.py

Lines changed: 21 additions & 53 deletions
@@ -161,9 +161,7 @@ def choice_to_event(choice, capture_content):
     if choice.message:
         message = {
             "role": (
-                choice.message.role
-                if choice.message and choice.message.role
-                else None
+                choice.message.role if choice.message and choice.message.role else None
             )
         }
         tool_calls = extract_tool_calls(choice.message, capture_content)
@@ -237,14 +235,10 @@ def get_llm_request_attributes(
     if operation_name == GenAIAttributes.GenAiOperationNameValues.CHAT.value:
         attributes.update(
             {
-                GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE: kwargs.get(
-                    "temperature"
-                ),
+                GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE: kwargs.get("temperature"),
                 GenAIAttributes.GEN_AI_REQUEST_TOP_P: kwargs.get("p")
                 or kwargs.get("top_p"),
-                GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS: kwargs.get(
-                    "max_tokens"
-                ),
+                GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
                 GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY: kwargs.get(
                     "presence_penalty"
                 ),
@@ -258,16 +252,12 @@ def get_llm_request_attributes(
         if (choice_count := kwargs.get("n")) is not None:
             # Only add non default, meaningful values
             if isinstance(choice_count, int) and choice_count != 1:
-                attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = (
-                    choice_count
-                )
+                attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = choice_count

         if (stop_sequences := kwargs.get("stop")) is not None:
             if isinstance(stop_sequences, str):
                 stop_sequences = [stop_sequences]
-            attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = (
-                stop_sequences
-            )
+            attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = stop_sequences

         request_response_format_attr_key = (
             GenAIAttributes.GEN_AI_OUTPUT_TYPE
@@ -278,14 +268,10 @@ def get_llm_request_attributes(
             # response_format may be string, object with a string in the `type` key,
             # or a type (e.g. Pydantic model class used with parse())
             if isinstance(response_format, type):
-                attributes[request_response_format_attr_key] = "json_schema"
+                attributes[request_response_format_attr_key] = "json"
             elif isinstance(response_format, Mapping):
-                if (
-                    response_format_type := response_format.get("type")
-                ) is not None:
-                    attributes[request_response_format_attr_key] = (
-                        response_format_type
-                    )
+                if (response_format_type := response_format.get("type")) is not None:
+                    attributes[request_response_format_attr_key] = response_format_type
             else:
                 attributes[request_response_format_attr_key] = response_format

@@ -306,10 +292,7 @@ def get_llm_request_attributes(
         )

     # Add embeddings-specific attributes
-    elif (
-        operation_name
-        == GenAIAttributes.GenAiOperationNameValues.EMBEDDINGS.value
-    ):
+    elif operation_name == GenAIAttributes.GenAiOperationNameValues.EMBEDDINGS.value:
         # Add embedding dimensions if specified
         if (dimensions := kwargs.get("dimensions")) is not None:
             # TODO: move to GEN_AI_EMBEDDINGS_DIMENSION_COUNT when 1.39.0 is baseline
@@ -339,16 +322,12 @@ def create_chat_invocation(
     # pylint: disable=too-many-branches

     llm_invocation = LLMInvocation(request_model=kwargs.get("model", ""))
-    llm_invocation.provider = (
-        GenAIAttributes.GenAiProviderNameValues.OPENAI.value
-    )
+    llm_invocation.provider = GenAIAttributes.GenAiProviderNameValues.OPENAI.value
     llm_invocation.temperature = get_value(kwargs.get("temperature"))
     llm_invocation.top_p = get_value(kwargs.get("p") or kwargs.get("top_p"))
     llm_invocation.max_tokens = get_value(kwargs.get("max_tokens"))
     llm_invocation.presence_penalty = get_value(kwargs.get("presence_penalty"))
-    llm_invocation.frequency_penalty = get_value(
-        kwargs.get("frequency_penalty")
-    )
+    llm_invocation.frequency_penalty = get_value(kwargs.get("frequency_penalty"))
     llm_invocation.seed = get_value(kwargs.get("seed"))
     if (stop_sequences := get_value(kwargs.get("stop"))) is not None:
         if isinstance(stop_sequences, str):
@@ -365,28 +344,22 @@ def create_chat_invocation(
     if (choice_count := get_value(kwargs.get("n"))) is not None:
         # Only add non default, meaningful values
         if isinstance(choice_count, int) and choice_count != 1:
-            attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = (
-                choice_count
-            )
+            attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = choice_count

-    if (
-        response_format := get_value(kwargs.get("response_format"))
-    ) is not None:
+    if (response_format := get_value(kwargs.get("response_format"))) is not None:
         # response_format may be string, object with a string in the `type` key,
         # or a type (e.g. Pydantic model class used with parse())
         if isinstance(response_format, type):
-            attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = "json_schema"
+            attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = "json"
         elif isinstance(response_format, Mapping):
             if (
                 response_format_type := get_value(response_format.get("type"))
             ) is not None:
-                attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = (
-                    response_format_type
-                )
+                attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = response_format_type
         else:
-            attributes[
-                GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
-            ] = response_format
+            attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT] = (
+                response_format
+            )

     # service_tier can be passed directly or in extra_body (in SDK 1.26.0 it's via extra_body)
     service_tier = get_value(kwargs.get("service_tier"))
@@ -416,16 +389,13 @@ def get_value(v: Any):
 def handle_span_exception(span, error: BaseException):
     span.set_status(Status(StatusCode.ERROR, str(error)))
     if span.is_recording():
-        span.set_attribute(
-            ErrorAttributes.ERROR_TYPE, type(error).__qualname__
-        )
+        span.set_attribute(ErrorAttributes.ERROR_TYPE, type(error).__qualname__)
     span.end()


 def _is_text_part(content: Any) -> bool:
     return isinstance(content, str) or (
-        isinstance(content, Iterable)
-        and all(isinstance(part, str) for part in content)
+        isinstance(content, Iterable) and all(isinstance(part, str) for part in content)
     )

@@ -476,9 +446,7 @@ def extract_tool_calls_new(tool_calls) -> list[ToolCallRequest]:
             arguments = arguments_str

         # TODO: support custom
-        parts.append(
-            ToolCallRequest(id=call_id, name=func_name, arguments=arguments)
-        )
+        parts.append(ToolCallRequest(id=call_id, name=func_name, arguments=arguments))
     return parts

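For context, the substantive change in this file is the value written for the output-type attribute when response_format is a class (for example a Pydantic model passed to parse()): the GenAI semantic conventions define gen_ai.output.type with well-known values such as "text" and "json" but no "json_schema", so the instrumentation now records "json". Below is a minimal standalone sketch of the mapping shown in the diff above; the helper name is invented for illustration and is not part of the package.

# Sketch only: mirrors the response_format handling in utils.py above.
from collections.abc import Mapping
from typing import Any


def _output_type_for(response_format: Any) -> Any:
    # A class (e.g. a Pydantic model used with the parse() API) is now
    # recorded as "json" instead of "json_schema".
    if isinstance(response_format, type):
        return "json"
    # A mapping such as {"type": "json_object"} records its "type" string, if present, as-is.
    if isinstance(response_format, Mapping):
        return response_format.get("type")
    # Anything else (e.g. a bare string) is recorded unchanged.
    return response_format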

instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_structured_outputs.py

Lines changed: 6 additions & 14 deletions
@@ -62,7 +62,7 @@ def test_structured_output_with_content(
         if latest_experimental_enabled
         else GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
     )
-    assert spans[0].attributes[output_type_attr_key] == "json_schema"
+    assert spans[0].attributes[output_type_attr_key] == "json"

     if latest_experimental_enabled:
         assert_messages_attribute(
@@ -71,18 +71,14 @@ def test_structured_output_with_content(
         )
         assert_messages_attribute(
             spans[0].attributes["gen_ai.output.messages"],
-            format_simple_expected_output_message(
-                response.choices[0].message.content
-            ),
+            format_simple_expected_output_message(response.choices[0].message.content),
         )
     else:
         logs = log_exporter.get_finished_logs()
         assert len(logs) == 2

         user_message = {"content": STRUCTURED_OUTPUT_PROMPT[0]["content"]}
-        assert_message_in_logs(
-            logs[0], "gen_ai.user.message", user_message, spans[0]
-        )
+        assert_message_in_logs(logs[0], "gen_ai.user.message", user_message, spans[0])

         choice_event = {
             "index": 0,
@@ -92,9 +88,7 @@ def test_structured_output_with_content(
                 "content": response.choices[0].message.content,
             },
         }
-        assert_message_in_logs(
-            logs[1], "gen_ai.choice", choice_event, spans[0]
-        )
+        assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])


 def test_structured_output_no_content(
@@ -126,7 +120,7 @@ def test_structured_output_no_content(
         if latest_experimental_enabled
         else GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
     )
-    assert spans[0].attributes[output_type_attr_key] == "json_schema"
+    assert spans[0].attributes[output_type_attr_key] == "json"

     logs = log_exporter.get_finished_logs()
     if latest_experimental_enabled:
@@ -143,6 +137,4 @@ def test_structured_output_no_content(
             "finish_reason": "stop",
             "message": {"role": "assistant"},
         }
-        assert_message_in_logs(
-            logs[1], "gen_ai.choice", choice_event, spans[0]
-        )
+        assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0])
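The updated tests above amount to the following expectation; this is a hedged usage sketch in which the model name and the Answer schema are placeholders, and the gen_ai.output.type key applies when the experimental ("latest") semconv mode is enabled as in these tests.

# Hypothetical example; only the response_format-as-class shape comes from the tests.
from openai import OpenAI
from pydantic import BaseModel


class Answer(BaseModel):
    text: str


client = OpenAI()
completion = client.beta.chat.completions.parse(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello."}],
    response_format=Answer,  # a class, so the output type is treated as JSON
)
# With opentelemetry-instrumentation-openai-v2 active and the experimental semconv
# mode enabled, the chat span is expected to carry
# attributes["gen_ai.output.type"] == "json" (previously "json_schema").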
