Skip to content

Commit 3277876

Browse files
feat(api): responses
1 parent f9706f4 commit 3277876

4 files changed

Lines changed: 346 additions & 1 deletion

File tree

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
 configured_endpoints: 193
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-2344b44246a44d39ad5b74d3077bd2958745aad67feb15970756532fa0b3f9d6.yml
 openapi_spec_hash: a1913979235ce152a8dc380fabe5362e
-config_hash: 6c9a04f3cc5dd88e1e4f0ae42d98ba9a
+config_hash: 3302f40607e596148c9ac7706346a858

api.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1075,3 +1075,15 @@ from gradient.types import BillingListInsightsResponse
10751075
Methods:
10761076

10771077
- <code title="get /v2/billing/{account_urn}/insights/{start_date}/{end_date}">client.billing.<a href="./src/gradient/resources/billing.py">list_insights</a>(end_date, \*, account_urn, start_date, \*\*<a href="src/gradient/types/billing_list_insights_params.py">params</a>) -> <a href="./src/gradient/types/billing_list_insights_response.py">BillingListInsightsResponse</a></code>
+# Responses
+
+Types:
+
+```python
+from gradient.types import ResponseCreateResponse
+```
+
+Methods:
+
+- <code title="post /responses">client.responses.<a href="./src/gradient/resources/responses.py">create</a>(\*\*<a href="src/gradient/types/response_create_params.py">params</a>) -> <a href="./src/gradient/types/response_create_response.py">ResponseCreateResponse</a></code>

src/gradient/types/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,7 @@
8484
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
8585
from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams
8686
from .image_generate_response import ImageGenerateResponse as ImageGenerateResponse
87+
from .response_create_response import ResponseCreateResponse as ResponseCreateResponse
8788
from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
8889
from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
8990
from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
Lines changed: 332 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,332 @@
1+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2+
3+
import builtins
4+
from typing import Dict, List, Union, Optional
5+
from typing_extensions import Literal, TypeAlias
6+
7+
from .._models import BaseModel
8+
from .shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
9+
10+
# Public names exported by this module (one entry per model/alias defined below).
__all__ = [
    "ResponseCreateResponse",
    "Usage",
    "UsageInputTokensDetails",
    "UsageOutputTokensDetails",
    "Choice",
    "ChoiceMessage",
    "ChoiceMessageToolCall",
    "ChoiceMessageToolCallFunction",
    "ChoiceLogprobs",
    "Output",
    "OutputUnionMember0",
    "OutputUnionMember1",
    "OutputUnionMember2",
    "OutputUnionMember2Content",
    "Tool",
]
27+
28+
29+
class UsageInputTokensDetails(BaseModel):
    """A detailed breakdown of the input tokens."""

    cached_tokens: int
    """The number of tokens that were retrieved from the cache.

    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
    """
37+
38+
39+
class UsageOutputTokensDetails(BaseModel):
    """A detailed breakdown of the output tokens."""

    reasoning_tokens: int
    """The number of reasoning tokens."""

    tool_output_tokens: int
    """The number of tool output tokens."""
47+
48+
49+
class Usage(BaseModel):
    """
    Detailed token usage statistics for the request, including input/output token
    counts and detailed breakdowns.
    """

    input_tokens: int
    """The number of input tokens."""

    input_tokens_details: UsageInputTokensDetails
    """A detailed breakdown of the input tokens."""

    output_tokens: int
    """The number of output tokens."""

    output_tokens_details: UsageOutputTokensDetails
    """A detailed breakdown of the output tokens."""

    total_tokens: int
    """The total number of tokens used."""
68+
69+
70+
class ChoiceMessageToolCallFunction(BaseModel):
    """The function that the model called."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""
83+
84+
85+
class ChoiceMessageToolCall(BaseModel):
    """A single tool call made by the model within a choice message."""

    id: str
    """The ID of the tool call."""

    function: ChoiceMessageToolCallFunction
    """The function that the model called."""

    type: Literal["function"]
    """The type of the tool. Currently, only `function` is supported."""
94+
95+
96+
class ChoiceMessage(BaseModel):
    """The generated message response."""

    content: Optional[str] = None
    """The generated text content."""

    role: Optional[Literal["assistant"]] = None
    """The role of the message author, which is always `assistant`."""

    tool_calls: Optional[List[ChoiceMessageToolCall]] = None
    """The tool calls generated by the model, such as function calls."""
107+
108+
109+
class ChoiceLogprobs(BaseModel):
    """Log probability information for the choice.

    Only present if logprobs was requested in the request.
    """

    content: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message content tokens with log probability information."""
117+
118+
119+
class Choice(BaseModel):
    """A single generated completion choice (chat-completion-compatible shape)."""

    finish_reason: Literal["stop", "length", "tool_calls", "content_filter"]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
    sequence, `length` if the maximum number of tokens specified in the request was
    reached, or `tool_calls` if the model called a tool.
    """

    index: int
    """The index of the choice in the list of choices."""

    message: ChoiceMessage
    """The generated message response."""

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice.

    Only present if logprobs was requested in the request.
    """
139+
140+
141+
class OutputUnionMember0(BaseModel):
    """A function tool-call output item (`type == "function_call"`)."""

    arguments: str
    """JSON string of function arguments"""

    call_id: str
    """The unique ID of the function tool call"""

    name: str
    """The name of the function to call"""

    type: Literal["function_call"]
    """The type of output item"""

    id: Optional[str] = None
    """The unique ID of the function tool call (same as call_id)"""

    encrypted_content: Optional[str] = None
    """Encrypted content (optional)"""

    status: Optional[str] = None
    """Status of the item (optional, can be null)"""
162+
163+
164+
class OutputUnionMember1(BaseModel):
    """A plain text output item (`type == "text"`)."""

    text: str
    """The text content"""

    type: Literal["text"]
    """The type of output item"""
170+
171+
172+
class OutputUnionMember2Content(BaseModel):
    """A single reasoning-text content part inside a reasoning output item."""

    text: str
    """The reasoning text content"""

    type: Literal["reasoning_text"]
    """The type of content"""
178+
179+
180+
class OutputUnionMember2(BaseModel):
    """A reasoning output item (`type == "reasoning"`)."""

    id: str
    """The unique ID of the reasoning item"""

    content: List[OutputUnionMember2Content]
    """Array of reasoning content parts"""

    summary: List[object]
    """Summary of the reasoning (usually empty)"""

    type: Literal["reasoning"]
    """The type of output item"""

    encrypted_content: Optional[str] = None
    """Encrypted content (optional)"""

    status: Optional[str] = None
    """Status of the item (optional, can be null)"""
198+
199+
200+
# Union of the possible output item shapes; each variant carries a distinct
# `type` literal ("function_call" / "text" / "reasoning") that identifies it.
Output: TypeAlias = Union[OutputUnionMember0, OutputUnionMember1, OutputUnionMember2]
201+
202+
203+
class Tool(BaseModel):
    """Tool definition for Responses API (flat format).

    This format is used by VLLM's Responses API where name, description, and
    parameters are at the top level of the tool object.
    """

    type: Literal["function", "web_search", "web_search_2025_08_26"]
    """The type of the tool.

    Supported values are `function` (custom tools), `web_search`, and
    `web_search_2025_08_26` (built-in web search).
    """

    description: Optional[str] = None
    """
    A description of what the function does, used by the model to choose when and
    how to call the function.
    """

    name: Optional[str] = None
    """The name of the function to be called.

    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
    of 64.
    """

    parameters: Optional[Dict[str, object]] = None
    """The parameters the functions accepts, described as a JSON Schema object.

    See the [guide](/docs/guides/function-calling) for examples, and the
    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
    documentation about the format.

    Omitting `parameters` defines a function with an empty parameter list.
    """
238+
239+
240+
class ResponseCreateResponse(BaseModel):
    """
    Represents a text-to-text response returned by the model, based on the
    provided input. VLLM models only.
    """

    id: str
    """A unique identifier for the response."""

    created: int
    """The Unix timestamp (in seconds) of when the response was created."""

    model: str
    """The model used to generate the response."""

    # NOTE: this field intentionally shadows the `object` builtin; `builtins.object`
    # is used elsewhere in this module for that reason.
    object: Literal["response"]
    """The object type, which is always `response`."""

    usage: Usage
    """
    Detailed token usage statistics for the request, including input/output token
    counts and detailed breakdowns.
    """

    background: Optional[bool] = None
    """Whether the request was processed in the background"""

    choices: Optional[List[Choice]] = None
    """A list of response choices.

    Can be more than one if `n` is greater than 1. Optional - Responses API
    primarily uses the output array.
    """

    input_messages: Optional[List[builtins.object]] = None
    """Input messages (if applicable)"""

    max_output_tokens: Optional[int] = None
    """Maximum output tokens setting"""

    max_tool_calls: Optional[int] = None
    """Maximum tool calls setting"""

    output: Optional[List[Output]] = None
    """An array of content items generated by the model.

    This includes text content, function calls, reasoning items, and other output
    types. Use this field for Responses API compatibility.
    """

    output_messages: Optional[List[builtins.object]] = None
    """Output messages (if applicable)"""

    parallel_tool_calls: Optional[bool] = None
    """Whether parallel tool calls are enabled"""

    previous_response_id: Optional[str] = None
    """Previous response ID (for multi-turn conversations)"""

    prompt: Optional[str] = None
    """Prompt used for the response"""

    reasoning: Optional[str] = None
    """Reasoning content"""

    service_tier: Optional[str] = None
    """Service tier used"""

    status: Optional[str] = None
    """Status of the response"""

    temperature: Optional[float] = None
    """Temperature setting used for the response"""

    text: Optional[str] = None
    """Text content"""

    tool_choice: Optional[str] = None
    """Tool choice setting used for the response"""

    tools: Optional[List[Tool]] = None
    """Tools available for the response"""

    top_logprobs: Optional[int] = None
    """Top logprobs setting"""

    top_p: Optional[float] = None
    """Top-p setting used for the response"""

    truncation: Optional[str] = None
    """Truncation setting"""

    user: Optional[str] = None
    """User identifier"""

0 commit comments

Comments
 (0)