-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_api.py
More file actions
136 lines (115 loc) · 3.78 KB
/
test_api.py
File metadata and controls
136 lines (115 loc) · 3.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
#!/usr/bin/env python3
"""
Test OpenAI config (API key + base_url) and verify JSON response.
What this script does:
- Loads openai.api_key, base_url, model from config.yml
- Sends a minimal request
- Forces STRICT JSON output
- Asks the model to identify itself (model species / family)
"""
import json
import sys
from pathlib import Path
import yaml
from openai import OpenAI
from openai.types.shared_params import Reasoning
def load_config(path: str):
    """Load OpenAI connection settings from a YAML config file.

    Expects a top-level ``openai`` mapping containing ``api_key`` and
    ``model`` (both required), plus optional ``base_url`` and
    ``reasoning_effort``.

    Args:
        path: Filesystem path to the YAML config file.

    Returns:
        Tuple ``(api_key, base_url, model, effort)``; ``base_url`` and
        ``effort`` are ``None`` when absent from the config.

    Raises:
        ValueError: If ``api_key`` or ``model`` is missing.
    """
    cfg = yaml.safe_load(Path(path).read_text(encoding="utf-8"))
    openai_cfg = cfg.get("openai", {})
    api_key = openai_cfg.get("api_key")
    base_url = openai_cfg.get("base_url")
    model = openai_cfg.get("model")
    effort = openai_cfg.get("reasoning_effort")
    if not api_key:
        raise ValueError("api_key missing in config")
    if not model:
        raise ValueError("model missing in config")
    # SECURITY FIX: never echo the full secret to stdout/logs.
    # Show only a short prefix so the user can still recognize which key loaded.
    masked_key = api_key[:8] + "..." if len(api_key) > 8 else "***"
    print(f"api_key: {masked_key}")
    print(f"base_url: {base_url}")
    print(f"model: {model}")
    print(f"effort: {effort}")
    return api_key, base_url, model, effort
def eval_reasoning_effort(effort: str) -> Reasoning:
    """Map a config string to an OpenAI ``Reasoning`` setting.

    Accepts any casing. ``None`` or an unrecognized value falls back to
    ``"low"`` (the previous implementation crashed with ``AttributeError``
    when ``effort`` was ``None``, which ``load_config`` can return because
    ``reasoning_effort`` is optional).

    Args:
        effort: Effort level from config, e.g. ``"high"``; may be ``None``.

    Returns:
        A ``Reasoning`` object with a validated effort level.
    """
    valid_efforts = {"none", "minimal", "low", "medium", "high", "xhigh"}
    # BUG FIX: guard against None before calling .lower().
    normalized = (effort or "").lower()
    if normalized not in valid_efforts:
        normalized = "low"  # default when unset or unrecognized
    return Reasoning(effort=normalized)
def main():
    """Load config, send one strict-JSON test request, and print the result.

    Usage: ``python test_openai_config.py config.yml``

    Exit codes:
        1: missing config-file argument.
        2: the model's response was not valid JSON.
    """
    if len(sys.argv) < 2:
        print("Usage: python test_openai_config.py config.yml")
        sys.exit(1)
    api_key, base_url, model, reasoning_effort = load_config(sys.argv[1])
    client_kwargs = {"api_key": api_key}
    if base_url:
        client_kwargs["base_url"] = base_url
    client = OpenAI(**client_kwargs)
    # JSON schema for strict verification: every property is required and
    # additionalProperties is disabled, as strict structured output demands.
    schema = {
        "name": "model_identity",
        "strict": True,
        "schema": {
            "type": "object",
            "additionalProperties": False,
            "properties": {
                "model_name": {"type": "string"},
                "model_family": {"type": "string"},
                "provider": {"type": "string"},
                "api_ok": {"type": "boolean"},
                "miscellaneous": {"type": "string"},
            },
            "required": ["model_name", "model_family", "provider", "api_ok", "miscellaneous"],
        },
    }
    print("Sending test request...")
    request_kwargs = {
        "model": model,
        "input": [
            {
                "role": "system",
                "content": (
                    "You are a test endpoint. "
                    "Identify yourself accurately. "
                    "Output must strictly follow the provided JSON schema."
                ),
            },
            {
                "role": "user",
                "content": (
                    "Return your exact model name, model family/species, "
                    "and provider. Set api_ok=true."
                ),
            },
        ],
        "text": {
            "format": {
                "type": "json_schema",
                "name": schema["name"],
                "strict": True,
                "schema": schema["schema"],
            }
        },
    }
    # BUG FIX: reasoning_effort was loaded from config but never applied to
    # the request. Pass it through when set, so the config knob takes effect.
    if reasoning_effort:
        request_kwargs["reasoning"] = eval_reasoning_effort(reasoning_effort)
    resp = client.responses.create(**request_kwargs)
    # Extract structured output; keep the try body minimal (parse only).
    try:
        raw = resp.output_text
        data = json.loads(raw)
    except Exception as e:
        print("❌ Failed to parse JSON response")
        print(e)
        print(resp)
        sys.exit(2)
    print("\n✅ API TEST SUCCESS")
    print(json.dumps(data, indent=2, ensure_ascii=False))
if __name__ == "__main__":
    main()