26 changes: 26 additions & 0 deletions eyepop/data/types/vlm.py
@@ -196,6 +196,29 @@ class InferRunInfo(BaseModel):
)


class LLMProviderConfig(BaseModel):
provider: Literal["openai", "qwen3"] = Field(
default="openai",
description="LLM provider: 'openai' for OpenAI API, 'qwen3' for local Qwen3 LLM worker",
)
llm_api_key: str | None = Field(
default=None,
description="API key for the LLM provider (required for OpenAI)",
)
model: str = Field(
default="gpt-4o",
description="Model identifier for the LLM",
)
temperature: float = Field(
default=0.3,
description="Sampling temperature for LLM",
)
max_new_tokens: int = Field(
default=1048,
description="Maximum tokens to generate (used for qwen3 provider)",
)


class AutoPromptConfig(BaseModel):
Contributor

This is deprecated and support will be removed in the future. LLM configuration choices should be owned by the backend.

num_samples: int = Field(
default=5,
@@ -213,6 +236,9 @@ class AutoPromptConfig(BaseModel):
evaluate: EvaluateConfig = Field(
description="EvaluateConfig for VLM evaluation.",
)
llm_config: LLMProviderConfig = Field(
description="LLM provider configuration for prompt generation.",
)


class TaskType(StrEnum):
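For context, a minimal sketch of how the new `llm_config` field might be populated when constructing an `AutoPromptConfig`. This is an assumption based only on the fields visible in this diff: the model identifiers and the `EvaluateConfig` arguments are placeholders, since its required fields are not shown here.

```python
from eyepop.data.types.vlm import AutoPromptConfig, EvaluateConfig, LLMProviderConfig

# Local Qwen3 worker: no API key needed; cap generation length explicitly.
qwen_llm = LLMProviderConfig(
    provider="qwen3",
    model="Qwen/Qwen3-8B",   # hypothetical model identifier
    temperature=0.3,
    max_new_tokens=1024,
)

# OpenAI: llm_api_key is required per the field description.
openai_llm = LLMProviderConfig(
    provider="openai",
    llm_api_key="sk-...",    # placeholder
    model="gpt-4o",
)

config = AutoPromptConfig(
    num_samples=5,
    evaluate=EvaluateConfig(),  # placeholder; actual required fields not shown in this diff
    llm_config=qwen_llm,
)
```

Note that `llm_config` is declared without a default, so existing callers constructing `AutoPromptConfig` would need to supply it, which may be relevant to the deprecation concern raised above.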