Skip to content

Commit f797ba8

Browse files
author
Wojciech Napierała
committed
Update model resolution logic in TaskExecutor for LiteLLM provider compatibility
- Prefixed model names for Ollama and Azure providers to ensure correct resolution.
- Enhanced handling of provider-specific model identifiers to improve integration with LiteLLM.
1 parent 029d576 commit f797ba8

1 file changed

Lines changed: 12 additions & 5 deletions

File tree

core/task_executor.py

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
v0.3 - 2025-11-06 - Integrated controller bias into workflow selection metadata.
88
v0.4 - 2025-11-07 - Enabled optional LiteLLM debug toggling from configuration.
99
v0.5 - 2025-11-07 - Normalised Azure provider routing for LiteLLM compatibility.
10+
v0.6 - 2025-11-07 - Prefixed Ollama models for LiteLLM provider resolution.
1011
"""
1112

1213
from __future__ import annotations
@@ -248,10 +249,16 @@ def _activate_litellm_debug(self) -> None:
248249
def _resolve_model_name(self, workflow_cfg: WorkflowModelConfig) -> str:
249250
"""Normalise provider-specific model identifiers for LiteLLM."""
250251
model_name = workflow_cfg.model
251-
if workflow_cfg.provider.lower() != "azure":
252-
return model_name
252+
provider = workflow_cfg.provider.lower()
253253

254-
if model_name.startswith("azure/"):
255-
return model_name
254+
if provider == "azure":
255+
if model_name.startswith("azure/"):
256+
return model_name
257+
return f"azure/{model_name}"
256258

257-
return f"azure/{model_name}"
259+
if provider == "ollama":
260+
if model_name.startswith(("ollama/", "ollama_chat/")):
261+
return model_name
262+
return f"ollama/{model_name}"
263+
264+
return model_name

0 commit comments

Comments
 (0)