Skip to content

Commit 13c3234

Browse files
author
Wojciech Napierała
committed
Enhance WSL2 support for Ollama IP detection and update documentation
- Added automatic detection of the Windows host IP for Ollama when running inside WSL2, simplifying user configuration. - Implemented regression tests for Ollama base URL resolution logic. - Updated README to document the new WSL workflow and manual override steps. - Updated CHANGELOG to reflect these enhancements.
1 parent 83d2e78 commit 13c3234

4 files changed

Lines changed: 176 additions & 2 deletions

File tree

CHANGELOG.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,15 @@
22

33
All notable changes to this project will be documented in this file. The format follows [Keep a Changelog](https://keepachangelog.com/) and dates use ISO-8601 (`YYYY-MM-DD`).
44

5+
## [0.1.4] - 2025-11-07
6+
7+
### Added
8+
- Automatic detection of the Windows host IP for Ollama when running inside WSL2, removing the need for manual configuration in common setups.
9+
- Regression tests covering Ollama base URL resolution logic.
10+
11+
### Documentation
12+
- Documented the WSL Ollama workflow and manual override steps in `README.md`.
13+
514
## [0.1.2] - 2025-11-07
615

716
### Added

README.md

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,12 @@ The project targets **Python 3.12**. Create a virtual environment with that inte
3636

3737
- Redis (port 6379) persists working memory.
3838
- ChromaDB persists via the Python package; no container is required.
39-
- Ollama (port 11434) is available for local inference; pull models with `ollama pull <model>` and set `OLLAMA_BASE_URL` when using the `local` workflow.
39+
- Ollama (port 11434) is available for local inference; pull models with `ollama pull <model>`. When running DRM inside WSL2, the executor now auto-detects the Windows host IP, but you can still override it with `OLLAMA_BASE_URL` if needed. For manual discovery:
40+
41+
```bash
42+
export WINHOST=$(ip route | awk '/^default via/ {print $3; exit}')
43+
export OLLAMA_BASE_URL="http://$WINHOST:11434"
44+
```
4045

4146
3. **Launch the app**
4247

core/task_executor.py

Lines changed: 82 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,12 +10,16 @@
1010
v0.6 - 2025-11-07 - Prefixed Ollama models for LiteLLM provider resolution.
1111
v0.7 - 2025-11-07 - Added explicit Ollama provider hints for LiteLLM routing.
1212
v0.8 - 2025-11-07 - Emitted detailed error context when workflows fail.
13+
v0.9 - 2025-11-07 - Auto-detected Ollama base URL when running under WSL.
1314
"""
1415

1516
from __future__ import annotations
1617

18+
import ipaddress
1719
import logging
20+
import subprocess
1821
import time
22+
from functools import lru_cache
1923
from os import getenv
2024
from typing import Any, Dict, Mapping, Optional, Tuple, cast
2125

@@ -34,9 +38,67 @@
3438
LOGGER = logging.getLogger("drm.executor")
3539

3640

41+
@lru_cache(maxsize=1)
def _detect_windows_host_ip(timeout_seconds: float = 2.0) -> Optional[str]:
    """Return the Windows host IPv4 address when running under WSL2.

    Confirms a WSL kernel via ``/proc/version``, then asks ``ip route`` for
    the default gateway, which is the Windows side of the virtual NIC.
    Returns ``None`` outside WSL or on any lookup failure; the result is
    cached to avoid repeated subprocess executions.
    """

    # Only WSL kernels advertise "microsoft" in their /proc/version banner.
    try:
        with open("/proc/version", encoding="utf-8") as handle:
            kernel_banner = handle.read().lower()
    except OSError:
        return None
    if "microsoft" not in kernel_banner:
        return None

    # Best-effort route query; any subprocess failure means "not detected".
    try:
        completed = subprocess.run(
            ["ip", "route"],
            check=False,
            capture_output=True,
            text=True,
            timeout=timeout_seconds,
        )
    except (OSError, subprocess.SubprocessError):
        return None
    if completed.returncode != 0 or not completed.stdout:
        return None

    # Scan for a "default ... via <ip> ..." line and return the gateway.
    for route_line in completed.stdout.splitlines():
        fields = route_line.split()
        if len(fields) < 2 or fields[0] != "default":
            continue
        gateway = next(
            (fields[i + 1] for i, word in enumerate(fields[:-1]) if word == "via"),
            None,
        )
        if gateway is not None and _is_ipv4(gateway):
            return gateway

    return None
86+
87+
88+
def _is_ipv4(value: str) -> bool:
89+
"""Return ``True`` when *value* is a valid IPv4 address."""
90+
91+
try:
92+
return isinstance(ipaddress.ip_address(value), ipaddress.IPv4Address)
93+
except ValueError:
94+
return False
95+
96+
3797
class TaskExecutor:
3898
"""Executes prompts through configured LLM workflows."""
3999

100+
DEFAULT_OLLAMA_BASE = "http://localhost:11434"
101+
40102
def __init__(
41103
self,
42104
config: AppConfig,
@@ -213,7 +275,7 @@ def _build_provider_kwargs(self, provider: str) -> Dict[str, object]:
213275
}
214276

215277
if provider.lower() == "ollama":
216-
base_url = getenv("OLLAMA_BASE_URL", "http://localhost:11434").rstrip("/")
278+
base_url = self._resolve_ollama_base_url()
217279
return {
218280
"base_url": base_url,
219281
"api_base": base_url,
@@ -269,3 +331,22 @@ def _resolve_model_name(self, workflow_cfg: WorkflowModelConfig) -> str:
269331
return f"ollama/{model_name}"
270332

271333
return model_name
334+
335+
def _resolve_ollama_base_url(self) -> str:
    """Resolve the Ollama base URL with WSL host detection fallback.

    Precedence: an explicit ``OLLAMA_BASE_URL`` environment override, then a
    Windows host IP auto-detected under WSL2, then ``DEFAULT_OLLAMA_BASE``.
    """

    override = getenv("OLLAMA_BASE_URL")
    if override:
        # Normalize away a trailing slash so path joins stay predictable.
        normalized = override.rstrip("/")
        self._logger.debug("Using OLLAMA_BASE_URL override: %s", normalized)
        return normalized

    windows_host = _detect_windows_host_ip()
    if not windows_host:
        return self.DEFAULT_OLLAMA_BASE

    self._logger.debug(
        "Detected Windows host IP %s for Ollama base URL.", windows_host
    )
    return f"http://{windows_host}:11434"

tests/test_task_executor.py

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
"""Tests for TaskExecutor provider configuration helpers.
2+
3+
Updates: v0.1 - 2025-11-07 - Added coverage for Ollama base URL resolution.
4+
"""
5+
6+
from __future__ import annotations
7+
8+
import pytest
9+
10+
from config import settings
11+
from core.task_executor import TaskExecutor
12+
13+
14+
@pytest.fixture()
def task_executor() -> TaskExecutor:
    """Provide a TaskExecutor configured with a minimal workflow set."""

    # Single local Ollama workflow with short timeouts for fast tests.
    llm_section = {
        "default_workflow": "local",
        "workflows": {
            "local": {
                "provider": "ollama",
                "model": "gemma3:1b",
                "temperature": 0.2,
            }
        },
        "timeouts": {
            "request_seconds": 10,
            "retry_attempts": 1,
            "retry_backoff_seconds": 1,
        },
        "enable_debug": False,
    }
    memory_section = {
        "redis": {"host": "localhost", "port": 6379, "db": 0, "ttl_seconds": 120},
        "chromadb": {
            "persist_directory": "data/chromadb",
            "collection": "test",
        },
    }
    payload = {
        "version": "0.1",
        "llm": llm_section,
        "memory": memory_section,
        "review": {
            "enabled": False,
            "auto_reviewer_model": None,
            "auto_reviewer_provider": None,
        },
        "embedding": None,
        "telemetry": {"log_level": "INFO"},
    }
    return TaskExecutor(settings.AppConfig.model_validate(payload))
54+
55+
56+
def test_resolve_ollama_base_url_prefers_env_override(
    task_executor: TaskExecutor, monkeypatch: pytest.MonkeyPatch
) -> None:
    """An explicit OLLAMA_BASE_URL wins and loses its trailing slash."""
    monkeypatch.setenv("OLLAMA_BASE_URL", "http://172.16.0.1:9000/")
    assert task_executor._resolve_ollama_base_url() == "http://172.16.0.1:9000"
62+
63+
64+
def test_resolve_ollama_base_url_detects_wsl_host(
    task_executor: TaskExecutor, monkeypatch: pytest.MonkeyPatch
) -> None:
    """Without an override, the detected Windows host IP forms the URL."""
    monkeypatch.delenv("OLLAMA_BASE_URL", raising=False)
    monkeypatch.setattr(
        "core.task_executor._detect_windows_host_ip", lambda: "172.31.52.230"
    )
    assert task_executor._resolve_ollama_base_url() == "http://172.31.52.230:11434"
71+
72+
73+
def test_resolve_ollama_base_url_falls_back_to_localhost(
    task_executor: TaskExecutor, monkeypatch: pytest.MonkeyPatch
) -> None:
    """With no override and no WSL host detected, default to localhost."""
    monkeypatch.delenv("OLLAMA_BASE_URL", raising=False)
    monkeypatch.setattr("core.task_executor._detect_windows_host_ip", lambda: None)
    assert (
        task_executor._resolve_ollama_base_url() == TaskExecutor.DEFAULT_OLLAMA_BASE
    )

0 commit comments

Comments
 (0)