Skip to content

Commit f3e9cce

Browse files
committed
Add auto-detection for Intel GPUs
1 parent a3db9a9 commit f3e9cce

3 files changed

Lines changed: 74 additions & 9 deletions

File tree

crates/uv-torch/src/accelerator.rs

Lines changed: 50 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -30,27 +30,34 @@ pub enum Accelerator {
3030
Amd {
3131
gpu_architecture: AmdGpuArchitecture,
3232
},
33+
/// The Intel GPU (XPU).
34+
///
35+
/// Currently, Intel GPUs do not depend on a driver/toolkit version at this level.
36+
Xpu,
3337
}
3438

3539
impl std::fmt::Display for Accelerator {
3640
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
3741
match self {
3842
Self::Cuda { driver_version } => write!(f, "CUDA {driver_version}"),
3943
Self::Amd { gpu_architecture } => write!(f, "AMD {gpu_architecture}"),
44+
Self::Xpu => write!(f, "Intel GPU (XPU) detected"),
4045
}
4146
}
4247
}
4348

4449
impl Accelerator {
45-
/// Detect the CUDA driver version from the system.
50+
/// Detect the GPU driver/architecture version from the system.
4651
///
4752
/// Query, in order:
4853
/// 1. The `UV_CUDA_DRIVER_VERSION` environment variable.
4954
/// 2. The `UV_AMD_GPU_ARCHITECTURE` environment variable.
50-
/// 2. `/sys/module/nvidia/version`, which contains the driver version (e.g., `550.144.03`).
51-
/// 3. `/proc/driver/nvidia/version`, which contains the driver version among other information.
52-
/// 4. `nvidia-smi --query-gpu=driver_version --format=csv,noheader`.
53-
/// 5. `rocm_agent_enumerator`, which lists the AMD GPU architectures.
55+
/// 3. `/sys/module/nvidia/version`, which contains the driver version (e.g., `550.144.03`).
56+
/// 4. `/proc/driver/nvidia/version`, which contains the driver version among other information.
57+
/// 5. `nvidia-smi --query-gpu=driver_version --format=csv,noheader`.
58+
/// 6. `rocm_agent_enumerator`, which lists the AMD GPU architectures.
59+
/// 7. `clinfo -l`, which lists the available OpenCL platforms and devices (including any Intel GPU).
60+
/// 8. A `powershell` query against `Win32_VideoController` for an Intel GPU (Windows only).
5461
pub fn detect() -> Result<Option<Self>, AcceleratorError> {
5562
// Read from `UV_CUDA_DRIVER_VERSION`.
5663
if let Ok(driver_version) = std::env::var(EnvVars::UV_CUDA_DRIVER_VERSION) {
@@ -150,6 +157,44 @@ impl Accelerator {
150157
}
151158
}
152159

160+
// Query `clinfo -l` to detect the Intel GPU.
161+
if let Ok(output) = std::process::Command::new("clinfo").arg("-l").output() {
162+
if output.status.success() {
163+
let stdout = String::from_utf8(output.stdout)?;
164+
if stdout.contains("Intel") {
165+
debug!("Detected Intel GPU from `clinfo` output: {stdout}");
166+
return Ok(Some(Self::Xpu));
167+
}
168+
} else {
169+
debug!(
170+
"Failed to query Intel GPU with `clinfo` with status `{}`: {}",
171+
output.status,
172+
String::from_utf8_lossy(&output.stderr)
173+
);
174+
}
175+
}
176+
177+
// Query Intel GPU by `powershell` command on Windows.
178+
if let Ok(output) = std::process::Command::new("powershell")
179+
.arg("-Command")
180+
.arg("Get-WmiObject Win32_VideoController | Where-Object { $_.Name -like '*Intel*' } | Select-Object Name, DriverVersion")
181+
.output()
182+
{
183+
if output.status.success() {
184+
let stdout = String::from_utf8(output.stdout)?;
185+
if stdout.contains("Intel") {
186+
debug!("Detected Intel GPU from powershell output: {stdout}");
187+
return Ok(Some(Self::Xpu));
188+
}
189+
} else {
190+
debug!(
191+
"Failed to query Intel GPU with powershell with status `{}`: {}",
192+
output.status,
193+
String::from_utf8_lossy(&output.stderr)
194+
);
195+
}
196+
}
197+
153198
debug!("Failed to detect GPU driver version");
154199

155200
Ok(None)

crates/uv-torch/src/backend.rs

Lines changed: 23 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,8 @@ pub enum TorchStrategy {
185185
os: Os,
186186
gpu_architecture: AmdGpuArchitecture,
187187
},
188+
/// Select the appropriate PyTorch index based on the operating system and Intel GPU presence.
189+
Xpu { os: Os },
188190
/// Use the specified PyTorch index.
189191
Backend(TorchBackend),
190192
}
@@ -202,6 +204,7 @@ impl TorchStrategy {
202204
os: os.clone(),
203205
gpu_architecture,
204206
}),
207+
Some(Accelerator::Xpu) => Ok(Self::Xpu { os: os.clone() }),
205208
None => Ok(Self::Backend(TorchBackend::Cpu)),
206209
},
207210
TorchMode::Cpu => Ok(Self::Backend(TorchBackend::Cpu)),
@@ -347,9 +350,26 @@ impl TorchStrategy {
347350
Either::Right(Either::Left(std::iter::once(TorchBackend::Cpu.index_url())))
348351
}
349352
},
350-
TorchStrategy::Backend(backend) => {
351-
Either::Right(Either::Right(std::iter::once(backend.index_url())))
352-
}
353+
TorchStrategy::Xpu { os } => match os {
354+
Os::Manylinux { .. } | Os::Windows => Either::Right(Either::Right(Either::Left(
355+
std::iter::once(TorchBackend::Xpu.index_url()),
356+
))),
357+
Os::Musllinux { .. }
358+
| Os::Macos { .. }
359+
| Os::FreeBsd { .. }
360+
| Os::NetBsd { .. }
361+
| Os::OpenBsd { .. }
362+
| Os::Dragonfly { .. }
363+
| Os::Illumos { .. }
364+
| Os::Haiku { .. }
365+
| Os::Android { .. }
366+
| Os::Pyodide { .. } => {
367+
Either::Right(Either::Left(std::iter::once(TorchBackend::Cpu.index_url())))
368+
}
369+
},
370+
TorchStrategy::Backend(backend) => Either::Right(Either::Right(Either::Right(
371+
std::iter::once(backend.index_url()),
372+
))),
353373
}
354374
}
355375
}

docs/guides/integration/pytorch.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -444,7 +444,7 @@ $ # With an environment variable.
444444
$ UV_TORCH_BACKEND=auto uv pip install torch
445445
```
446446

447-
When enabled, uv will query for the installed CUDA driver and AMD GPU versions then use the
447+
When enabled, uv will query for the installed CUDA driver version, the AMD GPU architecture, and the presence of an Intel GPU, and then use the
448448
most-compatible PyTorch index for all relevant packages (e.g., `torch`, `torchvision`, etc.). If no
449449
such GPU is found, uv will fall back to the CPU-only index. uv will continue to respect existing
450450
index configuration for any packages outside the PyTorch ecosystem.

0 commit comments

Comments
 (0)