Skip to content

Commit bf81a5b

Browse files
Add CUDA 13.0 support (#16321)
## Summary

Closes #16319.
1 parent 766bd95 commit bf81a5b

3 files changed

Lines changed: 32 additions & 2 deletions

File tree

crates/uv-torch/src/backend.rs

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,8 @@ pub enum TorchMode {
6161
Auto,
6262
/// Use the CPU-only PyTorch index.
6363
Cpu,
64+
/// Use the PyTorch index for CUDA 13.0.
65+
Cu130,
6466
/// Use the PyTorch index for CUDA 12.9.
6567
Cu129,
6668
/// Use the PyTorch index for CUDA 12.8.
@@ -244,6 +246,7 @@ impl TorchStrategy {
244246
None => TorchBackend::Cpu,
245247
},
246248
TorchMode::Cpu => TorchBackend::Cpu,
249+
TorchMode::Cu130 => TorchBackend::Cu130,
247250
TorchMode::Cu129 => TorchBackend::Cu129,
248251
TorchMode::Cu128 => TorchBackend::Cu128,
249252
TorchMode::Cu126 => TorchBackend::Cu126,
@@ -492,6 +495,7 @@ impl TorchStrategy {
492495
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
493496
pub enum TorchBackend {
494497
Cpu,
498+
Cu130,
495499
Cu129,
496500
Cu128,
497501
Cu126,
@@ -544,6 +548,10 @@ impl TorchBackend {
544548
TorchSource::PyTorch => &PYTORCH_CPU_INDEX_URL,
545549
TorchSource::Pyx => &PYX_CPU_INDEX_URL,
546550
},
551+
Self::Cu130 => match source {
552+
TorchSource::PyTorch => &PYTORCH_CU130_INDEX_URL,
553+
TorchSource::Pyx => &PYX_CU130_INDEX_URL,
554+
},
547555
Self::Cu129 => match source {
548556
TorchSource::PyTorch => &PYTORCH_CU129_INDEX_URL,
549557
TorchSource::Pyx => &PYX_CU129_INDEX_URL,
@@ -745,6 +753,7 @@ impl TorchBackend {
745753
pub fn cuda_version(&self) -> Option<Version> {
746754
match self {
747755
Self::Cpu => None,
756+
Self::Cu130 => Some(Version::new([13, 0])),
748757
Self::Cu129 => Some(Version::new([12, 9])),
749758
Self::Cu128 => Some(Version::new([12, 8])),
750759
Self::Cu126 => Some(Version::new([12, 6])),
@@ -794,6 +803,7 @@ impl TorchBackend {
794803
pub fn rocm_version(&self) -> Option<Version> {
795804
match self {
796805
Self::Cpu => None,
806+
Self::Cu130 => None,
797807
Self::Cu129 => None,
798808
Self::Cu128 => None,
799809
Self::Cu126 => None,
@@ -846,6 +856,8 @@ impl FromStr for TorchBackend {
846856
fn from_str(s: &str) -> Result<Self, Self::Err> {
847857
match s {
848858
"cpu" => Ok(Self::Cpu),
859+
"cu130" => Ok(Self::Cu130),
860+
"cu129" => Ok(Self::Cu129),
849861
"cu128" => Ok(Self::Cu128),
850862
"cu126" => Ok(Self::Cu126),
851863
"cu125" => Ok(Self::Cu125),
@@ -895,10 +907,12 @@ impl FromStr for TorchBackend {
895907
/// Linux CUDA driver versions and the corresponding CUDA versions.
896908
///
897909
/// See: <https://github.com/pmeier/light-the-torch/blob/33397cbe45d07b51ad8ee76b004571a4c236e37f/light_the_torch/_cb.py#L150-L213>
898-
static LINUX_CUDA_DRIVERS: LazyLock<[(TorchBackend, Version); 24]> = LazyLock::new(|| {
910+
static LINUX_CUDA_DRIVERS: LazyLock<[(TorchBackend, Version); 26]> = LazyLock::new(|| {
899911
[
900912
// Table 2 from
901913
// https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
914+
(TorchBackend::Cu130, Version::new([580])),
915+
(TorchBackend::Cu129, Version::new([525, 60, 13])),
902916
(TorchBackend::Cu128, Version::new([525, 60, 13])),
903917
(TorchBackend::Cu126, Version::new([525, 60, 13])),
904918
(TorchBackend::Cu125, Version::new([525, 60, 13])),
@@ -933,10 +947,12 @@ static LINUX_CUDA_DRIVERS: LazyLock<[(TorchBackend, Version); 24]> = LazyLock::n
933947
/// Windows CUDA driver versions and the corresponding CUDA versions.
934948
///
935949
/// See: <https://github.com/pmeier/light-the-torch/blob/33397cbe45d07b51ad8ee76b004571a4c236e37f/light_the_torch/_cb.py#L150-L213>
936-
static WINDOWS_CUDA_VERSIONS: LazyLock<[(TorchBackend, Version); 24]> = LazyLock::new(|| {
950+
static WINDOWS_CUDA_VERSIONS: LazyLock<[(TorchBackend, Version); 26]> = LazyLock::new(|| {
937951
[
938952
// Table 2 from
939953
// https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
954+
(TorchBackend::Cu130, Version::new([580])),
955+
(TorchBackend::Cu129, Version::new([528, 33])),
940956
(TorchBackend::Cu128, Version::new([528, 33])),
941957
(TorchBackend::Cu126, Version::new([528, 33])),
942958
(TorchBackend::Cu125, Version::new([528, 33])),
@@ -1037,6 +1053,8 @@ static LINUX_AMD_GPU_DRIVERS: LazyLock<[(TorchBackend, AmdGpuArchitecture); 44]>
10371053

10381054
static PYTORCH_CPU_INDEX_URL: LazyLock<IndexUrl> =
10391055
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cpu").unwrap());
1056+
static PYTORCH_CU130_INDEX_URL: LazyLock<IndexUrl> =
1057+
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu130").unwrap());
10401058
static PYTORCH_CU129_INDEX_URL: LazyLock<IndexUrl> =
10411059
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu129").unwrap());
10421060
static PYTORCH_CU128_INDEX_URL: LazyLock<IndexUrl> =
@@ -1131,6 +1149,10 @@ static PYX_CPU_INDEX_URL: LazyLock<IndexUrl> = LazyLock::new(|| {
11311149
let api_base_url = &*PYX_API_BASE_URL;
11321150
IndexUrl::from_str(&format!("{api_base_url}/simple/astral-sh/cpu")).unwrap()
11331151
});
1152+
static PYX_CU130_INDEX_URL: LazyLock<IndexUrl> = LazyLock::new(|| {
1153+
let api_base_url = &*PYX_API_BASE_URL;
1154+
IndexUrl::from_str(&format!("{api_base_url}/simple/astral-sh/cu130")).unwrap()
1155+
});
11341156
static PYX_CU129_INDEX_URL: LazyLock<IndexUrl> = LazyLock::new(|| {
11351157
let api_base_url = &*PYX_API_BASE_URL;
11361158
IndexUrl::from_str(&format!("{api_base_url}/simple/astral-sh/cu129")).unwrap()

docs/reference/cli.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4254,6 +4254,7 @@ by <code>--python-version</code>.</p>
42544254
<ul>
42554255
<li><code>auto</code>: Select the appropriate PyTorch index based on the operating system and CUDA driver version</li>
42564256
<li><code>cpu</code>: Use the CPU-only PyTorch index</li>
4257+
<li><code>cu130</code>: Use the PyTorch index for CUDA 13.0</li>
42574258
<li><code>cu129</code>: Use the PyTorch index for CUDA 12.9</li>
42584259
<li><code>cu128</code>: Use the PyTorch index for CUDA 12.8</li>
42594260
<li><code>cu126</code>: Use the PyTorch index for CUDA 12.6</li>
@@ -4536,6 +4537,7 @@ be used with caution, as it can modify the system Python installation.</p>
45364537
<ul>
45374538
<li><code>auto</code>: Select the appropriate PyTorch index based on the operating system and CUDA driver version</li>
45384539
<li><code>cpu</code>: Use the CPU-only PyTorch index</li>
4540+
<li><code>cu130</code>: Use the PyTorch index for CUDA 13.0</li>
45394541
<li><code>cu129</code>: Use the PyTorch index for CUDA 12.9</li>
45404542
<li><code>cu128</code>: Use the PyTorch index for CUDA 12.8</li>
45414543
<li><code>cu126</code>: Use the PyTorch index for CUDA 12.6</li>
@@ -4844,6 +4846,7 @@ should be used with caution, as it can modify the system Python installation.</p
48444846
<ul>
48454847
<li><code>auto</code>: Select the appropriate PyTorch index based on the operating system and CUDA driver version</li>
48464848
<li><code>cpu</code>: Use the CPU-only PyTorch index</li>
4849+
<li><code>cu130</code>: Use the PyTorch index for CUDA 13.0</li>
48474850
<li><code>cu129</code>: Use the PyTorch index for CUDA 12.9</li>
48484851
<li><code>cu128</code>: Use the PyTorch index for CUDA 12.8</li>
48494852
<li><code>cu126</code>: Use the PyTorch index for CUDA 12.6</li>

uv.schema.json

Lines changed: 5 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)