
Commit 2b6dd88

[Autotuner] Add crash recovery script for unrecoverable CUDA errors
stack-info: PR: #1923, branch: yf225/stack/93
1 parent c22cbfe commit 2b6dd88
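
In practice, the new wrapper runs as a module that wraps the autotuning command in a retry loop, with the checkpoint directory exported in the environment. A minimal invocation sketch, mirroring the usage block in the new module's docstring below; the checkpoint path, retry count, and wrapped command are illustrative:

```bash
# Illustrative invocation; paths and the wrapped command are examples only.
HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/$USER/helion_ckpt \
    python -m helion.experimental.crash_recovery --max-retries 10 -- python train.py
```

On an unrecoverable CUDA crash, the wrapper records the offending config and re-runs the command; the autotuner then resumes from its checkpoint and skips the recorded config.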

6 files changed (+387, -3 lines)

docs/deployment_autotuning.md

Lines changed: 2 additions & 2 deletions
@@ -196,10 +196,10 @@ automatically. On successful completion, the checkpoint file is cleaned up.
 
 ```bash
 # Enable checkpointing to a directory:
-HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/helion_checkpoints python run_kernel.py
+HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/$USER/helion_checkpoints python run_kernel.py
 
 # If interrupted, just re-run with the same directory to resume:
-HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/helion_checkpoints python run_kernel.py
+HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/$USER/helion_checkpoints python run_kernel.py
 
 ```
 
 Without `HELION_AUTOTUNE_CHECKPOINT_DIR`, no checkpoints are saved (opt-in).

helion/autotuner/base_search.py

Lines changed: 76 additions & 1 deletion
@@ -367,6 +367,7 @@ def __init__(self, kernel: _AutotunableKernel, args: Sequence[object]) -> None:
         self._precompile_tmpdir: tempfile.TemporaryDirectory[str] | None = None
         self._precompile_args_path: str | None = None
         self._precompile_result_counter = count()
+        self._crashed_config_strs: set[str] = set()
 
     def _prepare(self) -> None:
         """Some initialization deferred until autotuning actually runs.
@@ -471,6 +472,32 @@ def _try_load_checkpoint(self) -> bool:
         self.log(f"Resumed at generation {self._current_generation}")
         return True
 
+    def _load_crashed_configs(self) -> None:
+        """Load crashed configs from {hash}.crashed_configs (written by crash-recovery script)."""
+        checkpoint_dir_str = self.settings.autotune_checkpoint_dir
+        if checkpoint_dir_str is None:
+            return
+        crashed_configs_path = (
+            Path(checkpoint_dir_str) / f"{self._get_stable_hash()}.crashed_configs"
+        )
+        if crashed_configs_path.exists():
+            self._crashed_config_strs |= {
+                line.strip()
+                for line in crashed_configs_path.read_text().splitlines()
+                if line.strip()
+            }
+        if self._crashed_config_strs:
+            self.log(
+                f"Loaded {len(self._crashed_config_strs)} crashed config(s) to skip"
+            )
+
+    def _get_pending_config_path(self) -> Path | None:
+        """Get path for pending-config sentinel, or None if checkpointing disabled."""
+        checkpoint_dir_str = self.settings.autotune_checkpoint_dir
+        if checkpoint_dir_str is None:
+            return None
+        return Path(checkpoint_dir_str) / f"{self._get_stable_hash()}.pending_config"
+
     def _compute_baseline(
         self,
     ) -> tuple[object, Sequence[int], Sequence[object] | None]:
@@ -693,6 +720,12 @@ def benchmark_function(self, config: Config, fn: CompiledConfig) -> float:
         Returns:
             The performance of the configuration in ms.
         """
+        # Skip configs that previously crashed the subprocess
+        config_str = str(config)
+        if config_str in self._crashed_config_strs:
+            self.log.warning(f"Skipping known-crashed config: {config}")
+            return inf
+
         self._autotune_metrics.num_configs_tested += 1
         self.counters["benchmark"] += 1
         self.log.debug(lambda: f"Running benchmark for {config!r}")
@@ -973,13 +1006,36 @@ def _benchmark(
             A list of BenchmarkResult entries containing the configuration, compiled
             callable, measured performance, status, and compilation time.
         """
+        # Filter out known-crashed configs before compilation
+        if self._crashed_config_strs:
+            original_len = len(configs)
+            configs = [c for c in configs if str(c) not in self._crashed_config_strs]
+            skipped = original_len - len(configs)
+            if skipped:
+                self.log.warning(
+                    f"Skipped {skipped} known-crashed config(s) before compilation"
+                )
+            if not configs:
+                return []
+
         fns: list[Callable[..., object]] = []
         valid_configs: list[Config] = []
         futures: list[PrecompileFuture] | None = None
+        pending_path = self._get_pending_config_path()
         for i, config in enumerate(configs):
+            # Write sentinel before compile so a hard crash (SIGKILL /
+            # CUDA IMA) leaves a trace the crash recovery script can find.
+            if pending_path is not None:
+                pending_path.write_text(str(config))
             try:
                 fn = self.kernel.compile_config(config, allow_print=False)
-            except Exception:
+            except Exception as e:
+                if match_unrecoverable_runtime_error(e):
+                    # Leave sentinel for crash recovery — CUDA context is
+                    # corrupted and the process cannot continue.
+                    raise
+                if pending_path is not None:
+                    pending_path.unlink(missing_ok=True)
                 # If all configs failed, raise error
                 if not valid_configs and i == len(configs) - 1:
                     raise
@@ -989,9 +1045,14 @@ def _benchmark(
                     exc_info=True,
                 )
                 continue
+            if pending_path is not None:
+                pending_path.unlink(missing_ok=True)
             fns.append(fn)
             valid_configs.append(config)
         configs = valid_configs
+        # NOTE: precompile runs in separate subprocesses with isolated CUDA
+        # contexts; crashes there are caught via is_working checks, not
+        # sentinels.
         if self.settings.autotune_precompile:
             futures = list(
                 starmap(
@@ -1053,7 +1114,14 @@ def _benchmark(
                 )
             )
             # benchmark one-by-one to avoid noisy results
+            # Write pending-config sentinel; cleared after benchmark.
+            # On crash the file stays so the crash recovery script can
+            # detect which config caused the failure.
+            if pending_path is not None:
+                pending_path.write_text(str(config))
             perf = self.benchmark_function(config, fn)
+            if pending_path is not None:
+                pending_path.unlink(missing_ok=True)
             status = "ok" if math.isfinite(perf) else "error"
             # Log completion after benchmarking
             self.log.record_autotune_entry(
@@ -1158,6 +1226,7 @@ def autotune(self, *, skip_cache: bool = False) -> Config:
 
         if not self._try_load_checkpoint():
             self._init_search()
+        self._load_crashed_configs()
         try:
            best = self._autotune()
            self._cleanup_checkpoint()
@@ -1259,6 +1328,12 @@ def _cleanup_checkpoint(self) -> None:
             checkpoint_file.unlink()
             self.log(f"Checkpoint cleaned up: {checkpoint_file}")
 
+        # Clean up crash-recovery artifacts
+        for suffix in (".pending_config", ".crashed_configs"):
+            artifact = Path(checkpoint_dir_str) / f"{stable_hash}{suffix}"
+            if artifact.exists():
+                artifact.unlink()
+
     @staticmethod
     def _serialize_numpy_rng_state(
         state: tuple[str, Any, int, int, float],
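
For orientation, here is a hedged sketch of the sentinel handshake these hunks set up between the autotuner and the recovery wrapper. The `abc123` prefix and the directory layout are placeholders; the real prefix comes from `_get_stable_hash()` and is not shown in this diff:

```bash
# Placeholder walk-through; "abc123" stands in for the kernel's stable hash.
ls "$HELION_AUTOTUNE_CHECKPOINT_DIR"
#   abc123.pending_config   <- written before each compile/benchmark attempt,
#                              left behind if the process dies on that config
# The recovery wrapper appends the pending config to the skip list and clears it:
cat "$HELION_AUTOTUNE_CHECKPOINT_DIR/abc123.pending_config" \
    >> "$HELION_AUTOTUNE_CHECKPOINT_DIR/abc123.crashed_configs"
rm "$HELION_AUTOTUNE_CHECKPOINT_DIR/abc123.pending_config"
# On the next run, _load_crashed_configs() reads the .crashed_configs file and
# benchmark_function() / _benchmark() skip any config whose str() matches a line.
```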
helion/experimental/crash_recovery.py

Lines changed: 146 additions & 0 deletions
@@ -0,0 +1,146 @@
+"""Autotuner crash recovery wrapper.
+
+Runs a command (typically a Python script that calls helion autotuning) in a
+retry loop. When the process crashes due to an unrecoverable CUDA error
+(illegal memory access, misaligned address, etc.), the autotuner leaves a
+``{hash}.pending_config`` sentinel in the checkpoint directory. This script
+detects that file, records the poison config in ``{hash}.crashed_configs``, and
+re-runs the command. On re-run the autotuner loads its checkpoint and skips
+the crashed config.
+
+Progress detection
+------------------
+Each crash should block a different config (since blocked configs are skipped
+on re-run). If the same config crashes twice, the autotuner is stuck and we
+give up.
+
+Requirements
+------------
+``HELION_AUTOTUNE_CHECKPOINT_DIR`` must be set in the environment.
+
+Usage
+-----
+::
+
+    HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/$USER/helion_ckpt \\
+        python -m helion.experimental.crash_recovery [--max-retries N] -- COMMAND [ARGS...]
+
+Examples
+--------
+::
+
+    HELION_AUTOTUNE_CHECKPOINT_DIR=/tmp/$USER/helion_autotune_ckpt \\
+        python -m helion.experimental.crash_recovery -- python train.py
+"""
+
+from __future__ import annotations
+
+import argparse
+import os
+from pathlib import Path
+import subprocess
+import sys
+
+
+def _log(msg: str) -> None:
+    print(f"[crash-recovery] {msg}", file=sys.stderr)
+
+
+def main(argv: list[str] | None = None) -> int:
+    parser = argparse.ArgumentParser(
+        description="Autotuner crash recovery wrapper.",
+        usage=(
+            "HELION_AUTOTUNE_CHECKPOINT_DIR=/path/to/dir\n"
+            " %(prog)s [--max-retries N] -- COMMAND [ARGS...]"
+        ),
+    )
+    parser.add_argument(
+        "--max-retries",
+        type=int,
+        default=50,
+        help="Maximum number of crash recovery retries (default: 50)",
+    )
+    parser.add_argument(
+        "command",
+        nargs=argparse.REMAINDER,
+        help="Command to run (after '--' separator)",
+    )
+    args = parser.parse_args(argv)
+
+    # argparse.REMAINDER absorbs '--' as first element when present.
+    command: list[str] = args.command
+    if command and command[0] == "--":
+        command = command[1:]
+    if not command:
+        parser.error("no command specified after --")
+
+    checkpoint_dir_str = os.environ.get("HELION_AUTOTUNE_CHECKPOINT_DIR", "")
+    if not checkpoint_dir_str:
+        print(
+            "Error: HELION_AUTOTUNE_CHECKPOINT_DIR must be set.",
+            file=sys.stderr,
+        )
+        return 1
+
+    checkpoint_dir = Path(checkpoint_dir_str)
+    checkpoint_dir.mkdir(parents=True, exist_ok=True)
+
+    attempt = 0
+    all_crashed: set[str] = set()
+
+    while True:
+        attempt += 1
+
+        result = subprocess.run(command)
+        exit_code = result.returncode
+
+        if exit_code == 0:
+            return 0
+
+        # Look for any *.pending_config sentinel left by the autotuner.
+        pending_files = sorted(checkpoint_dir.glob("*.pending_config"))
+
+        if pending_files:
+            stuck = False
+            for pending_path in pending_files:
+                hash_prefix = pending_path.stem  # {hash} without .pending_config
+                crashed_configs_path = checkpoint_dir / f"{hash_prefix}.crashed_configs"
+
+                config = pending_path.read_text().strip()
+                pending_path.unlink()
+
+                with open(crashed_configs_path, "a") as f:
+                    f.write(config + "\n")
+
+                _log(f"Blocked config: {config}")
+
+                # If this config was already blocked in a previous attempt,
+                # the autotuner is not skipping it -- it's stuck.
+                if config in all_crashed:
+                    stuck = True
+                all_crashed.add(config)
+
+            _log(f"Process crashed (exit code {exit_code}, attempt {attempt}).")
+
+            if stuck:
+                _log("Same config crashed twice \u2014 the autotuner appears stuck.")
+                _log(
+                    "All crashed configs have been recorded. You can re-run "
+                    "this script and it will resume from the latest "
+                    "checkpoint, skipping all previously recorded crashed "
+                    "configs."
+                )
+                return 1
+
+            if attempt >= args.max_retries:
+                _log(f"Reached maximum retry limit ({args.max_retries}). Giving up.")
+                return 1
+
+            _log("Restarting from checkpoint...")
+        else:
+            # No pending file -- not a recoverable CUDA crash.
+            return exit_code
+
+
+if __name__ == "__main__":
+    sys.exit(main())
