|
| 1 | +import math |
| 2 | +import random |
| 3 | +import time |
| 4 | + |
| 5 | +import trackio as wandb |
| 6 | + |
# Settings for the synthetic-training demo below.
EPOCHS = 20  # number of logged steps per run
NUM_RUNS = 20  # number of independent runs created in the project
# Random suffix so each execution of this script lands in a fresh project.
PROJECT_ID = random.randint(100000, 999999)
| 10 | + |
| 11 | + |
def generate_loss_curve(epoch, max_epochs, base_loss=2.5, min_loss=0.1):
    """Simulate a training-loss value for *epoch* out of *max_epochs*.

    The expected value decays exponentially from roughly ``base_loss``
    toward ``min_loss``; Gaussian jitter shrinks as training progresses,
    and the result never drops below half of ``min_loss``.
    """
    frac = epoch / max_epochs
    expected = base_loss * math.exp(-3 * frac) + min_loss

    # Noise amplitude tapers off late in training.
    jitter = random.gauss(0, 0.3 * (1 - frac * 0.7))

    floor = min_loss * 0.5
    noisy = expected + jitter
    return noisy if noisy > floor else floor
| 20 | + |
| 21 | + |
def generate_accuracy_curve(epoch, max_epochs, max_acc=0.95, min_acc=0.1):
    """Simulate an accuracy value following a noisy logistic curve.

    Rises sigmoidally (midpoint at 50% of training) from about
    ``min_acc`` toward ``max_acc``; the result is clamped to
    ``[0, max_acc]``.
    """
    frac = epoch / max_epochs
    logistic = max_acc / (1 + math.exp(-6 * (frac - 0.5)))

    # Noise amplitude halves by the end of training.
    jitter = random.gauss(0, 0.08 * (1 - frac * 0.5))

    noisy = logistic + min_acc + jitter
    if noisy < 0:
        return 0
    if noisy > max_acc:
        return max_acc
    return noisy
| 30 | + |
| 31 | + |
def generate_grad_norm_curve(epoch, max_epochs):
    """Simulate a gradient-norm trace with an unstable warm-up.

    The first three epochs return fixed spike values (inf, 1000, 100)
    to mimic early-training blow-ups; afterwards the norm decays
    exponentially toward ~1 with Gaussian jitter, floored at 0.1.
    """
    # Hard-coded warm-up spikes for the first three epochs.
    warmup_spikes = {0: float("inf"), 1: 1000.0, 2: 100.0}
    if epoch in warmup_spikes:
        return warmup_spikes[epoch]

    frac = (epoch - 2) / (max_epochs - 2)
    decayed = 50 * math.exp(-4 * frac) + 1.0
    return max(0.1, decayed + random.gauss(0, 0.5))
| 44 | + |
| 45 | + |
for run_idx in range(NUM_RUNS):
    # Each iteration is an independent tracked run inside the same
    # randomly-suffixed project.
    wandb.init(
        project=f"fake-training-many-{PROJECT_ID}",
        name=f"test-run-{run_idx}",
        config={"epochs": EPOCHS, "learning_rate": 0.001, "batch_size": 32},
    )

    for epoch in range(EPOCHS):
        # Draw fresh curve parameters every epoch so each metric wiggles
        # independently across runs.
        train_loss = generate_loss_curve(
            epoch,
            EPOCHS,
            base_loss=random.uniform(2.5, 3.5),
            min_loss=random.uniform(0.05, 0.15),
        )
        val_loss = generate_loss_curve(
            epoch,
            EPOCHS,
            base_loss=random.uniform(2.5, 3.5),
            min_loss=random.uniform(0.05, 0.15),
        )

        train_accuracy = generate_accuracy_curve(
            epoch,
            EPOCHS,
            max_acc=random.uniform(0.7, 0.9),
            min_acc=random.uniform(0.1, 0.3),
        )
        val_accuracy = generate_accuracy_curve(
            epoch,
            EPOCHS,
            max_acc=random.uniform(0.7, 0.9),
            min_acc=random.uniform(0.1, 0.3),
        )

        grad_norm = generate_grad_norm_curve(epoch, EPOCHS)

        # Past the warm-up, occasionally degrade validation metrics to
        # mimic an overfitting blip (30% of epochs).
        if epoch > 2 and random.random() < 0.3:
            val_loss = val_loss * 1.1
            val_accuracy = val_accuracy * 0.95

        wandb.log(
            {
                "train/loss": round(train_loss, 4),
                "train/accuracy": round(train_accuracy, 4),
                "train/rewards/reward1": random.random(),
                "train/rewards/reward2": random.random(),
                "val/loss": round(val_loss, 4),
                "val/accuracy": round(val_accuracy, 4),
                "grad_norm": grad_norm,
            }
        )

        # Small pause so the dashboard renders the stream as live updates.
        time.sleep(0.05)

    wandb.finish()
0 commit comments