-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
68 lines (57 loc) · 2.78 KB
/
main.py
File metadata and controls
68 lines (57 loc) · 2.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import pickle
from time import perf_counter
import numpy as np
import torch
from torch.nn import CrossEntropyLoss, MSELoss
from modules.parser import OPTION
from modules.utils import test, double_backprop_train
# Select the dataset/architecture configuration at import time from the parsed
# command-line OPTION. Every config module exposes the same interface:
# train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER.
if OPTION == "KMNIST_LeNet":
    from KMNIST_LeNet import train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER
elif OPTION == "FMNIST_LeNet":
    from FMNIST_LeNet import train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER
elif OPTION == "MNIST_LeNet":
    from MNIST_LeNet import train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER
elif OPTION == "MNIST_ResNet":
    from MNIST_ResNet import train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER
elif OPTION == "CIFAR_ResNet":
    from CIFAR_ResNet import train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER
elif OPTION == "Imagenette_ResNet":
    # NOTE(review): the module is spelled "Imagnette" (missing an "e") while
    # OPTION says "Imagenette" — presumably the file on disk uses the short
    # spelling; confirm before "fixing" either side.
    from Imagnette_ResNet import train_set, test_set, train_loader, test_loader, Model, CHECKPOINTS_FOLDER
    # Imagenette uses a hand-picked grad_alpha grid instead of np.logspace
    # (see the sweep loop in __main__ below).
    from Imagnette_ResNet import imagenette_galphas
else:
    # Unknown OPTION: fail at import rather than partway through training.
    raise NotImplementedError
# Filename prefix for artifacts produced by the plain double-backprop runs.
REGULAR_DOUBLE_BACKPROP = "regular_double_backprop_finetune"

# Ensure the output directory exists before any torch.save below.
CHECKPOINTS_FOLDER.mkdir(parents=True, exist_ok=True)
# torch.autograd.set_detect_anomaly(True)  # debug toggle: enable to trace bad grads
print("writing to:", CHECKPOINTS_FOLDER)

# A GPU is mandatory for these runs. The original code chose a CPU fallback
# device and then asserted CUDA availability, which (a) made the fallback dead
# code and (b) relied on `assert`, which is stripped under `python -O`.
# Fail loudly and explicitly instead.
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is required for training but no GPU is available")
device = torch.device("cuda")
if __name__ == "__main__":
    start_time = perf_counter()

    # Hyperparameters: LeNet configs are small/fast, so they get a larger
    # learning rate and fewer epochs than the ResNet configs.
    learning_rate = 0.001 if "LeNet" in OPTION else 0.0001
    epochs = 25 if "LeNet" in OPTION else 50

    # galpha -> (test accuracy, epochs actually trained).
    # (Dropped the unused criterion/mse_criterion/all_backprop_result/
    # intermediate_test_result locals from the original — nothing read them.)
    regular_backprop_result = {}

    # Sweep the gradient-penalty weight. np.logspace(2, 10) yields 50
    # log-spaced values in [1e2, 1e10]; the Imagenette config supplies its own
    # hand-picked grid instead.
    galphas = np.logspace(2, 10) if OPTION != "Imagenette_ResNet" else imagenette_galphas
    for galpha in galphas:
        try:
            print("grad_alpha", galpha)
            # NOTE(review): Model is imported as an object, not a class, so the
            # SAME instance is moved to the device and fine-tuned again for each
            # successive galpha rather than retrained from scratch — confirm
            # this carry-over is intended (the commented seed suggests a
            # fresh-init variant once existed).
            model = Model.to(device)
            # torch.manual_seed(42)
            nepochs_regular, loss_curve = double_backprop_train(
                model, train_loader, val_loader=None, epochs=epochs,
                learning_rate=learning_rate, device=device, grad_alpha=galpha,
            )
            model.eval()
            torch.save(model.state_dict(), CHECKPOINTS_FOLDER / f"{REGULAR_DOUBLE_BACKPROP}_{galpha}.pt")
            torch.save(loss_curve, CHECKPOINTS_FOLDER / f"loss_curve_{REGULAR_DOUBLE_BACKPROP}_{galpha}.pickle")
            regular_backprop_result[galpha] = (test(model, test_loader, device), nepochs_regular)
            # Persist after every galpha so partial sweep results survive a crash.
            with (CHECKPOINTS_FOLDER / "regular_backprop_result.pickle").open("wb+") as file:
                pickle.dump(regular_backprop_result, file)
        except Exception as e:
            print(f"Warning: {e}")
            # Bare `raise` re-raises with the original traceback intact (the
            # original `raise e` re-bound the exception and truncated it).
            # Swap `raise` for `pass` to skip failing galphas and keep sweeping.
            raise
    print(f"total time [s]: {perf_counter()-start_time}")