# perceptron.py
import torch
from torch import nn
from torchvision import transforms
from torchvision.datasets import CIFAR10

'''
Perceptron: a single-layer network trained on CIFAR-10.
'''

class Perceptron(nn.Module):
    def __init__(self):
        super().__init__()
        # CIFAR-10 images are 3x32x32 = 3072 values; map the flattened
        # pixels to one logit per class (10 classes). The original
        # nn.Linear(1, 1) cannot accept image batches of this shape.
        self.fc = nn.Linear(3 * 32 * 32, 10)

    def forward(self, x):
        '''Forward pass: flatten each image, then apply the linear layer.
        Returns raw logits; nn.CrossEntropyLoss applies log-softmax
        internally, so no output activation is needed (the original ReLU
        would clamp negative logits).'''
        return self.fc(x.view(x.size(0), -1))
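
# A minimal sanity-check sketch (not in the original file): a random batch
# shaped like CIFAR-10 images should map to one logit per class. The helper
# name `_sanity_check` is illustrative; call it manually if desired.
def _sanity_check():
    dummy = torch.randn(4, 3, 32, 32)  # batch of 4 fake 3x32x32 images
    logits = Perceptron()(dummy)
    assert logits.shape == (4, 10), logits.shape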
if __name__ == '__main__':
    # Hyperparameters
    batch_size = 10
    learning_rate = 1e-4
    epochs = 5

    # Data transformations: convert to tensor, normalize each RGB channel
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )

    # Dataset and loader
    dataset = CIFAR10(root='./data/cifar10', train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=1)

    # Initializing the net
    net = Perceptron()

    # Defining the loss function
    loss_function = nn.CrossEntropyLoss()

    # Defining the optimizer (the original referenced `mlp`, which is
    # undefined here; it should optimize the parameters of `net`)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

    # Training loop
    for epoch in range(epochs):
        print('Epoch %s' % (epoch + 1))
        current_loss = 0.0

        # Iterate over the data
        for i, data in enumerate(trainloader, 0):
            # Get data and ground truth
            inputs, targets = data

            # Set gradients of all optimized tensors to zero
            optimizer.zero_grad()

            # Forward pass of data through the net
            outputs = net(inputs)

            # Compute loss
            loss = loss_function(outputs, targets)

            # Backward pass
            loss.backward()

            # Optimize parameters
            optimizer.step()

            # Show running statistics every 500 mini-batches
            current_loss += loss.item()
            if i % 500 == 499:
                print('Loss after mini-batch %5d: %.3f' % (i + 1, current_loss / 500))
                current_loss = 0.0

    print('Training finished!')
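
    # --- Evaluation sketch (not in the original): measure accuracy on the
    # CIFAR-10 test split, assuming the training transform also suits the
    # test set. ---
    testset = CIFAR10(root='./data/cifar10', train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=1)

    correct, total = 0, 0
    with torch.no_grad():  # gradients are not needed for evaluation
        for inputs, targets in testloader:
            predicted = net(inputs).argmax(dim=1)  # class with the highest logit
            correct += (predicted == targets).sum().item()
            total += targets.size(0)
    print('Test accuracy: %.2f%%' % (100.0 * correct / total))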