-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathintra.py
More file actions
263 lines (214 loc) · 8.55 KB
/
intra.py
File metadata and controls
263 lines (214 loc) · 8.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
import torch
import time
import os
from datahandler import PlainRNNDataHandler
from tester import Tester
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
#datasets
reddit = "subreddit"
lastfm = "lastfm"
nowplaying = "nowplaying"
#set current dataset here
dataset = lastfm
dataset_path = "/data/stud/bjorva/datasets/" + dataset + "/4_train_test_split.pickle"
# Log/pickle output directory, derived from the selected dataset so results
# are not written into another dataset's folder (this was previously
# hard-coded to ".../logs/nowplaying/" regardless of `dataset`).
pickle_path = "/data/stud/bjorva/logs/" + dataset + "/"
#universal settings
BATCHSIZE = 100
N_LAYERS = 1 #currently not used
SEQLEN = 20-1
TOP_K = 20
MAX_EPOCHS = 30
#runtime settings
USE_CUDA = True
USE_CUDA_EMBED = True
SEED = 0
GPU = 1
debug = False
#dataset dependent settings
if dataset == reddit:
    HIDDEN_SIZE = 50
    lr = 0.001
    dropout = 0.0
elif dataset == lastfm:
    HIDDEN_SIZE = 100
    lr = 0.001
    dropout = 0.2 # approximate equal to 2 dropouts of 0.2 TODO: Not really, look at this
else:
    # Fail fast with a clear message instead of a NameError later when
    # HIDDEN_SIZE/lr/dropout are read (e.g. nowplaying has no
    # hyperparameters configured yet).
    raise ValueError("No hyperparameters configured for dataset: " + dataset)
EMBEDDING_SIZE = HIDDEN_SIZE
#setting of seed
torch.manual_seed(SEED) #seed CPU
# Pin the physical GPU chosen by `GPU` — CUDA_DEVICE_ORDER/CUDA_VISIBLE_DEVICES
# must be set before CUDA is first initialised to take effect.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(GPU)
#loading of dataset into datahandler and getting relevant information about the dataset
datahandler = PlainRNNDataHandler(dataset_path, BATCHSIZE)
N_ITEMS = datahandler.get_num_items()        # vocabulary size = output layer size
N_SESSIONS = datahandler.get_num_training_sessions()  # NOTE(review): unused in this file — presumably informational
#intra session RNN module
class Intra_RNN(nn.Module):
    """Intra-session RNN: (optionally) embeds item ids, runs a single-layer
    GRU over the session and projects each hidden state to item scores.

    Args:
        embedding_dim: size of the item embedding vectors.
        hidden_size: size of the GRU hidden state.
        output_size: number of items (score layer width; also embedding rows).
        dropout_rate: dropout applied to the embeddings and to the GRU output.
        use_cuda: place the initial hidden state on the GPU. Defaults to the
            module-level USE_CUDA flag, so existing callers are unchanged.
        use_cuda_embed: if True the embedding lives inside this module and
            `forward` expects raw item-id tensors; if False the caller embeds
            the batch itself and passes embedded vectors. Defaults to the
            module-level USE_CUDA_EMBED flag.
    """
    def __init__(self, embedding_dim, hidden_size, output_size, dropout_rate,
                 use_cuda=None, use_cuda_embed=None):
        super(Intra_RNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Resolve flags at call time so the module-level globals remain the
        # default but tests/callers can override them explicitly.
        self.use_cuda = USE_CUDA if use_cuda is None else use_cuda
        self.use_cuda_embed = USE_CUDA_EMBED if use_cuda_embed is None else use_cuda_embed
        if self.use_cuda_embed:
            self.embed = nn.Embedding(output_size, embedding_dim)
        self.gru_dropout = nn.Dropout(dropout_rate)
        self.gru = nn.GRU(embedding_dim, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden):
        """Return (scores, hidden): scores is [batch, seq, output_size]."""
        if self.use_cuda_embed:
            embeddings = self.embed(input)
        else:
            embeddings = input  # caller already embedded the batch
        embeddings = self.gru_dropout(embeddings)
        out, hidden = self.gru(embeddings, hidden)
        out = self.gru_dropout(out)
        out = self.linear(out)
        return out, hidden

    def init_hidden(self, batch_size):
        """Return a fresh zero hidden state of shape (1, batch, hidden)."""
        hidden = Variable(torch.zeros(1, batch_size, self.hidden_size))
        if self.use_cuda:
            hidden = hidden.cuda()
        return hidden
#setting up embedding matrix, network and optimizer
# When the embedding is kept OUTSIDE the RNN module (USE_CUDA_EMBED False),
# it gets its own Adam optimizer since its parameters are not in rnn.parameters().
embedding_matrix = None
if(not USE_CUDA_EMBED):
    embedding_matrix = nn.Embedding(N_ITEMS, EMBEDDING_SIZE)
    embed_optimizer = torch.optim.Adam(embedding_matrix.parameters(), lr=lr) #need to cover the parameters in the embedding matrix as well if this is outside the RNN module
#model
rnn = Intra_RNN(EMBEDDING_SIZE, HIDDEN_SIZE, N_ITEMS, dropout)
if USE_CUDA:
    rnn = rnn.cuda()
optimizer = torch.optim.Adam(rnn.parameters(), lr=lr)
#library option that does not allow us to mask away 0-paddings and returns a mean by default
# NOTE(review): criterion is never used below — training uses
# masked_cross_entropy_loss instead; kept here apparently for reference.
criterion = nn.CrossEntropyLoss()
#CUSTOM CROSS ENTROPY LOSS(Replace as soon as pytorch has implemented an option for non-summed losses)
#https://github.com/pytorch/pytorch/issues/264
def masked_cross_entropy_loss(y_hat, y):
    """Per-event cross-entropy with 0-padded targets masked to zero.

    y_hat: [N, n_items] unnormalised scores; y: [N] target item ids,
    where id 0 marks padding. Returns a 1-D tensor of N losses.
    """
    neg_logp = -F.log_softmax(y_hat, dim=1)
    # pick each row's loss at its target index
    per_event = torch.gather(neg_logp, 1, y.view(-1, 1))
    # sign() of the (non-negative) ids is 0 for padding, 1 otherwise
    valid = Variable(y.data.float().sign().view(-1, 1))
    per_event = per_event * valid
    return per_event.view(-1)
def process_batch(xinput, targetvalues):
    """Turn raw id lists into model-ready tensors.

    Returns (embedded_data, targets): if USE_CUDA_EMBED the first element is
    the raw LongTensor of item ids (the module embeds them itself); otherwise
    it is the result of the external embedding_matrix lookup. Tensors are
    moved to the GPU according to the USE_CUDA / USE_CUDA_EMBED flags.
    """
    batch_var = Variable(torch.LongTensor(xinput))
    target_var = Variable(torch.LongTensor(targetvalues))
    if USE_CUDA_EMBED:
        # embedding lives inside the RNN module: hand over raw indices on GPU
        batch_var = batch_var.cuda()
        target_var = target_var.cuda()
        return batch_var, target_var
    # embedding lookup is done here on CPU, result optionally moved to GPU
    embedded = embedding_matrix(batch_var)
    if USE_CUDA:
        embedded = embedded.cuda()
        target_var = target_var.cuda()
    return embedded, target_var
def train_on_batch(xinput, targetvalues, sl):
    """Train the RNN on one batch and return its mean loss.

    Args:
        xinput: batch of input item-id sequences.
        targetvalues: batch of target item-id sequences (0 = padding).
        sl: per-session lengths (number of valid events per sequence).

    Returns:
        Scalar mean batch loss (via .data[0], matching this file's old-style API).
    """
    #zero gradients (both optimizers when the embedding lives outside the module)
    optimizer.zero_grad()
    if(not USE_CUDA_EMBED):
        embed_optimizer.zero_grad()
    #get batch from datahandler and turn into tensors of expected format, embed input if embedding not in module (not done on GPU)
    X, Y = process_batch(xinput, targetvalues)
    #fresh zero hidden state for the gru layer, then forward pass
    hidden = rnn.init_hidden(X.size(0))
    output, _ = rnn(X, hidden)
    #flatten for the loss: targets [SEQLEN*BATCHSIZE], scores [SEQLEN*BATCHSIZE, N_ITEMS]
    reshaped_Y = Y.view(-1)
    reshaped_output = output.view(-1,N_ITEMS)
    #per-event loss with 0-padded positions masked to zero
    reshaped_loss = masked_cross_entropy_loss(reshaped_output, reshaped_Y)
    # NOTE(review): .mean(0) divides by ALL positions, padding included — not
    # by sum(sl) as the (removed) dead sum_loss/divident code intended.
    # Kept as-is to preserve training behaviour; the masked mean
    # reshaped_loss.sum(0)/sum(sl) may be what was actually wanted.
    mean_loss = reshaped_loss.mean(0)
    #calculate gradients
    mean_loss.backward()
    #update parameters by using the gradients and optimizers
    optimizer.step()
    if(not USE_CUDA_EMBED):
        embed_optimizer.step()
    return mean_loss.data[0]
def predict_on_batch(xinput, targetvalues, sl):
    """Run a forward pass on a test batch and return the TOP_K predicted
    item ids per event (shape [batch, seq, TOP_K])."""
    embedded, _ = process_batch(xinput, targetvalues)
    init_state = rnn.init_hidden(embedded.size(0))
    scores, _ = rnn(embedded, init_state)
    # keep only the ids of the k highest-scoring items; values are discarded
    _, top_items = torch.topk(scores, TOP_K)
    return top_items
#setting up for training: epoch counter, wall-clock start and batch counts
epoch_nr = 0
start_time = time.time()  # NOTE(review): never read below — presumably for overall-runtime logging
num_training_batches = datahandler.get_num_training_batches()
num_test_batches = datahandler.get_num_test_batches()
epoch_loss = 0  # accumulated over the batches of one epoch, reset each epoch
#epoch loop: MAX_EPOCHS passes over the training data; testing runs once,
#after the final epoch only
while epoch_nr < MAX_EPOCHS:
    #print("Starting epoch #" + str(epoch_nr))
    start_time_epoch = time.time()
    #reset state of datahandler and get first training batch
    datahandler.reset_user_batch_data()
    xinput, targetvalues, sl = datahandler.get_next_train_batch()
    batch_nr = 0
    rnn.train()
    # Batches smaller than half BATCHSIZE are skipped (also terminates the loop
    # when the datahandler runs out of data).
    while(len(xinput) > int(BATCHSIZE/2)): #TODO: CHECK IF THIS IS THE REASON FOR THE BAD RESULTS, OR IF THE REDUCED DATASET WAS. COULD BE A COMBINATION (A FEW USERS WITH VERY MANY SESSIONS/UNBALANCED)
        #batch training
        batch_start_time = time.time()
        #training call
        batch_loss = train_on_batch(xinput, targetvalues, sl)
        epoch_loss += batch_loss
        batch_runtime = time.time() - batch_start_time
        #get next training batch
        xinput, targetvalues, sl = datahandler.get_next_train_batch()
        #print batch loss and ETA occasionally
        if(debug):
            if batch_nr%1000 == 0:
                print("Batch: " + str(batch_nr) + "/" + str(num_training_batches) + " loss: " + str(batch_loss))
                eta = (batch_runtime*(num_training_batches-batch_nr))/60
                eta = "%.2f" % eta
                print(" | ETA:", eta, "minutes.")
        batch_nr += 1
    #finished training in epoch
    if(debug):
        print("Epoch loss: " + str(epoch_loss/batch_nr))
    # Evaluation pass, only after the last epoch.
    if(epoch_nr == MAX_EPOCHS-1):
        if(debug):
            print("Starting testing")
        #initialize tester (results pickled under pickle_path + dataset + seed)
        tester = Tester(pickle_path+ dataset + str(SEED))
        #reset state of datahandler and get first test batch
        datahandler.reset_user_batch_data()
        xinput, targetvalues, sl = datahandler.get_next_test_batch()
        batch_nr = 0
        rnn.eval()
        while(len(xinput) > int(BATCHSIZE/2)):
            #batch testing
            batch_nr += 1
            batch_start_time = time.time()
            #run predictions on test batch
            k_predictions = predict_on_batch(xinput, targetvalues, sl)
            #evaluate results
            tester.evaluate_batch(k_predictions, targetvalues, sl)
            #get next test batch
            xinput, targetvalues, sl = datahandler.get_next_test_batch()
            # NOTE(review): runtime measured AFTER fetching the next batch, so
            # the ETA below includes data-loading time as well.
            batch_runtime = time.time() - batch_start_time
            #print progress and ETA occasionally
            if(debug):
                if batch_nr%400 == 0:
                    print("Batch: " + str(batch_nr) + "/" + str(num_test_batches))
                    eta = (batch_runtime*(num_test_batches-batch_nr))/60
                    eta = "%.2f" % eta
                    print(" | ETA:", eta, "minutes.")
        # Print final test stats for epoch
        test_stats, current_recall5, current_recall20 = tester.get_stats_and_reset()
        if(debug):
            print("Recall@5 = " + str(current_recall5))
            print("Recall@20 = " + str(current_recall20))
        print("Epoch #" + str(epoch_nr) + " Time: " + str(time.time()-start_time_epoch))
        print(test_stats)
    epoch_nr += 1
    epoch_loss = 0