uncertaintyNet.py
import os

import h5py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import Dataset, DataLoader

# Training hyperparameters.
lr = 1e-3
num_epochs = 15


class PredictionDataset(Dataset):
    """Pairs of (predicted position, per-axis error) read from an HDF5 results file."""

    def __init__(self, model_name, data_run):
        # Open read-only; the datasets are read lazily in __getitem__.
        f = h5py.File(os.path.join('Test_Results', f'{data_run}__{model_name}.h5'), 'r')
        self.pred_pos = f['pred_pos']
        self.xerr = f['xerr']
        self.yerr = f['yerr']

    def __len__(self):
        return len(self.xerr)

    def __getitem__(self, idx):
        # The target is the absolute per-axis error of the upstream position prediction.
        xyerr = np.abs(np.array([self.xerr[idx], self.yerr[idx]]))
        # Earlier target formulations, kept for reference:
        # rerr = np.sqrt(self.xerr[idx] ** 2 + self.yerr[idx] ** 2)
        # xyerr = 100 * xyerr
        # xyerr = np.multiply(np.log(1000 * np.abs(xyerr)), np.sign(xyerr))
        # xybool = np.array([int(self.xerr[idx] > 0.3 / np.sqrt(2)), int(self.yerr[idx] > 0.3 / np.sqrt(2))])
        return [self.pred_pos[idx].astype(np.float32), xyerr.astype(np.float32)]
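

# A minimal usage sketch for the dataset above (assumes the HDF5 file
# Test_Results/Wide_Noisy_Data__Wide.h5 exists with the 'pred_pos', 'xerr',
# and 'yerr' datasets loaded in __init__):
#   ds = PredictionDataset(model_name='Wide', data_run='Wide_Noisy_Data')
#   pred, err = ds[0]  # pred: float32 (x, y) prediction; err: float32 |xerr|, |yerr|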


class UncertaintyNet(nn.Module):
    """Small fully connected net mapping a predicted (x, y) position to
    per-axis uncertainty estimates. Note that with the hidden layer commented
    out and no activation in between, the stack reduces to an affine map.
    """

    def __init__(self):
        super(UncertaintyNet, self).__init__()
        self.fc1 = nn.Linear(2, 7)
        # self.fc2 = nn.Linear(7, 7)
        self.fc3 = nn.Linear(7, 2)

    def forward(self, X):
        X = self.fc1(X)
        # X = self.fc2(X)
        X = self.fc3(X)
        return X
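

# A quick, illustrative shape check (not part of the training script):
#   net = UncertaintyNet()
#   out = net(torch.zeros(8, 2))  # out.shape == torch.Size([8, 2])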


def train(model, trainloader, optimizer, epoch):
    model.train()
    for batch_idx, (pred_pos, target) in enumerate(trainloader):
        optimizer.zero_grad()
        output = model(pred_pos)
        loss = F.l1_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 25 == 0:
            # F.l1_loss already averages over the batch (reduction='mean'),
            # so the loss is reported directly rather than divided by the
            # batch size a second time.
            print(f'Train Epoch: {epoch} [{batch_idx * len(target)}/{len(trainloader.dataset)}]'
                  f'\tLoss: {loss.item()}')


def test(model, testloader):
    model.eval()
    test_loss = 0
    with torch.no_grad():
        for pred_pos, target in testloader:
            output = model(pred_pos)
            # With batch_size=1 each term is one sample's mean L1 error, so
            # dividing by the dataset size below yields the average loss.
            test_loss += F.l1_loss(output, target).item()
    test_loss /= len(testloader.dataset)
    print(f'\nTest Set: Average Loss {test_loss}\n')
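

# Sketch for reloading the weights saved by main() below (the path follows
# the save_dir/save_name used there):
#   model = UncertaintyNet()
#   model.load_state_dict(torch.load(os.path.join('models', 'Uncertainty.pt')))
#   model.eval()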


def main():
    # trainset = PredictionDataset(model_name='Wide', data_run='Very_Wide_Noisy_Data')
    trainset = PredictionDataset(model_name='Wide', data_run='Wide_Noisy_Data')
    trainloader = DataLoader(trainset, batch_size=8, shuffle=True)
    testset = PredictionDataset(model_name='Wide', data_run='run__6_01_21__data_1s_bin1__spiders__median')
    # testset = PredictionDataset(model_name='Wide', data_run='Far_Noisy_Data')
    testloader = DataLoader(testset, batch_size=1, shuffle=True)

    model = UncertaintyNet()
    optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-2)
    # The scheduler is stepped once per epoch, so total_steps equals num_epochs.
    scheduler = OneCycleLR(optimizer, lr, total_steps=num_epochs)

    for epoch in range(num_epochs):
        train(model, trainloader, optimizer, epoch)
        scheduler.step()
        test(model, testloader)

    save_name = 'Uncertainty'
    save_dir = 'models'
    os.makedirs(save_dir, exist_ok=True)  # make sure the output directory exists
    torch.save(model.state_dict(), os.path.join(save_dir, save_name + '.pt'))


if __name__ == '__main__':
    main()