-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathtest.py
113 lines (90 loc) · 3.24 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import argparse
import importlib
import os
import time

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms

# from models import *
# The original `from .SC-ResNet import *` is a SyntaxError: a hyphen is not
# legal in a Python identifier, and a relative import fails when this file is
# run as a script. Load the module by its on-disk name via importlib and pull
# its public names into this namespace (this supplies `resnet50`).
# NOTE(review): renaming the file to SC_ResNet.py and writing a plain
# `from SC_ResNet import *` would be the cleaner long-term fix.
_sc_resnet = importlib.import_module("SC-ResNet")
globals().update({_name: _val for _name, _val in vars(_sc_resnet).items()
                  if not _name.startswith("_")})
from utils import progress_bar
# ---- CLI -------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch ImageNet Test')
parser.add_argument('--resume', '-r', action='store_true',
                    help='resume from checkpoint')
# New, backward-compatible option: the original hard-coded torch.load('./'),
# which passes a directory and always fails.
parser.add_argument('--checkpoint', default='./checkpoint.pth',
                    help='checkpoint file to load when --resume is given')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0      # best test accuracy
start_epoch = 0   # start from epoch 0 or last checkpoint epoch

# ---- Data ------------------------------------------------------------------
print('==> Preparing data..')
valdir = os.path.join('../data/', 'val')
# Standard ImageNet channel statistics for normalization.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=256, shuffle=False,
    num_workers=4, pin_memory=True)

# ---- Model -----------------------------------------------------------------
print('==> Building model..')
net = resnet50()  # provided by the SC-ResNet star import
if device == 'cuda':
    net = torch.nn.DataParallel(net).cuda()
    cudnn.benchmark = True
if args.resume:
    # Load checkpoint from an explicit file path; map_location lets a
    # GPU-saved checkpoint be restored on a CPU-only machine.
    print('==> Resuming from checkpoint..')
    checkpoint = torch.load(args.checkpoint, map_location=device)
    best = checkpoint['best_prec1']
    print(best)
    net.load_state_dict(checkpoint['state_dict'])

criterion = nn.CrossEntropyLoss()
def test():
    """Run one top-1 evaluation pass over `val_loader` and report latency.

    Reads the module-level `net`, `criterion`, `val_loader` and `device`.
    Prints running loss/accuracy per batch via `progress_bar`, then prints
    the total forward-pass latency in seconds.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    latency = 0
    use_cuda = device == 'cuda'
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            if use_cuda:
                # Drain any pending GPU work so it doesn't leak into the
                # timing window of this batch.
                torch.cuda.synchronize()
            time_s = time.time()
            outputs = net(inputs)
            if use_cuda:
                # CUDA kernels launch asynchronously; wait so the wall-clock
                # delta reflects the real forward time. (The original called
                # synchronize unconditionally and crashed on CPU-only runs.)
                torch.cuda.synchronize()
            latency += time.time() - time_s
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(val_loader),
                         'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1),
                            100.*correct/total, correct, total))
    # Total forward latency over the whole pass (not a per-batch average,
    # despite the original variable name `avg_time`).
    total_time = latency
    print(total_time)
def test2():
    """Run one top-5 evaluation pass over `val_loader`.

    A sample counts as correct when its target appears among the model's
    five highest-scoring classes. Reads the module-level `net`, `criterion`,
    `val_loader` and `device`; prints running loss/accuracy per batch.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            # Accumulate the loss; the original printed test_loss but never
            # updated it, so the Loss column always read 0.000.
            test_loss += criterion(outputs, targets).item()
            _, predicted = outputs.topk(5, 1, True, True)
            total += targets.size(0)
            predicted = predicted.t()  # -> (5, batch)
            ct = predicted.eq(targets.view(1, -1).expand_as(predicted))
            # reshape, not view: the transposed tensor is non-contiguous and
            # .view(-1) on it raises in PyTorch >= 1.7.
            correct += ct[:5].reshape(-1).float().sum(0).item()
            progress_bar(batch_idx, len(val_loader),
                         'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1),
                            100.*correct/total, correct, total))
# Guard the entry point so importing this module (e.g. for its functions)
# does not immediately kick off an evaluation run.
if __name__ == '__main__':
    test()
    # test2()