-
Notifications
You must be signed in to change notification settings - Fork 7
/
Copy patheval_demo.lua
142 lines (116 loc) · 3.83 KB
/
eval_demo.lua
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
--require ('mobdebug').start()
require 'dp'
require 'rnn'
require 'optim'
require 'util/data_create'
require 'ViewSelect'
require 'RecurrentAttention_ex'
require 'RewardCriterion'
local matio = require 'matio'

-- Command-line options for evaluating a previously trained 3D attention model.
cmd = torch.CmdLine()
cmd:text()
cmd:text('Evaluate a 3d attention Model')
cmd:text('Options:')
cmd:option('--cuda', true, 'model was saved with cuda')
cmd:option('--xpPath', 'data_hierarchy_tree/cur_model/model.dat', 'path to a previously saved model')
cmd:option('--stochastic', false, 'evaluate the model stochatically. Generate glimpses stochastically')
cmd:option('--view_num', 21, 'num of all view')
cmd:option('--rho', 5, 'time-steps')
cmd:option('--eval_dir', 'evaluation', 'save path')
cmd:text()
local opt = cmd:parse(arg or {})
if opt.cuda then
require 'cutorch'
require 'cunn'
end
-- Use paths.mkdir (as the rest of this script does) instead of shelling out
-- via os.execute('mkdir ...'), which breaks on paths containing spaces or
-- shell metacharacters.
if not paths.dirp(opt.eval_dir) then
paths.mkdir(opt.eval_dir)
end
require'util/viewsLoc'
-- viewsLoc: tensor of candidate view locations (one row per view),
-- used by getNextViewId for nearest-neighbour lookup.
viewsLoc= getAllViewsLoc()
classes={'chair', 'display', 'flowerpot','guitar','table'}
--- Return the id of the candidate view whose location is nearest (L2
-- distance) to `location`.
-- NOTE(review): `location:resize(1,2)` reshapes the caller's tensor in place.
-- The result is the index tensor produced by torch's min(), indexed as
-- [1][1] by callers.
function getNextViewId(location)
  local query = location:resize(1, 2):expandAs(viewsLoc)
  local dists = (viewsLoc - query):norm(2, 2)
  local _, nearest = dists:min(1)
  return nearest
end
--- Fetch (a clone of) the image of the view nearest to `location` for the
-- object encoded in `input`.
-- `input[1][1][1]` carries a flat index combining object id and view id:
-- flat = objId * opt.view_num + viewId, with viewId in 1..opt.view_num.
function getNextView(location, input)
  local flatIndex = torch.round(input[1][1][1])
  -- Decode the 1-based view id; a modulus of 0 means the last view.
  local curViewId = flatIndex % opt.view_num
  if curViewId == 0 then
    curViewId = opt.view_num
  end
  local objId = (flatIndex - curViewId) / opt.view_num
  local nextViewId = getNextViewId(location)[1][1]
  return AllData[objId * opt.view_num + nextViewId]:clone()
end
-- Load the saved dp experiment and extract the trained model (on CPU).
xp = torch.load(opt.xpPath)
model = xp:model().module:float()
tester = xp:tester() or xp:validator() -- dp.Evaluator
tester:sampler()._epoch_size = nil
conf = tester:feedback() -- dp.Confusion
cm = conf._cm -- optim.ConfusionMatrix
print("Last evaluation of "..(xp:tester() and 'test' or 'valid').." set :")
print(cm)

-- Build the dataset; AllData concatenates train and test views so that
-- getNextView can index any object's views by flat id.
ds,pureTrainData,pureTestData=dataCreation()
AllData=torch.cat(pureTrainData,pureTestData,1)

ra = model:findModules('RecurrentAttention')[1]
-- stochastic or deterministic: configure every ReinforceNormal step module
-- once (the original fetched the same modules in two identical loops).
for i=1,#ra.actions do
local rn = ra.action:getStepModule(i):findModules('nn.ReinforceNormal')[1]
rn.stochastic = opt.stochastic
if not opt.stochastic then
rn.stdev = 0 -- deterministic: emit the mean location
end
end
model:training() -- otherwise the rnn doesn't save intermediate time-step states
--
-- save sequences
-- Forward the full test set once so that ra.actions afterwards holds the
-- attention location chosen at every time step for every sample.
inputs = ds:get('test','inputs')
targets = ds:get('test','targets', 'b')
output = model:forward(inputs)
locations = ra.actions
-- sequences[i][j]: id of the view nearest to the location picked at step j.
-- sequences_locations[i][2j-1 / 2j]: raw (x, y) location at step j.
sequences = torch.IntTensor(inputs:size(1),opt.rho)
sequences_locations = torch.Tensor(inputs:size(1),opt.rho*2)
for i=1,inputs:size(1) do
for j=1,opt.rho do
-- NOTE(review): getNextViewId returns a 1x1 index tensor; this assignment
-- relies on torch converting it to a scalar slot -- confirm.
-- getNextViewId also resizes locations[j][i] in place to 1x2.
sequences[i][j]=getNextViewId(locations[j][i])
sequences_locations[i][2*(j-1)+1]=locations[j][i][1]
sequences_locations[i][2*(j-1)+2]=locations[j][i][2]
end
end
-- Export both tensors as .mat files for offline analysis.
matio.save(opt.eval_dir .. '/eval_sequences.mat',sequences)
matio.save(opt.eval_dir .. '/eval_sequences_locations.mat',sequences_locations)
--
-- Evaluate a 30-sample slice of the test set (samples 30..59) and print a
-- per-class confusion matrix.
-- (Removed the unused re-fetch of test inputs/targets: `test_inputs` and
-- `test_targets` were assigned here but never read anywhere in the script.)
input = inputs:narrow(1,30,30)
target= targets:narrow(1,30,30)
confusion = optim.ConfusionMatrix(classes)
output = model:forward(input)
-- Score only the classification output of the final (rho-th) time step.
for i = 1,target:size(1) do
confusion:add(output[opt.rho][i], target[i])
end
print(confusion)
-- Dump the view image selected at each time step into one montage PNG per
-- step. views_seqs[j][i] is the view chosen at step j for sample i.
-- (Removed `local img = input[i]`, which was assigned but never used.)
locations = ra.actions
views_seqs = {}
for i=1,input:size(1) do
for j,location in ipairs(locations) do
local views = views_seqs[j] or {}
views_seqs[j] = views
local xy = location[i]
views[i] =getNextView(xy,input[i])
collectgarbage()
end
end
paths.mkdir(opt.eval_dir .. '/views_seqs')
for j,views in ipairs(views_seqs) do
-- 10 views per row, 3px padding between tiles.
local g = image.toDisplayTensor{input=views,nrow=10,padding=3}
image.save(opt.eval_dir .. "/views_seqs/view_"..j..".png", g)
end