attribute.py
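"""Compute feature attributions for a haunted-scene classifier.

Builds a small batch of test images (clean, with a ghost, with fog, with
both, plus an all-zero control), runs a pretrained Vgg2D classifier on the
batch, and saves the images, targets, predictions, and several Captum
attribution maps to ``result.zarr``.
"""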
from captum.attr import (Saliency, InputXGradient, GuidedBackprop, DeepLift,
                         GuidedGradCam, IntegratedGradients)
import numpy as np
import torch
from torch import nn
import zarr

from data import get_haunted_dataset
from utils import Vgg2D
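# Note: `data` and `utils` are local modules from this repository; they are
# assumed to provide the haunted dataset wrapper and the Vgg2D architecture
# used below.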

def create_batch(haunted, items=None):
    if items is None:
        items = [7, 13, 42]
    additions = ['ghost', 'fog', 'both']
    images = []
    target = []
    for i in items:
        img, _ = haunted.dataset[i]
        img = np.array(img)
        images.append(img)
        target.append(0)
        for addition in additions:
            if addition == 'ghost':
                ghost = haunted.add_ghost(img)
                images.append(ghost)
                target.append(1)
            elif addition == 'fog':
                fog = haunted.add_fog(img)
                images.append(fog)
                target.append(1)
            elif addition == 'both':
                both = haunted.add_fog(haunted.add_ghost(img))
                images.append(both)
                target.append(1)
    # Append an all-zero control image, then stack the batch
    images.append(np.zeros_like(img))
    target.append(0)
    images = np.stack(images)
    return images, target

if __name__ == "__main__":
    # Create the dataset and build the evaluation batch
    print("Creating and saving data")
    haunted = get_haunted_dataset(split="test")
    images, target = create_batch(haunted)
    # Save the raw images and targets alongside the results
    results = zarr.open("result.zarr")
    results["images"] = images
    results["target"] = target

    print("Creating model and loading weights")
    # Get the network and load weights
    classifier = Vgg2D(input_size=(256, 256),
                       fmaps=8,
                       output_classes=2,
                       input_fmaps=3)
    classifier.load_state_dict(torch.load('models/model.pth'))
    model = nn.Sequential(classifier, nn.Softmax(dim=1))
    model.eval()

    # Predict each image in the batch
    print("Running predictions")
    tensor_img = np.stack([haunted.normalize(img) for img in images])
    tensor_img = torch.from_numpy(tensor_img).contiguous()
    with torch.no_grad():
        predictions = classifier(tensor_img)
    results["predictions"] = predictions.numpy()
    names = ['saliency', 'inputXgradient', 'guided_backprop', 'deeplift',
             'guided_gradcam', 'integrated_gradients']
    attributions = [Saliency(model), InputXGradient(model),
                    GuidedBackprop(model), DeepLift(model),
                    GuidedGradCam(model, model[0].features[24]),
                    IntegratedGradients(model)]
    for name, attr in zip(names, attributions):
        print(f"Running {name} attribution.")
        attribution = attr.attribute(tensor_img, target=target)
        results[name] = attribution.detach().numpy()
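
# The stored arrays can be inspected later without re-running this script.
# A minimal read-back sketch (assuming the script has already written
# result.zarr to the working directory):
#
#     import zarr
#     results = zarr.open("result.zarr", mode="r")
#     saliency_maps = results["saliency"][:]     # same shape as tensor_img
#     predictions = results["predictions"][:]    # (batch, 2) class scores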