toolbox.py (forked from HanxunH/Unlearnable-Examples)
import numpy as np
import torch
from torch.autograd import Variable

# Use the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
class PerturbationTool():
    """Generates L-infinity bounded perturbations for error-minimizing / error-maximizing noise."""

    def __init__(self, seed=0, epsilon=0.03137254901, num_steps=20, step_size=0.00784313725):
        # Defaults correspond to epsilon = 8/255 and step_size = 2/255 in [0, 1] pixel space.
        self.epsilon = epsilon
        self.num_steps = num_steps
        self.step_size = step_size
        self.seed = seed
        np.random.seed(seed)

    def random_noise(self, noise_shape=[10, 3, 32, 32]):
        # Initialize noise uniformly in [-epsilon, epsilon].
        random_noise = torch.FloatTensor(*noise_shape).uniform_(-self.epsilon, self.epsilon).to(device)
        return random_noise
    def min_min_attack(self, images, labels, model, optimizer, criterion, random_noise=None, sample_wise=False):
        """PGD-style attack that minimizes the training loss (error-minimizing noise)."""
        if random_noise is None:
            random_noise = torch.FloatTensor(*images.shape).uniform_(-self.epsilon, self.epsilon).to(device)

        perturb_img = Variable(images.data + random_noise, requires_grad=True)
        perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)
        eta = random_noise
        for _ in range(self.num_steps):
            opt = torch.optim.SGD([perturb_img], lr=1e-3)
            opt.zero_grad()
            model.zero_grad()
            if isinstance(criterion, torch.nn.CrossEntropyLoss):
                if hasattr(model, 'classify'):
                    model.classify = True
                logits = model(perturb_img)
                loss = criterion(logits, labels)
            else:
                logits, loss = criterion(model, perturb_img, labels, optimizer)
            perturb_img.retain_grad()
            loss.backward()
            # Descend on the loss: step against the gradient sign.
            eta = self.step_size * perturb_img.grad.data.sign() * (-1)
            perturb_img = Variable(perturb_img.data + eta, requires_grad=True)
            # Project the accumulated perturbation back into the epsilon-ball and the valid pixel range.
            eta = torch.clamp(perturb_img.data - images.data, -self.epsilon, self.epsilon)
            perturb_img = Variable(images.data + eta, requires_grad=True)
            perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)

        return perturb_img, eta
    def min_max_attack(self, images, labels, model, optimizer, criterion, random_noise=None, sample_wise=False):
        """PGD-style attack that maximizes the training loss (error-maximizing, adversarial noise)."""
        if random_noise is None:
            random_noise = torch.FloatTensor(*images.shape).uniform_(-self.epsilon, self.epsilon).to(device)

        perturb_img = Variable(images.data + random_noise, requires_grad=True)
        perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)
        eta = random_noise
        for _ in range(self.num_steps):
            opt = torch.optim.SGD([perturb_img], lr=1e-3)
            opt.zero_grad()
            model.zero_grad()
            if isinstance(criterion, torch.nn.CrossEntropyLoss):
                logits = model(perturb_img)
                loss = criterion(logits, labels)
            else:
                logits, loss = criterion(model, perturb_img, labels, optimizer)
            loss.backward()
            # Ascend on the loss: step along the gradient sign.
            eta = self.step_size * perturb_img.grad.data.sign()
            perturb_img = Variable(perturb_img.data + eta, requires_grad=True)
            # Project the accumulated perturbation back into the epsilon-ball and the valid pixel range.
            eta = torch.clamp(perturb_img.data - images.data, -self.epsilon, self.epsilon)
            perturb_img = Variable(images.data + eta, requires_grad=True)
            perturb_img = Variable(torch.clamp(perturb_img, 0, 1), requires_grad=True)

        return perturb_img, eta
    def _patch_noise_extend_to_img(self, noise, image_size=[3, 32, 32], patch_location='center'):
        """Place a (c, x_len, y_len) noise patch onto a zero mask of the full image size."""
        c, h, w = image_size[0], image_size[1], image_size[2]
        mask = np.zeros((c, h, w), np.float32)
        x_len, y_len = noise.shape[1], noise.shape[2]
        if patch_location == 'center' or (h == w == x_len == y_len):
            x = h // 2
            y = w // 2
        elif patch_location == 'random':
            x = np.random.randint(x_len // 2, w - x_len // 2)
            y = np.random.randint(y_len // 2, h - y_len // 2)
        else:
            raise ValueError('Invalid patch location')

        x1 = np.clip(x - x_len // 2, 0, h)
        x2 = np.clip(x + x_len // 2, 0, h)
        y1 = np.clip(y - y_len // 2, 0, w)
        y2 = np.clip(y + y_len // 2, 0, w)
        if type(noise) is np.ndarray:
            mask[:, x1: x2, y1: y2] = noise
        else:
            mask[:, x1: x2, y1: y2] = noise.cpu().numpy()
        return ((x1, x2, y1, y2), torch.from_numpy(mask).to(device))
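

# A minimal usage sketch of PerturbationTool, assuming a toy linear classifier and
# dummy CIFAR-10-sized data; the model, batch size, and optimizer settings below are
# illustrative assumptions, not part of the upstream toolbox.
if __name__ == '__main__':
    # Stand-in classifier over flattened 3x32x32 inputs (assumed for illustration only).
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10)).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    criterion = torch.nn.CrossEntropyLoss()

    images = torch.rand(8, 3, 32, 32).to(device)       # dummy batch in [0, 1]
    labels = torch.randint(0, 10, (8,)).to(device)

    tool = PerturbationTool(seed=0, epsilon=8 / 255, num_steps=20, step_size=2 / 255)
    noise = tool.random_noise(noise_shape=list(images.shape))

    # Error-minimizing perturbation, as used for unlearnable examples.
    perturb_img, eta = tool.min_min_attack(images, labels, model, optimizer, criterion, random_noise=noise)
    print('perturbed batch:', perturb_img.shape, 'noise range:', eta.min().item(), eta.max().item())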