#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************
# @Time : 2018/10/16 19:28
# @Author : Xiang Ling
# @Lab : nesa.zju.edu.cn
# @File : RLLC_Generation.py
# **************************************
import argparse
import os
import random
import sys

import numpy as np
import torch

sys.path.append('%s/../' % os.path.dirname(os.path.realpath(__file__)))
from Attacks.AttackMethods.AttackUtils import predict
from Attacks.AttackMethods.RLLC import RLLCAttack
from Attacks.Generation import Generation
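

# Orientation note (an inference from the parameter names and the --alpha_ratio help text
# below, not a statement of the RLLCAttack internals): RLLC is a targeted least-likely-class
# attack with a total perturbation budget of `epsilon`, where `alpha_ratio` controls how much
# of that budget is spent on an initial random step before the targeted gradient step.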
class RLLCGeneration(Generation):
def __init__(self, dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device,
attack_batch_size, eps, alpha_ratio):
        super(RLLCGeneration, self).__init__(dataset, attack_name, targeted, raw_model_location, clean_data_location, adv_examples_dir, device)

        self.attack_batch_size = attack_batch_size
        self.epsilon = eps
        self.alpha_ratio = alpha_ratio

    def generate(self):
        attacker = RLLCAttack(model=self.raw_model, epsilon=self.epsilon, alpha_ratio=self.alpha_ratio)

        # prepare the least-likely-class target labels (self.targets_samples is assumed to be
        # one-hot encoded by the Generation base class, so argmax recovers the integer labels)
        llc_labels = np.argmax(self.targets_samples, 1)

        # generate the adversarial examples in batches
adv_samples = attacker.batch_perturbation(xs=self.nature_samples, ys_target=llc_labels, batch_size=self.attack_batch_size,
device=self.device)
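
        # predict() is assumed to return per-class scores; the argmax along dim 1 gives the
        # raw model's predicted label for each adversarial example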
adv_labels = predict(model=self.raw_model, samples=adv_samples, device=self.device)
adv_labels = torch.max(adv_labels, 1)[1]
        adv_labels = adv_labels.cpu().numpy()

        # save the adversarial examples together with their predicted and true labels
        np.save('{}{}_AdvExamples.npy'.format(self.adv_examples_dir, self.attack_name), adv_samples)
        np.save('{}{}_AdvLabels.npy'.format(self.adv_examples_dir, self.attack_name), adv_labels)
        np.save('{}{}_TrueLabels.npy'.format(self.adv_examples_dir, self.attack_name), self.labels_samples)

        # count how many adversarial examples are classified as their least likely (target) class
        mis_target = 0
for i in range(len(adv_samples)):
if llc_labels[i] == adv_labels[i]:
mis_target += 1
        print('\nFor the **{}** (targeted attack) on **{}**, the targeted success rate = {}/{} = {:.1f}%\n'.format(
            self.attack_name, self.dataset, mis_target, len(adv_samples), mis_target / len(adv_samples) * 100))
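

# A minimal programmatic usage sketch (the CPU device and CIFAR10 values here are illustrative
# assumptions; the script is normally driven from the command line via main() below):
#
#   device = torch.device('cpu')
#   rllc = RLLCGeneration(dataset='CIFAR10', attack_name='RLLC', targeted=True,
#                         raw_model_location='../RawModels/', clean_data_location='../CleanDatasets/',
#                         adv_examples_dir='../AdversarialExampleDatasets/', device=device,
#                         attack_batch_size=100, eps=0.1, alpha_ratio=0.5)
#   rllc.generate()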


def main(args):
# Device configuration
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Set the random seed manually for reproducibility.
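    # (deterministic cuDNN kernels with benchmark=False trade some speed for run-to-run reproducibility)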
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
    random.seed(args.seed)

    name = 'RLLC'
targeted = True
rllc = RLLCGeneration(dataset=args.dataset, attack_name=name, targeted=targeted, raw_model_location=args.modelDir,
clean_data_location=args.cleanDir, adv_examples_dir=args.adv_saver, device=device,
eps=args.epsilon, alpha_ratio=args.alpha_ratio, attack_batch_size=args.attack_batch_size)
    rllc.generate()


if __name__ == '__main__':
parser = argparse.ArgumentParser(description='The RLLC Attack Generation')
# common arguments
parser.add_argument('--dataset', type=str, default='CIFAR10', help='the dataset should be MNIST or CIFAR10')
parser.add_argument('--modelDir', type=str, default='../RawModels/', help='the directory for the raw model')
parser.add_argument('--cleanDir', type=str, default='../CleanDatasets/', help='the directory for the clean dataset that will be attacked')
parser.add_argument('--adv_saver', type=str, default='../AdversarialExampleDatasets/',
help='the directory used to save the generated adversarial examples')
parser.add_argument('--seed', type=int, default=100, help='the default random seed for numpy and torch')
parser.add_argument('--gpu_index', type=str, default='0', help="gpu index to use")
# arguments for the particular attack
parser.add_argument('--epsilon', type=float, default=0.1, help='the epsilon value of RLLC')
    parser.add_argument('--alpha_ratio', type=float, default=0.5, help='the ratio of the alpha value relative to epsilon in RLLC')
parser.add_argument('--attack_batch_size', type=int, default=100, help='the default batch size for adversarial example generation')
arguments = parser.parse_args()
main(arguments)
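
# Example invocation (a sketch; it assumes the default directory layout above, i.e. that
# ../RawModels/, ../CleanDatasets/ and ../AdversarialExampleDatasets/ already exist and are
# populated by the companion scripts):
#
#   python RLLC_Generation.py --dataset=CIFAR10 --epsilon=0.1 --alpha_ratio=0.5 --gpu_index=0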