-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: clip_inference.py
210 lines (180 loc) · 11.1 KB
/
clip_inference.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
import os
import clip
from numpy.core.function_base import linspace
import torch
from torch.utils.data.dataloader import DataLoader
import torchvision
import re
import argparse
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from logistic_regression import LogisticRegressionTorch
from transfer_learner import TransferLearner
import numpy as np
from timeit import default_timer as timer
from weensembles.utils import cuda_mem_try
from torchvision import datasets
from conf import settings
def infer_clip():
    """Extract CLIP image features for a dataset and fit a linear probe.

    Workflow, driven entirely by command-line arguments:
      1. Feature stage: unless ``-load_features`` is given and cached ``.npy``
         files exist, run the CLIP image encoder over the train/test sets and
         cache features and targets under ``<folder>/<clip_name>/``.
      2. L2-normalize the (cached or freshly computed) features per sample.
      3. Probe stage: for each replication subfolder, sweep ``n_Cs``
         log-spaced regularization values in [10**-2, 10**2], fit a
         ``TransferLearner`` on the training split, select the C with the best
         validation accuracy, and save train/val/test logits and labels under
         ``<folder>/<repl>/outputs/<clip_name>/``.

    Returns:
        1 when an unsupported dataset is requested, otherwise ``None``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-folder', type=str, required=True, help='experiment root folder')
    parser.add_argument('-dataset_data', type=str, help='Folder where dataset is stored')
    parser.add_argument('-dataset', default="cifar10", choices=["cifar10", "cifar100", "imagenet"], help="Dataset to use.")
    parser.add_argument('-clip_data', type=str, help='Folder where clip model should be stored')
    parser.add_argument('-batch_sz_clip', type=int, default=64, help='batch size for clip inference')
    parser.add_argument('-device', type=str, default="cpu", help='Device on which to perform the computations')
    parser.add_argument('-architecture', type=str, default='ViT-B/32', help='Clip architecture')
    parser.add_argument('-verbosity', default=0, type=int, help='Verbosity level')
    parser.add_argument('-lr', type=float, default=0.1, help="Learning rate")
    parser.add_argument('-epochs', type=int, default=25, help="Number of epochs")
    parser.add_argument('-batch_size', type=int, default=500000, help="Batch size for training")
    parser.add_argument('-n_Cs', type=int, default=21, help="Number of C values between 10**-2 and 10**2 to test")
    parser.add_argument('-multiple_repl', action="store_true", dest="multi_repl", help="Root folder contains multiple replications folders")
    parser.add_argument('-load_features', action="store_true", dest="load_feat", help="If specified, training and testing features are loaded. Expected location is the architecture subfolder of the root folder.")
    args = parser.parse_args()

    # e.g. "clip_ViT-B-32_LP"; '/' in the architecture name is replaced since
    # the name doubles as a directory name.
    clip_name = "clip_{}".format(args.architecture.replace('/', '-')) + "_LP"

    if args.multi_repl:
        # Replication subfolders are purely numeric directory names.
        fold_ptrn = r'^\d+$'
        folders = [f for f in os.listdir(args.folder) if os.path.isdir(os.path.join(args.folder, f))]
        repl_folders = sorted([f for f in folders if re.match(fold_ptrn, f) is not None])
        if args.verbosity > 0:
            print("Replications found {}".format(repl_folders))
    else:
        repl_folders = [""]

    # Cache directory for features/targets shared by all replications.
    feat_dir = os.path.join(args.folder, clip_name)
    has_saved_features = os.path.exists(
        os.path.join(feat_dir, "train_features.npy")
    ) and os.path.exists(
        os.path.join(feat_dir, "test_features.npy"))
    has_saved_targets = os.path.exists(
        os.path.join(feat_dir, "train_targets.npy")
    ) and os.path.exists(
        os.path.join(feat_dir, "test_targets.npy"))

    if (not args.load_feat) or (not has_saved_features):
        print("Loading clip model")
        model, preprocess = clip.load(args.architecture, device=args.device, download_root=args.clip_data)
        # CLIP may load in fp16; cast to fp32 for stable downstream use.
        model.eval().float()
    else:
        # Features will be loaded from cache; no transform needed.
        preprocess = None

    if (not args.load_feat) or (not has_saved_features) or (not has_saved_targets):
        print("Loading dataset")
        if args.dataset == "cifar10":
            dataset_train = torchvision.datasets.CIFAR10(root=args.dataset_data, train=True, download=True, transform=preprocess)
            dataset_test = torchvision.datasets.CIFAR10(root=args.dataset_data, train=False, download=True, transform=preprocess)
        elif args.dataset == "cifar100":
            dataset_train = torchvision.datasets.CIFAR100(root=args.dataset_data, train=True, download=True, transform=preprocess)
            dataset_test = torchvision.datasets.CIFAR100(root=args.dataset_data, train=False, download=True, transform=preprocess)
        elif args.dataset == "imagenet":
            traindir = os.path.join(args.dataset_data, "train")
            testdir = os.path.join(args.dataset_data, "val")
            dataset_train = torchvision.datasets.ImageFolder(root=traindir, transform=preprocess)
            dataset_test = torchvision.datasets.ImageFolder(root=testdir, transform=preprocess)
        else:
            print("Error: unsupported dataset type {}".format(args.dataset))
            return 1

    if (not args.load_feat) or (not has_saved_features):
        # shuffle=False keeps feature order aligned with dataset.targets.
        train_loader = DataLoader(dataset=dataset_train, batch_size=args.batch_sz_clip, shuffle=False, num_workers=4, pin_memory=True)
        test_loader = DataLoader(dataset=dataset_test, batch_size=args.batch_sz_clip, shuffle=False, num_workers=4, pin_memory=True)
        train_features = []
        test_features = []
        print("Processing train data")
        for batch_index, (images, _) in enumerate(train_loader):
            print("Progress {}%".format(100 * (batch_index + 1) // len(train_loader)), end="\r")
            with torch.no_grad():
                train_features.append(model.encode_image(images.to(args.device)).cpu())
        print("\n")
        print("Processing test data")
        for batch_index, (images, _) in enumerate(test_loader):
            print("Progress {}%".format(100 * (batch_index + 1) // len(test_loader)), end="\r")
            with torch.no_grad():
                test_features.append(model.encode_image(images.to(args.device)).cpu())
        print("\n")
        train_features = torch.cat(train_features, dim=0)
        test_features = torch.cat(test_features, dim=0)
        # makedirs with exist_ok avoids the check-then-create race and also
        # creates missing parent directories (os.mkdir did neither).
        os.makedirs(feat_dir, exist_ok=True)
        print("Saving features")
        np.save(os.path.join(feat_dir, "train_features.npy"), train_features.cpu())
        np.save(os.path.join(feat_dir, "test_features.npy"), test_features.cpu())

    if (not args.load_feat) or (not has_saved_targets):
        os.makedirs(feat_dir, exist_ok=True)
        print("Saving targets")
        train_targets = np.array(dataset_train.targets)
        test_targets = np.array(dataset_test.targets)
        np.save(os.path.join(feat_dir, "train_targets.npy"), train_targets)
        np.save(os.path.join(feat_dir, "test_targets.npy"), test_targets)

    if args.load_feat and has_saved_features:
        print("Loading features")
        train_features = torch.from_numpy(np.load(os.path.join(feat_dir, "train_features.npy")))
        test_features = torch.from_numpy(np.load(os.path.join(feat_dir, "test_features.npy")))
    if args.load_feat and has_saved_targets:
        train_targets = np.load(os.path.join(feat_dir, "train_targets.npy"))
        test_targets = np.load(os.path.join(feat_dir, "test_targets.npy"))

    # Per-sample L2 normalization, the standard preprocessing for CLIP
    # linear probes. Note: cached features on disk are unnormalized.
    train_features /= torch.linalg.vector_norm(train_features, dim=-1, keepdim=True)
    test_features /= torch.linalg.vector_norm(test_features, dim=-1, keepdim=True)
    train_targets = torch.from_numpy(train_targets)
    test_targets = torch.from_numpy(test_targets)

    for repl_f in repl_folders:
        print("Processing subfolder {}".format(repl_f))
        # Precomputed train/validation split indices for this replication.
        train_idx = torch.from_numpy(np.load(os.path.join(args.folder, repl_f, "split", "train_idx.npy")))
        val_idx = torch.from_numpy(np.load(os.path.join(args.folder, repl_f, "split", "val_idx.npy")))
        start = timer()
        lin_val_set_size = 5000
        E_start = -2
        E_end = 2
        E_count = args.n_Cs
        # Log-spaced regularization grid: 10**-2 ... 10**2.
        C_vals = 10**np.linspace(start=E_start, stop=E_end,
                                 num=E_count, endpoint=True)
        if len(val_idx) == 0:
            # No precomputed validation split: carve a stratified one out of
            # the training data. NOTE(review): no random_state is fixed, so
            # this split is not reproducible across runs — confirm intended.
            lin_train_idx, lin_val_idx = train_test_split(np.arange(train_features.shape[0]), test_size=lin_val_set_size,
                                                          shuffle=True, stratify=train_targets)
            lin_train_idx = torch.from_numpy(lin_train_idx).to(dtype=torch.long)
            lin_val_idx = torch.from_numpy(lin_val_idx).to(dtype=torch.long)
        else:
            lin_train_idx = train_idx
            lin_val_idx = val_idx
        lin_train_features = train_features[lin_train_idx]
        lin_val_features = train_features[lin_val_idx]
        lin_train_tar = train_targets[lin_train_idx]
        lin_val_tar = train_targets[lin_val_idx]

        best_acc = 0
        best_C = 1.0
        best_model = None
        for Ci, C_val in enumerate(C_vals):
            if args.verbosity == 0:
                print("Progress {}%".format(100 * (Ci + 1) // len(C_vals)), end="\r")
            if args.verbosity > 0:
                print("Testing C value {}".format(C_val))
            transf_lear = TransferLearner(C=C_val, fit_intercept=True, epochs=args.epochs, verbosity=args.verbosity, device=args.device, learning_rate=args.lr)
            # cuda_mem_try retries the fit with a smaller batch size on OOM.
            cuda_mem_try(
                fun=lambda batch_size: transf_lear.fit(X=lin_train_features, y=lin_train_tar, batch_size=batch_size),
                start_bsz=args.batch_size,
                device=args.device,
                dec_coef=0.8,
                verbose=args.verbosity)
            val_pred = transf_lear.decision_function(lin_val_features.to(device=args.device))
            cur_acc = torch.sum(val_pred.topk(k=1, dim=-1).indices.squeeze() == lin_val_tar.to(device=args.device)).item() / len(lin_val_tar)
            if args.verbosity > 0:
                print("Validation accuracy obtained {}".format(cur_acc))
            # Keep the first model even if its accuracy is 0.0, otherwise
            # best_model could stay None and crash below.
            if best_model is None or cur_acc > best_acc:
                best_acc = cur_acc
                best_C = C_val
                best_model = transf_lear
        print("C value selected {} with validation accuracy {}".format(best_C, best_acc))

        # Produce logits for the full training set and the test set with the
        # selected model; outputs are indexed back into train/val splits below.
        train_logits = cuda_mem_try(fun = lambda batch_size: best_model.decision_function(train_features, batch_size=batch_size),
                                    start_bsz=train_features.shape[0],
                                    device=args.device).cpu()
        test_logits = best_model.decision_function(test_features.to(device=args.device)).cpu()
        print("Linear probe inference finished in {}s".format(timer() - start))

        net_folder = os.path.join(args.folder, repl_f, "outputs", clip_name)
        os.makedirs(net_folder, exist_ok=True)
        np.save(os.path.join(net_folder, "train_outputs.npy"), train_logits[train_idx].cpu())
        np.save(os.path.join(net_folder, "train_labels.npy"), train_targets[train_idx].cpu())
        if len(val_idx) > 0:
            np.save(os.path.join(net_folder, "val_outputs.npy"), train_logits[val_idx].cpu())
            np.save(os.path.join(net_folder, "val_labels.npy"), train_targets[val_idx].cpu())
        np.save(os.path.join(net_folder, "test_outputs.npy"), test_logits.cpu())
        np.save(os.path.join(net_folder, "test_labels.npy"), test_targets.cpu())
# Script entry point: run CLIP linear-probe inference when executed directly.
if __name__ == "__main__":
    infer_clip()