utils.py
import os
import random
from logging import getLogger, INFO, StreamHandler, FileHandler, Formatter

import numpy as np
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, OneCycleLR, CosineAnnealingLR, ReduceLROnPlateau


def set_seed(seed):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # benchmark must also be disabled, otherwise cuDNN may select non-deterministic kernels
    torch.backends.cudnn.benchmark = False


def get_logger(filename):
    """Return a logger that writes to both the console and the given file."""
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    log_formatter = Formatter(
        '%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    # Console handler
    handler1 = StreamHandler()
    handler1.setFormatter(log_formatter)
    # File handler
    handler2 = FileHandler(filename=filename)
    handler2.setFormatter(log_formatter)
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger


def get_scheduler(optimizer, training_params):
    """Build the LR scheduler named in training_params['scheduler']."""
    scheduler_params = training_params['scheduler_params']
    if training_params['scheduler'] == 'CosineAnnealingWarmRestarts':
        scheduler = CosineAnnealingWarmRestarts(
            optimizer,
            T_0=scheduler_params['T_0'],
            eta_min=scheduler_params['min_lr'],
            last_epoch=-1
        )
    elif training_params['scheduler'] == 'OneCycleLR':
        scheduler = OneCycleLR(
            optimizer,
            max_lr=scheduler_params['max_lr'],
            # the OneCycleLR keyword is steps_per_epoch; the config key keeps its original name
            steps_per_epoch=scheduler_params['steps_per_epochs'],
            epochs=training_params['epochs'],
        )
    elif training_params['scheduler'] == 'CosineAnnealingLR':
        scheduler = CosineAnnealingLR(
            optimizer,
            T_max=scheduler_params['T_max'],
            eta_min=scheduler_params['min_lr'],
            last_epoch=-1,
        )
    elif training_params['scheduler'] == 'ReduceLROnPlateau':
        # Reduce on a minimised metric (loss) or a maximised one (e.g. a validation score)
        sch_mode = 'min' if training_params['monitor'] == 'loss' else 'max'
        scheduler = ReduceLROnPlateau(
            optimizer,
            mode=sch_mode,
            factor=scheduler_params['factor'],
            patience=scheduler_params['patience'],
            min_lr=scheduler_params['min_lr'],
            verbose=True)
    else:
        raise ValueError(f"Unknown scheduler: {training_params['scheduler']}")
    return scheduler
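

# A minimal usage sketch (an assumption, not part of the original file): the model,
# optimizer, and config values below are placeholders chosen only to match the keys
# get_scheduler reads ('scheduler', 'scheduler_params', 'epochs', 'monitor').
if __name__ == '__main__':
    set_seed(42)
    logger = get_logger('train.log')
    # Hypothetical model/optimizer just to have something to schedule
    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    training_params = {
        'scheduler': 'CosineAnnealingLR',
        'scheduler_params': {'T_max': 10, 'min_lr': 1e-6},
        'epochs': 10,
        'monitor': 'loss',
    }
    scheduler = get_scheduler(optimizer, training_params)
    logger.info('Using scheduler: %s', type(scheduler).__name__)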