Showing 4 changed files with 533 additions and 0 deletions.
@@ -0,0 +1,143 @@
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
import torch
from my_utils import *

def validation(epoch, model, data_loader, criterion, device, n_class=12):
    print('Start validation #{}'.format(epoch))
    model.eval()
    with torch.no_grad():
        total_loss = 0
        cnt = 0
        hist = np.zeros((n_class, n_class))
        for step, (images, masks, _) in enumerate(data_loader):
            images = torch.stack(images)       # (batch, channel, height, width)
            masks = torch.stack(masks).long()  # (batch, height, width)

            images, masks = images.to(device), masks.to(device)

            outputs = model(images)
            loss = criterion(outputs, masks)
            total_loss += loss.item()  # accumulate the batch loss as a Python float
            cnt += 1

            outputs = torch.argmax(outputs, dim=1).detach().cpu().numpy()

            # Accumulate the confusion matrix over the whole validation set.
            hist = add_hist(hist, masks.detach().cpu().numpy(), outputs, n_class=n_class)

        # Metrics are read off the fully accumulated confusion matrix.
        acc, acc_cls, mean_iu, fwavacc = label_accuracy_score2(hist)

        avrg_loss = total_loss / cnt
        print('Validation #{} Average Loss: {:.4f}, mIoU: {:.4f}, acc: {:.4f}, acc_cls: {:.4f}'.format(
            epoch, avrg_loss, mean_iu, acc, acc_cls))

    return avrg_loss, mean_iu

def validation3(epoch, model, data_loader, criterion, device, n_class=12):
    print('Start validation #{}'.format(epoch))
    model.eval()
    with torch.no_grad():
        total_loss = 0
        cnt = 0
        mIoU_list = []
        hist = np.zeros((n_class, n_class))
        all_iou = []
        for step, (images, masks, _) in enumerate(data_loader):
            images = torch.stack(images)       # (batch, channel, height, width)
            masks = torch.stack(masks).long()  # (batch, height, width)

            images, masks = images.to(device), masks.to(device)

            outputs = model(images)
            loss = criterion(outputs, masks)
            total_loss += loss.item()
            cnt += 1

            outputs = torch.argmax(outputs, dim=1).detach().cpu().numpy()

            # Confusion matrix accumulated over the whole validation set.
            hist = add_hist(hist, masks.detach().cpu().numpy(), outputs, n_class=n_class)

            # mIoU of this batch alone, from a fresh per-batch histogram.
            mIoU = label_accuracy_score(masks.detach().cpu().numpy(), outputs, n_class=n_class)
            mIoU_list.append(mIoU)

            # Mean of per-image IoU scores within this batch.
            batch_iou = batch_iou_score(masks.detach().cpu().numpy(), outputs, len(outputs))
            all_iou.append(batch_iou)

        avrg_loss = total_loss / cnt
        miou2 = mIoU_score(hist)      # dataset-level mIoU
        miou3 = np.mean(all_iou)      # mean of per-image IoU scores
        print('Validation #{} Average Loss: {:.4f}, mIoU2: {:.4f}, mIoU3: {:.4f}'.format(
            epoch, avrg_loss, miou2, miou3))

    return avrg_loss, np.mean(mIoU_list), miou2, miou3

def _fast_hist(label_true, label_pred, n_class):
    # Confusion matrix for one image: rows are ground-truth classes, columns are predictions.
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) +
        label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
    return hist


def label_accuracy_score(label_trues, label_preds, n_class=12):
    """Return the mean IoU computed over a batch of masks."""
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    with np.errstate(divide='ignore', invalid='ignore'):
        iu = np.diag(hist) / (
            hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
        )
    mean_iu = np.nanmean(iu)
    return mean_iu


def label_accuracy_score2(hist):
    """
    Returns accuracy scores computed from an accumulated confusion matrix.
      - acc: overall pixel accuracy
      - acc_cls: mean per-class accuracy
      - mean_iu: mean IoU
      - fwavacc: frequency-weighted IoU
    """
    acc = np.diag(hist).sum() / hist.sum()
    with np.errstate(divide='ignore', invalid='ignore'):
        acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)

    with np.errstate(divide='ignore', invalid='ignore'):
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)

    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc


def add_hist(hist, label_trues, label_preds, n_class):
    """Accumulate per-image confusion matrices into an existing histogram."""
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    return hist


def batch_iou_score(label_trues, label_preds, batch_size, n_class=12):
    """Average of per-image mIoU values over one batch."""
    batch_iou = 0
    for lt, lp in zip(label_trues, label_preds):
        hist = _fast_hist(lt.flatten(), lp.flatten(), n_class)
        with np.errstate(divide='ignore', invalid='ignore'):
            iu = np.diag(hist) / (
                hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
            )
        batch_iou += np.nanmean(iu) / batch_size
    return batch_iou


def mIoU_score(hist):
    """Mean IoU from an accumulated confusion matrix."""
    with np.errstate(divide='ignore', invalid='ignore'):
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    return mean_iu
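
For reference, a minimal sketch of how the confusion-matrix helpers above fit together. The 3x3 masks and n_class=3 below are made-up values used purely for illustration, not data from this commit:

# Hypothetical tiny example: one ground-truth mask and one predicted mask, 3 classes.
gt   = np.array([[0, 0, 1], [1, 1, 2], [2, 2, 2]])
pred = np.array([[0, 0, 1], [1, 2, 2], [2, 2, 1]])

n_class = 3
hist = np.zeros((n_class, n_class))
hist = add_hist(hist, [gt], [pred], n_class=n_class)   # accumulate a "batch" of one image
acc, acc_cls, mean_iu, fwavacc = label_accuracy_score2(hist)
print(f'acc={acc:.3f}, acc_cls={acc_cls:.3f}, mIoU={mean_iu:.3f}, fwavacc={fwavacc:.3f}')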
@@ -0,0 +1,35 @@
import os
import glob
import torch
import numpy as np

val_every = 1

saved_dir = './saved'
if not os.path.isdir(saved_dir):
    os.mkdir(saved_dir)


def save_model(model, saved_dir, file_name='default.pt'):
    # Save only the state_dict so load_model can restore it directly.
    output_path = os.path.join(saved_dir, file_name)
    torch.save(model.state_dict(), output_path)


def load_model(model, device, saved_dir, file_name='default.pt'):
    model_path = os.path.join(saved_dir, file_name)
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint)


def calculate_parameter(model, print_param=False):
    # Count trainable parameters and conv layers; optionally print each parameter's shape.
    n_param = 0
    n_conv = 0
    for p_idx, (param_name, param) in enumerate(model.named_parameters()):
        if param.requires_grad:
            param_numpy = param.detach().cpu().numpy()  # to numpy array
            n_param += len(param_numpy.reshape(-1))
            if print_param:
                print("[%d] name:[%s] shape:[%s]." % (p_idx, param_name, param_numpy.shape))
            if "conv" in param_name:
                n_conv += 1
    print("-" * 50 + f"\nTotal number of parameters: [{n_param:,d}]\n" + "-" * 50)
    print(f"Total number of Conv layers: {n_conv}")
@@ -0,0 +1,88 @@
import math
import torch
from torch.optim.lr_scheduler import _LRScheduler


class CosineAnnealingWarmupRestarts(_LRScheduler):
    """
    Cosine-annealing scheduler with linear warmup and warm restarts.

    optimizer (Optimizer): Wrapped optimizer.
    first_cycle_steps (int): First cycle step size.
    cycle_mult (float): Cycle steps magnification. Default: 1.
    max_lr (float): First cycle's max learning rate. Default: 0.1.
    min_lr (float): Min learning rate. Default: 0.001.
    warmup_steps (int): Linear warmup step size. Default: 0.
    gamma (float): Decrease rate of max learning rate by cycle. Default: 1.
    last_epoch (int): The index of last epoch. Default: -1.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 first_cycle_steps: int,
                 cycle_mult: float = 1.,
                 max_lr: float = 0.1,
                 min_lr: float = 0.001,
                 warmup_steps: int = 0,
                 gamma: float = 1.,
                 last_epoch: int = -1
                 ):
        assert warmup_steps < first_cycle_steps

        self.first_cycle_steps = first_cycle_steps  # first cycle step size
        self.cycle_mult = cycle_mult                # cycle steps magnification
        self.base_max_lr = max_lr                   # first max learning rate
        self.max_lr = max_lr                        # max learning rate in the current cycle
        self.min_lr = min_lr                        # min learning rate
        self.warmup_steps = warmup_steps            # warmup step size
        self.gamma = gamma                          # decrease rate of max learning rate by cycle

        self.cur_cycle_steps = first_cycle_steps    # current cycle step size
        self.cycle = 0                              # cycle count
        self.step_in_cycle = last_epoch             # step index within the current cycle

        super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)

        # Start every parameter group at min_lr.
        self.init_lr()

    def init_lr(self):
        self.base_lrs = []
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.min_lr
            self.base_lrs.append(self.min_lr)

    def get_lr(self):
        if self.step_in_cycle == -1:
            return self.base_lrs
        elif self.step_in_cycle < self.warmup_steps:
            # Linear warmup from base_lr up to max_lr.
            return [(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr
                    for base_lr in self.base_lrs]
        else:
            # Cosine annealing from max_lr back down to base_lr over the rest of the cycle.
            return [base_lr + (self.max_lr - base_lr)
                    * (1 + math.cos(math.pi * (self.step_in_cycle - self.warmup_steps)
                                    / (self.cur_cycle_steps - self.warmup_steps))) / 2
                    for base_lr in self.base_lrs]

    def step(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
            self.step_in_cycle = self.step_in_cycle + 1
            if self.step_in_cycle >= self.cur_cycle_steps:
                # Restart: begin the next (possibly longer) cycle.
                self.cycle += 1
                self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps
                self.cur_cycle_steps = int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps
        else:
            if epoch >= self.first_cycle_steps:
                if self.cycle_mult == 1.:
                    self.step_in_cycle = epoch % self.first_cycle_steps
                    self.cycle = epoch // self.first_cycle_steps
                else:
                    n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))
                    self.cycle = n
                    self.step_in_cycle = epoch - int(self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1))
                    self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n)
            else:
                self.cur_cycle_steps = self.first_cycle_steps
                self.step_in_cycle = epoch

        self.max_lr = self.base_max_lr * (self.gamma ** self.cycle)
        self.last_epoch = math.floor(epoch)
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr
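
A minimal sketch of wiring the scheduler to an optimizer. The dummy parameter and all of the step counts and learning rates below are placeholder values for illustration, not settings taken from this commit:

# Hypothetical optimizer over a single dummy parameter.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.1)

scheduler = CosineAnnealingWarmupRestarts(
    optimizer,
    first_cycle_steps=100,   # length of the first cycle, in scheduler steps
    cycle_mult=1.0,          # keep every cycle the same length
    max_lr=0.1,              # peak learning rate of the first cycle
    min_lr=1e-4,             # floor the schedule decays back to
    warmup_steps=10,         # linear warmup at the start of each cycle
    gamma=0.5,               # halve max_lr after every restart
)

for step in range(300):
    # ... forward pass, loss.backward(), optimizer.step() would go here ...
    scheduler.step()         # advance the warmup/cosine schedule by one step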