Showing 5 changed files with 616 additions and 0 deletions.
File 1 of 5 (83 added lines) — a COCO-format dataset wrapper and collate function:
import os
from torch.utils.data import Dataset, DataLoader
import numpy as np

# libraries used for preprocessing
import cv2
from pycocotools.coco import COCO
# import torchvision
# import torchvision.transforms as transforms
# import albumentations as A
# from albumentations.pytorch import ToTensorV2


def get_classname(classID, cats):
    for i in range(len(cats)):
        if cats[i]['id'] == classID:
            return cats[i]['name']
    return "None"


class CustomDataLoader(Dataset):
    """COCO format"""
    def __init__(self, data_dir, mode='train', transform=None):
        super().__init__()
        self.mode = mode
        self.transform = transform
        self.coco = COCO(data_dir)
        self.dataset_path = '../input/data/'
        # index 0 is the background class
        self.category_names = ['Background', 'UNKNOWN', 'General trash', 'Paper', 'Paper pack', 'Metal', 'Glass', 'Plastic', 'Styrofoam', 'Plastic bag', 'Battery', 'Clothing']

    def __getitem__(self, index: int):
        # the dataset is indexed so that it behaves like a list
        image_id = self.coco.getImgIds(imgIds=index)
        image_infos = self.coco.loadImgs(image_id)[0]

        # load the image with cv2
        images = cv2.imread(os.path.join(self.dataset_path, image_infos['file_name']))
        images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB).astype(np.uint8)
        # images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB).astype(np.float32)
        # images /= 255.0

        if self.mode in ('train', 'val'):
            ann_ids = self.coco.getAnnIds(imgIds=image_infos['id'])
            anns = self.coco.loadAnns(ann_ids)

            # load the categories into a variable
            cat_ids = self.coco.getCatIds()
            cats = self.coco.loadCats(cat_ids)

            # masks: a 2D array of size (height x width)
            # each pixel holds the index of its class in category_names
            # Background = 0, UNKNOWN = 1, General trash = 2, ..., Clothing = 11
            masks = np.zeros((image_infos["height"], image_infos["width"]))
            for i in range(len(anns)):
                className = get_classname(anns[i]['category_id'], cats)
                pixel_value = self.category_names.index(className)
                masks = np.maximum(self.coco.annToMask(anns[i]) * pixel_value, masks)
            masks = masks.astype(np.float32)

            # transform -> using the albumentations library
            if self.transform is not None:
                transformed = self.transform(image=images, mask=masks)
                images = transformed["image"]
                masks = transformed["mask"]

            return images, masks, image_infos

        if self.mode == 'test':
            # transform -> using the albumentations library
            if self.transform is not None:
                transformed = self.transform(image=images)
                images = transformed["image"]

            return images, image_infos

    def __len__(self) -> int:
        # return the size of the whole dataset
        return len(self.coco.getImgIds())


# a collate_fn is needed to batch samples whose masks vary in size
def collate_fn(batch):
    return tuple(zip(*batch))
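
For context, a minimal usage sketch (not part of the commit): it assumes the annotation file sits at '../input/data/train.json' (a hypothetical path) and that albumentations with ToTensorV2 is available, matching the commented-out imports above.

import torch
import albumentations as A
from albumentations.pytorch import ToTensorV2
from torch.utils.data import DataLoader

train_transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    ToTensorV2(),  # converts the HWC uint8 image to a CHW tensor
])

train_dataset = CustomDataLoader(data_dir='../input/data/train.json',
                                 mode='train',
                                 transform=train_transform)
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True,
                          num_workers=4, collate_fn=collate_fn)

# collate_fn yields tuples, so re-stack them per batch:
images, masks, infos = next(iter(train_loader))
images = torch.stack(images)        # (B, C, H, W)
masks = torch.stack(masks).long()   # (B, H, W)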
File 2 of 5 (143 added lines) — validation loops and histogram-based segmentation metrics:
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
import torch
from my_utils import *


def validation(epoch, model, data_loader, criterion, device, n_class=12):
    print('Start validation #{}'.format(epoch))
    model.eval()
    with torch.no_grad():
        total_loss = 0
        cnt = 0
        hist = np.zeros((n_class, n_class))
        for step, (images, masks, _) in enumerate(data_loader):
            images = torch.stack(images)       # (batch, channel, height, width)
            masks = torch.stack(masks).long()  # (batch, height, width)

            images, masks = images.to(device), masks.to(device)

            outputs = model(images)
            loss = criterion(outputs, masks)
            total_loss += loss.item()  # accumulate a Python float, not a tensor
            cnt += 1

            outputs = torch.argmax(outputs, dim=1).detach().cpu().numpy()
            hist = add_hist(hist, masks.detach().cpu().numpy(), outputs, n_class=n_class)

        # compute the metrics once from the accumulated histogram
        acc, acc_cls, mean_iu, fwavacc = label_accuracy_score2(hist)

        avrg_loss = total_loss / cnt
        print('Validation #{} Average Loss: {:.4f}, mIoU: {:.4f}, acc: {:.4f}, acc_cls: {:.4f}'.format(
            epoch, avrg_loss, mean_iu, acc, acc_cls))

    return avrg_loss, mean_iu


def validation3(epoch, model, data_loader, criterion, device, n_class=12):
    print('Start validation #{}'.format(epoch))
    model.eval()
    with torch.no_grad():
        total_loss = 0
        cnt = 0
        mIoU_list = []
        hist = np.zeros((n_class, n_class))
        all_iou = []
        for step, (images, masks, _) in enumerate(data_loader):
            images = torch.stack(images)       # (batch, channel, height, width)
            masks = torch.stack(masks).long()  # (batch, height, width)

            images, masks = images.to(device), masks.to(device)

            outputs = model(images)
            loss = criterion(outputs, masks)
            total_loss += loss.item()
            cnt += 1

            outputs = torch.argmax(outputs, dim=1).detach().cpu().numpy()

            # dataset-level histogram, accumulated over all batches
            hist = add_hist(hist, masks.detach().cpu().numpy(), outputs, n_class=n_class)

            # per-batch mIoU from this batch's histogram
            mIoU = label_accuracy_score(masks.detach().cpu().numpy(), outputs, n_class=n_class)
            mIoU_list.append(mIoU)

            # per-image mIoU, averaged within the batch
            batch_iou = batch_iou_score(masks.detach().cpu().numpy(), outputs, len(outputs))
            all_iou.append(batch_iou)

        avrg_loss = total_loss / cnt
        miou2 = mIoU_score(hist)
        miou3 = np.mean(all_iou)
        print('Validation #{} Average Loss: {:.4f}, mIoU2: {:.4f}, mIoU3: {:.4f}'.format(
            epoch, avrg_loss, miou2, miou3))

    return avrg_loss, np.mean(mIoU_list), miou2, miou3


def _fast_hist(label_true, label_pred, n_class):
    # confusion matrix: rows are ground truth, columns are predictions
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) +
        label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
    return hist


def label_accuracy_score(label_trues, label_preds, n_class=12):
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    with np.errstate(divide='ignore', invalid='ignore'):
        iu = np.diag(hist) / (
            hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
        )
    mean_iu = np.nanmean(iu)
    return mean_iu


def label_accuracy_score2(hist):
    """
    Returns accuracy score evaluation results.
      - [acc]: overall pixel accuracy
      - [acc_cls]: mean per-class accuracy
      - [mean_iu]: mean IoU
      - [fwavacc]: frequency-weighted IoU
    """
    acc = np.diag(hist).sum() / hist.sum()
    with np.errstate(divide='ignore', invalid='ignore'):
        acc_cls = np.diag(hist) / hist.sum(axis=1)
        acc_cls = np.nanmean(acc_cls)

    with np.errstate(divide='ignore', invalid='ignore'):
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)

    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc


def add_hist(hist, label_trues, label_preds, n_class):
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    return hist


def batch_iou_score(label_trues, label_preds, batch_size, n_class=12):
    # computes mIoU per image, then averages over the batch
    batch_iou = 0
    for lt, lp in zip(label_trues, label_preds):
        hist = _fast_hist(lt.flatten(), lp.flatten(), n_class)
        with np.errstate(divide='ignore', invalid='ignore'):
            iu = np.diag(hist) / (
                hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
            )
        batch_iou += np.nanmean(iu) / batch_size
    return batch_iou


def mIoU_score(hist):
    with np.errstate(divide='ignore', invalid='ignore'):
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    return mean_iu
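
To see what _fast_hist and mIoU_score actually compute, here is a small worked example with an assumed 3-class setup (the real code uses n_class=12):

import numpy as np

label_true = np.array([[0, 0, 1],
                       [1, 2, 2]])
label_pred = np.array([[0, 1, 1],
                       [1, 2, 0]])

hist = _fast_hist(label_true.flatten(), label_pred.flatten(), n_class=3)
# hist[i, j] counts pixels of true class i predicted as class j:
# [[1, 1, 0],
#  [0, 2, 0],
#  [1, 0, 1]]

# per-class IoU = TP / (TP + FP + FN), read off the histogram:
# class 0: 1 / (2 + 2 - 1) = 1/3
# class 1: 2 / (2 + 3 - 2) = 2/3
# class 2: 1 / (2 + 1 - 1) = 1/2
print(mIoU_score(hist))  # (1/3 + 2/3 + 1/2) / 3 = 0.5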
File 3 of 5 (35 added lines) — checkpoint save/load and parameter-count utilities:
import os
import glob
import torch
import numpy as np

val_every = 1

saved_dir = './saved'
if not os.path.isdir(saved_dir):
    os.mkdir(saved_dir)


def save_model(model, saved_dir, file_name='default.pt'):
    # save the raw state_dict; load_model below expects this format
    output_path = os.path.join(saved_dir, file_name)
    torch.save(model.state_dict(), output_path)


def load_model(model, device, saved_dir, file_name='default.pt'):
    model_path = os.path.join(saved_dir, file_name)
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint)


def calculate_parameter(model, print_param=False):
    n_param = 0
    n_conv = 0
    for p_idx, (param_name, param) in enumerate(model.named_parameters()):
        if param.requires_grad:
            param_numpy = param.detach().cpu().numpy()  # to numpy array
            n_param += param_numpy.size
            if print_param:
                print("[%d] name:[%s] shape:[%s]." % (p_idx, param_name, param_numpy.shape))
            if "conv" in param_name:
                n_conv += 1
    print("-" * 50 + f"\nTotal number of parameters: [{n_param:,d}]\n" + "-" * 50)
    print(f"Total number of Conv layers: {n_conv}")
File 4 of 5 (88 added lines) — a cosine annealing learning-rate scheduler with linear warmup and restarts:
import math
import torch
from torch.optim.lr_scheduler import _LRScheduler


class CosineAnnealingWarmupRestarts(_LRScheduler):
    """
    optimizer (Optimizer): Wrapped optimizer.
    first_cycle_steps (int): First cycle step size.
    cycle_mult (float): Cycle steps magnification. Default: 1.
    max_lr (float): First cycle's max learning rate. Default: 0.1.
    min_lr (float): Min learning rate. Default: 0.001.
    warmup_steps (int): Linear warmup step size. Default: 0.
    gamma (float): Decrease rate of max learning rate per cycle. Default: 1.
    last_epoch (int): The index of the last epoch. Default: -1.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 first_cycle_steps: int,
                 cycle_mult: float = 1.,
                 max_lr: float = 0.1,
                 min_lr: float = 0.001,
                 warmup_steps: int = 0,
                 gamma: float = 1.,
                 last_epoch: int = -1
                 ):
        assert warmup_steps < first_cycle_steps

        self.first_cycle_steps = first_cycle_steps  # first cycle step size
        self.cycle_mult = cycle_mult                # cycle steps magnification
        self.base_max_lr = max_lr                   # first max learning rate
        self.max_lr = max_lr                        # max learning rate in the current cycle
        self.min_lr = min_lr                        # min learning rate
        self.warmup_steps = warmup_steps            # warmup step size
        self.gamma = gamma                          # decrease rate of max learning rate per cycle

        self.cur_cycle_steps = first_cycle_steps    # current cycle step size
        self.cycle = 0                              # cycle count
        self.step_in_cycle = last_epoch             # step within the current cycle

        super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)

        # initialize the learning rate to min_lr
        self.init_lr()

    def init_lr(self):
        self.base_lrs = []
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.min_lr
            self.base_lrs.append(self.min_lr)

    def get_lr(self):
        if self.step_in_cycle == -1:
            return self.base_lrs
        elif self.step_in_cycle < self.warmup_steps:
            # linear warmup from base_lr up to max_lr
            return [(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr
                    for base_lr in self.base_lrs]
        else:
            # cosine annealing from max_lr back down to base_lr
            return [base_lr + (self.max_lr - base_lr)
                    * (1 + math.cos(math.pi * (self.step_in_cycle - self.warmup_steps)
                                    / (self.cur_cycle_steps - self.warmup_steps))) / 2
                    for base_lr in self.base_lrs]

    def step(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
            self.step_in_cycle = self.step_in_cycle + 1
            if self.step_in_cycle >= self.cur_cycle_steps:
                # restart: begin a new, possibly longer, cycle
                self.cycle += 1
                self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps
                self.cur_cycle_steps = int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps
        else:
            if epoch >= self.first_cycle_steps:
                if self.cycle_mult == 1.:
                    self.step_in_cycle = epoch % self.first_cycle_steps
                    self.cycle = epoch // self.first_cycle_steps
                else:
                    # recover the cycle index from the geometric series of cycle lengths
                    n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))
                    self.cycle = n
                    self.step_in_cycle = epoch - int(self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1))
                    self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n)
            else:
                self.cur_cycle_steps = self.first_cycle_steps
                self.step_in_cycle = epoch

        self.max_lr = self.base_max_lr * (self.gamma ** self.cycle)
        self.last_epoch = math.floor(epoch)
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr
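
A minimal usage sketch, assuming the scheduler is stepped once per training iteration; the model, optimizer, and hyperparameters below are placeholders, not values from this commit.

import torch.nn as nn

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = CosineAnnealingWarmupRestarts(
    optimizer,
    first_cycle_steps=100,   # cycle length in steps
    cycle_mult=1.0,          # keep every cycle the same length
    max_lr=0.1,
    min_lr=0.001,
    warmup_steps=10,         # linear warmup at the start of each cycle
    gamma=0.5,               # halve max_lr after each restart
)

for step in range(300):
    # ... forward pass, backward pass, and optimizer.step() would go here ...
    scheduler.step()  # lr warms up to max_lr, decays to min_lr, then restarts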
File 5 of 5 — failed to render on the page and is not shown here.