diff --git a/3d-2d-distillation/config/scannet/scannet_pspnet50.yaml b/3d-2d-distillation/config/scannet/scannet_pspnet50.yaml index f71f009..45265cc 100644 --- a/3d-2d-distillation/config/scannet/scannet_pspnet50.yaml +++ b/3d-2d-distillation/config/scannet/scannet_pspnet50.yaml @@ -17,9 +17,9 @@ TRAIN: zoom_factor: 8 # zoom factor for final prediction during training, must be in [1, 2, 4, 8] ignore_label: 255 aux_weight: 0.4 - train_gpu: [0,1,2,3] - workers: 16 # data loader workers - batch_size: 16 # batch size for training + train_gpu: [0] + workers: 2 # data loader workers + batch_size: 2 # batch size for training batch_size_val: 8 # batch size for validation during training, memory and speed tradeoff base_lr: 0.01 epochs: 50 @@ -30,7 +30,7 @@ TRAIN: manual_seed: print_freq: 10 save_freq: 1 - save_path: exp/ade20k/pspnet50/model + save_path: exp/scannet/pspnet50/model weight: #initmodel/train_epoch_1init.pth #exp/ade20k/pspnet50/model/train_epoch_100.pth #/media/lzz/f1cc9be0-388f-421d-a473-5b33192a9893/semseg_feature/initmodel/init.pth resume: #exp/ade20k/pspnet50/model/train_epoch_50.pth evaluate: False # evaluate on validation set, extra gpu memory needed and small batch_size_val is recommended diff --git a/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524125.xjqi-System-Product-Name b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524125.xjqi-System-Product-Name new file mode 100644 index 0000000..e69de29 diff --git a/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524173.xjqi-System-Product-Name b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524173.xjqi-System-Product-Name new file mode 100644 index 0000000..195bf12 Binary files /dev/null and b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524173.xjqi-System-Product-Name differ diff --git a/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524600.xjqi-System-Product-Name b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524600.xjqi-System-Product-Name new file mode 100644 index 0000000..62a1806 Binary files /dev/null and b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626524600.xjqi-System-Product-Name differ diff --git a/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626595359.xjqi-System-Product-Name b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626595359.xjqi-System-Product-Name new file mode 100644 index 0000000..3d59977 Binary files /dev/null and b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626595359.xjqi-System-Product-Name differ diff --git a/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626595507.xjqi-System-Product-Name b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626595507.xjqi-System-Product-Name new file mode 100644 index 0000000..498f80f Binary files /dev/null and b/3d-2d-distillation/exp/ade20k/pspnet50/model/events.out.tfevents.1626595507.xjqi-System-Product-Name differ diff --git a/3d-2d-distillation/exp/scannet/ade20k/model/train-20210718_160231.log b/3d-2d-distillation/exp/scannet/ade20k/model/train-20210718_160231.log new file mode 100644 index 0000000..031f138 --- /dev/null +++ b/3d-2d-distillation/exp/scannet/ade20k/model/train-20210718_160231.log @@ -0,0 +1,10 @@ +Traceback (most recent call last): + File "tool/train.py", line 487, in <module> + main() + File "tool/train.py", line 84, in main + args = get_parser() + File "tool/train.py", line 34, in
get_parser + cfg = config.load_cfg_from_cfg_file(args.config) + File "/media/sdb/lzz/release2/3D-to-2D-Distillation-for-Indoor-Scene-Parsing/3d-2d-distillation/util/config.py", line 63, in load_cfg_from_cfg_file + '{} is not a yaml file'.format(file) +AssertionError: config/scannet/scannet_ade20k.yaml is not a yaml file diff --git a/3d-2d-distillation/exp/scannet/ade20k/result/test-20210718_160231.log b/3d-2d-distillation/exp/scannet/ade20k/result/test-20210718_160231.log new file mode 100644 index 0000000..e90822f --- /dev/null +++ b/3d-2d-distillation/exp/scannet/ade20k/result/test-20210718_160231.log @@ -0,0 +1,10 @@ +Traceback (most recent call last): + File "tool/test.py", line 282, in <module> + main() + File "tool/test.py", line 70, in main + args, dataset_name = get_parser() + File "tool/test.py", line 27, in get_parser + cfg = config.load_cfg_from_cfg_file(args.config) + File "/media/sdb/lzz/release2/3D-to-2D-Distillation-for-Indoor-Scene-Parsing/3d-2d-distillation/util/config.py", line 63, in load_cfg_from_cfg_file + '{} is not a yaml file'.format(file) +AssertionError: config/scannet/scannet_ade20k.yaml is not a yaml file diff --git a/3d-2d-distillation/exp/scannet/ade20k/train.py b/3d-2d-distillation/exp/scannet/ade20k/train.py new file mode 100644 index 0000000..ea528af --- /dev/null +++ b/3d-2d-distillation/exp/scannet/ade20k/train.py @@ -0,0 +1,487 @@ +import os +import random +import time +import cv2 +import numpy as np +import logging +import argparse + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.parallel +import torch.optim +import torch.utils.data +import torch.multiprocessing as mp +import torch.distributed as dist +import apex +from tensorboardX import SummaryWriter + +from util import dataset, transform, config +from util.util import AverageMeter, poly_learning_rate, intersectionAndUnionGPU + +cv2.ocl.setUseOpenCL(False) +cv2.setNumThreads(0) + + +def get_parser(): + parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation') + parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file') + parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER) + args = parser.parse_args() + assert args.config is not None + cfg = config.load_cfg_from_cfg_file(args.config) + if args.opts is not None: + cfg = config.merge_cfg_from_list(cfg, args.opts) + return cfg + + +def get_logger(): + logger_name = "main-logger" + logger = logging.getLogger(logger_name) + logger.setLevel(logging.INFO) + handler = logging.StreamHandler() + fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s" + handler.setFormatter(logging.Formatter(fmt)) + logger.addHandler(handler) + return logger + + +def worker_init_fn(worker_id): + random.seed(args.manual_seed + worker_id) + + +def main_process(): + return not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % args.ngpus_per_node == 0)
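+ +# check() below validates the config before training. For the 'psp' branch the +# crop size must satisfy (train_h - 1) % 8 == 0 because PSPNet predicts at 1/8 +# input resolution; e.g. the 473x473 crops in this repo's configs give +# (473 - 1) / 8 + 1 = 60 aligned feature cells.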
+ +def check(args): + assert args.classes > 1 + assert args.zoom_factor in [1, 2, 4, 8] + if args.arch == 'psp': + assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0 + elif args.arch == 'psa': + if args.compact: + args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1 + args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1 + else: + assert (args.mask_h is None and args.mask_w is None) or ( + args.mask_h is not None and args.mask_w is not None) + if args.mask_h is None and args.mask_w is None: + args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1 + args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1 + else: + assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and ( + args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1) + assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and ( + args.mask_w <= 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1) + else: + raise Exception('architecture {} not supported yet'.format(args.arch)) + + +def main(): + args = get_parser() + check(args) + os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.train_gpu) + if args.manual_seed is not None: + random.seed(args.manual_seed) + np.random.seed(args.manual_seed) + torch.manual_seed(args.manual_seed) + torch.cuda.manual_seed(args.manual_seed) + torch.cuda.manual_seed_all(args.manual_seed) + cudnn.benchmark = False + cudnn.deterministic = True + if args.dist_url == "env://" and args.world_size == -1: + args.world_size = int(os.environ["WORLD_SIZE"]) + args.distributed = args.world_size > 1 or args.multiprocessing_distributed + args.ngpus_per_node = len(args.train_gpu) + if len(args.train_gpu) == 1: + args.sync_bn = False + args.distributed = False + args.multiprocessing_distributed = False + if args.multiprocessing_distributed: + args.world_size = args.ngpus_per_node * args.world_size + mp.spawn(main_worker, nprocs=args.ngpus_per_node, args=(args.ngpus_per_node, args)) + else: + main_worker(args.train_gpu, args.ngpus_per_node, args) + + +def main_worker(gpu, ngpus_per_node, argss): + global args + args = argss + if args.sync_bn: + if args.multiprocessing_distributed: + BatchNorm = apex.parallel.SyncBatchNorm + else: + from lib.sync_bn.modules import BatchNorm2d + BatchNorm = BatchNorm2d + else: + BatchNorm = nn.BatchNorm2d + if args.distributed: + if args.dist_url == "env://" and args.rank == -1: + args.rank = int(os.environ["RANK"]) + if args.multiprocessing_distributed: + args.rank = args.rank * ngpus_per_node + gpu + dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) + + criterion = nn.CrossEntropyLoss(ignore_index=args.ignore_label) + if args.arch == 'psp': + from model.pspnet import PSPNet + model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, criterion=criterion, BatchNorm=BatchNorm) + modules_ori = [model.layer0, model.layer1, model.layer2, model.layer3, model.layer4] + modules_new = [model.ppm, model.reg, model.reg2, model.cls2, model.cls3, model.reg3] + elif args.arch == 'psa': + from model.psanet import PSANet + model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, psa_type=args.psa_type, + compact=args.compact, shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w, + normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax, + criterion=criterion, + BatchNorm=BatchNorm) + modules_ori = [model.layer0, model.layer1, model.layer2, model.layer3, model.layer4] + modules_new = [model.psa, model.cls2, model.aux] + params_list = [] + for module in modules_ori: + params_list.append(dict(params=module.parameters(), lr=args.base_lr)) + for module in modules_new: + params_list.append(dict(params=module.parameters(), lr=args.base_lr * 10)) + #print (module.parameters()) + #params_list.append(dict(params=model.bnbias, lr=args.base_lr * 10)) + args.index_split = 5
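+ # params_list holds the five pretrained backbone groups first, then the newly + # added heads; index_split = 5 marks that boundary. train() keeps the backbone + # at base_lr and the new heads at 10x base_lr under the poly schedule.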
+ optimizer = torch.optim.SGD(params_list, lr=args.base_lr, momentum=args.momentum, weight_decay=args.weight_decay) + + if main_process(): + global logger, writer + logger = get_logger() + writer = SummaryWriter(args.save_path) + logger.info(args) + logger.info("=> creating model ...") + logger.info("Classes: {}".format(args.classes)) + logger.info(model) + if args.distributed: + torch.cuda.set_device(gpu) + args.batch_size = int(args.batch_size / ngpus_per_node) + args.batch_size_val = int(args.batch_size_val / ngpus_per_node) + args.workers = int(args.workers / ngpus_per_node) + if args.use_apex: + model, optimizer = apex.amp.initialize(model.cuda(), optimizer, opt_level=args.opt_level, keep_batchnorm_fp32=args.keep_batchnorm_fp32, loss_scale=args.loss_scale) + model = apex.parallel.DistributedDataParallel(model) + else: + model = torch.nn.parallel.DistributedDataParallel(model.cuda(), device_ids=[gpu]) + + else: + model = torch.nn.DataParallel(model.cuda()) + + if args.weight: + if os.path.isfile(args.weight): + if main_process(): + logger.info("=> loading weight '{}'".format(args.weight)) + #checkpoint = args.weight + checkpoint = torch.load(args.weight, map_location=lambda storage, loc: storage.cuda()) #torch.load(args.weight) + model.load_state_dict(checkpoint, strict=False) #['state_dict']) + if main_process(): + logger.info("=> loaded weight '{}'".format(args.weight)) + else: + if main_process(): + logger.info("=> no weight found at '{}'".format(args.weight)) + + if args.resume: + if os.path.isfile(args.resume): + if main_process(): + logger.info("=> loading checkpoint '{}'".format(args.resume)) + # checkpoint = torch.load(args.resume) + checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda()) + args.start_epoch = checkpoint['epoch'] + model.load_state_dict(checkpoint['state_dict'], strict=False) + optimizer.load_state_dict(checkpoint['optimizer']) + if main_process(): + logger.info("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch'])) + else: + if main_process(): + logger.info("=> no checkpoint found at '{}'".format(args.resume)) + + value_scale = 255 + mean = [0.485, 0.456, 0.406] + mean = [item * value_scale for item in mean] + std = [0.229, 0.224, 0.225] + std = [item * value_scale for item in std] + + train_transform = transform.Compose([ + transform.RandScale([args.scale_min, args.scale_max]), + transform.RandRotate([args.rotate_min, args.rotate_max], padding=mean, ignore_label=args.ignore_label), + transform.RandomGaussianBlur(), + transform.RandomHorizontalFlip(), + transform.Crop([args.train_h, args.train_w], crop_type='rand', padding=mean, ignore_label=args.ignore_label), + transform.ToTensor(), + transform.Normalize(mean=mean, std=std)]) + train_data = dataset.SemData(split='train', data_root=args.data_root, data_list=args.train_list, transform=train_transform) + + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) + else: + train_sampler = None + train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True) + if args.evaluate: + val_transform = transform.Compose([ + transform.Crop([args.train_h, args.train_w], crop_type='center', padding=mean, ignore_label=args.ignore_label), + transform.ToTensor(), + transform.Normalize(mean=mean, std=std)])
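+ # Validation during training (args.evaluate) runs on center crops at the + # training resolution; full-image multi-scale evaluation is handled separately + # by tool/test.py (see the scales list in the test logs below).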
+ val_data = dataset.SemDataTest(split='val', data_root=args.data_root, data_list=args.val_list, transform=val_transform) + if args.distributed: + val_sampler = torch.utils.data.distributed.DistributedSampler(val_data) + else: + val_sampler = None + + val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_val, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=val_sampler) + + for epoch in range(args.start_epoch, args.epochs): + print ('epoch_worker',epoch) + epoch_log = epoch + 1 + if args.distributed: + train_sampler.set_epoch(epoch) + print (111,flush=True) + loss_train, mIoU_train, mAcc_train, allAcc_train = train(train_loader, model, optimizer, epoch) + print (222,flush=True) + if main_process(): + writer.add_scalar('loss_train', loss_train, epoch_log) + writer.add_scalar('mIoU_train', mIoU_train, epoch_log) + writer.add_scalar('mAcc_train', mAcc_train, epoch_log) + writer.add_scalar('allAcc_train', allAcc_train, epoch_log) + + if (epoch_log % args.save_freq == 0) and main_process(): + filename = args.save_path + '/train_epoch_' + str(epoch_log) + '.pth' + logger.info('Saving checkpoint to: ' + filename) + torch.save({'epoch': epoch_log, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, filename) + if epoch_log / args.save_freq > 2: + deletename = args.save_path + '/train_epoch_' + str(epoch_log - args.save_freq * 2) + '.pth' + #try: + # os.remove(deletename) + #except: + # pass + if args.evaluate: + loss_val, mIoU_val, mAcc_val, allAcc_val = validate(val_loader, model, criterion) + if main_process(): + writer.add_scalar('loss_val', loss_val, epoch_log) + writer.add_scalar('mIoU_val', mIoU_val, epoch_log) + writer.add_scalar('mAcc_val', mAcc_val, epoch_log) + writer.add_scalar('allAcc_val', allAcc_val, epoch_log) + + +def train(train_loader, model, optimizer, epoch): + batch_time = AverageMeter() + data_time = AverageMeter() + main_loss_meter = AverageMeter() + aux_loss_meter = AverageMeter() + reg_loss_meter = AverageMeter() + loss_meter = AverageMeter() + intersection_meter = AverageMeter() + union_meter = AverageMeter() + target_meter = AverageMeter() + final_loss_meter = AverageMeter() + model.train() + + '''for p in model.module.layer0.parameters(): + p.require_gradient=False + for p in model.module.layer1.parameters(): + p.require_gradient=False + for p in model.module.layer2.parameters(): + p.require_gradient=False + for p in model.module.layer3.parameters(): + p.require_gradient=False + for p in model.module.layer4.parameters(): + p.require_gradient=False + for p in model.module.ppm.parameters(): + p.require_gradient=False + for p in model.module.cls.parameters(): + p.require_gradient=False + for p in model.module.aux.parameters(): + p.require_gradient=False + + model.module.layer0.eval() + model.module.layer1.eval() + model.module.layer2.eval() + model.module.layer3.eval() + model.module.ppm.eval() + model.module.reg.eval() + model.module.aux.eval()''' + end = time.time() + max_iter = args.epochs * len(train_loader) + for i, (input, target, feat, featidx) in enumerate(train_loader): + #print (i,flush=True) + data_time.update(time.time() - end) + if args.zoom_factor != 8: + h = int((target.size()[1] - 1) / 8 * args.zoom_factor + 1) + w = int((target.size()[2] - 1) / 8 * args.zoom_factor + 1) + # 'nearest' mode doesn't support align_corners mode and 'bilinear' mode is fine for downsampling + target = F.interpolate(target.unsqueeze(1).float(), size=(h, w), mode='bilinear', align_corners=True).squeeze(1).long() + input = input.cuda(non_blocking=True) + target = target.cuda(non_blocking=True)
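+ # Each batch carries feat and featidx alongside the image and label: feat + # appears to be the pre-computed 3D feature map distilled into the 2D network, + # and featidx its projection indices (naming inferred from usage here; the + # model returns the distillation terms reg_loss and final_loss from them below).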
+ #print ('t1',flush=True) + feat = feat.cuda(non_blocking=True) + #print (feat.shape,flush=True) + featidx = featidx.cuda(non_blocking=True) + #print ('t2',flush=True) + output, main_loss, aux_loss, reg_loss, final_loss = model(input, target, feat, featidx) + #print ('t3',flush=True) + if not args.multiprocessing_distributed: + main_loss, aux_loss, reg_loss, final_loss = torch.mean(main_loss), torch.mean(aux_loss), torch.mean(reg_loss), torch.mean(final_loss) + #print (reg_loss,main_loss,aux_loss,flush=True) + loss = main_loss + args.aux_weight * aux_loss + reg_loss + final_loss + #print ('t4',flush=True) + optimizer.zero_grad() + #if args.use_apex and args.multiprocessing_distributed: + # with apex.amp.scale_loss(loss, optimizer) as scaled_loss: + # scaled_loss.backward() + #print ('apex...',flush=True) + #else: + loss.backward() + #print ('t5',flush=True) + optimizer.step() + #print ('apexfinished',flush=True) + n = input.size(0) + if args.multiprocessing_distributed: + #print ('t6',flush=True) + main_loss, aux_loss, reg_loss, final_loss, loss = main_loss.detach() * n, aux_loss * n, reg_loss * n, final_loss * n, loss * n # not considering ignore pixels + #print ('t7',flush=True) + count = target.new_tensor([n], dtype=torch.long) + #reg_loss=torch.Tensor(reg_loss).cuda() + #print ('t8',flush=True) + dist.all_reduce(main_loss), dist.all_reduce(aux_loss), dist.all_reduce(reg_loss), dist.all_reduce(final_loss), dist.all_reduce(loss), dist.all_reduce(count) + n = count.item() + main_loss, aux_loss, reg_loss, final_loss, loss = main_loss / n, aux_loss / n, reg_loss / n, final_loss / n, loss / n + #print ('2',flush=True) + #print ('t9',flush=True) + intersection, union, target = intersectionAndUnionGPU(output, target, args.classes, args.ignore_label) + if args.multiprocessing_distributed: + dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target) + intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy() + intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target) + #print ('t10',flush=True) + accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10) + #print ('acc',flush=True) + main_loss_meter.update(main_loss.item(), n) + aux_loss_meter.update(aux_loss.item(), n) + reg_loss_meter.update(reg_loss.item(), n) + final_loss_meter.update(final_loss.item(), n) + loss_meter.update(loss.item(), n) + batch_time.update(time.time() - end) + end = time.time() + #print ('t11',flush=True) + current_iter = epoch * len(train_loader) + i + 1 + current_lr = poly_learning_rate(args.base_lr, current_iter, max_iter, power=args.power) + #print (current_lr,'learningrate',flush=True) + for index in range(0, args.index_split): + optimizer.param_groups[index]['lr'] = current_lr + for index in range(args.index_split, len(optimizer.param_groups)): + optimizer.param_groups[index]['lr'] = current_lr * 10 + remain_iter = max_iter - current_iter + remain_time = remain_iter * batch_time.avg + t_m, t_s = divmod(remain_time, 60) + t_h, t_m = divmod(t_m, 60) + remain_time = '{:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s)) + + if (i + 1) % args.print_freq == 0 and main_process(): + logger.info('Epoch: [{}/{}][{}/{}] ' + 'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' + 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Remain {remain_time} ' + 'MainLoss {main_loss_meter.val:.4f} ' + 'AuxLoss {aux_loss_meter.val:.4f} ' + 'RegLoss {reg_loss_meter.val:.4f} ' + 'FinalLoss {final_loss_meter.val:.4f} ' + 'Loss
{loss_meter.val:.4f} ' + 'Accuracy {accuracy:.4f}.'.format(epoch+1, args.epochs, i + 1, len(train_loader), + batch_time=batch_time, + data_time=data_time, + remain_time=remain_time, + main_loss_meter=main_loss_meter, + aux_loss_meter=aux_loss_meter, + reg_loss_meter=reg_loss_meter, + final_loss_meter=final_loss_meter, + loss_meter=loss_meter, + accuracy=accuracy)) + if main_process(): + writer.add_scalar('loss_train_batch', main_loss_meter.val, current_iter) + writer.add_scalar('mIoU_train_batch', np.mean(intersection / (union + 1e-10)), current_iter) + writer.add_scalar('mAcc_train_batch', np.mean(intersection / (target + 1e-10)), current_iter) + writer.add_scalar('allAcc_train_batch', accuracy, current_iter) + #print ('t12',flush=True) + iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) + accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10) + mIoU = np.mean(iou_class) + mAcc = np.mean(accuracy_class) + + allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10) + if main_process(): + logger.info('Train result at epoch [{}/{}]: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(epoch+1, args.epochs, mIoU, mAcc, allAcc)) + return main_loss_meter.avg, mIoU, mAcc, allAcc + + +def validate(val_loader, model, criterion): + if main_process(): + logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>') + batch_time = AverageMeter() + data_time = AverageMeter() + loss_meter = AverageMeter() + intersection_meter = AverageMeter() + union_meter = AverageMeter() + target_meter = AverageMeter() + + model.eval() + end = time.time() + for i, (input, target) in enumerate(val_loader): + data_time.update(time.time() - end) + input = input.cuda(non_blocking=True) + target = target.cuda(non_blocking=True) + output = model(input) + if args.zoom_factor != 8: + output = F.interpolate(output, size=target.size()[1:], mode='bilinear', align_corners=True) + loss = criterion(output, target) + + n = input.size(0) + if args.multiprocessing_distributed: + loss = loss * n # not considering ignore pixels + count = target.new_tensor([n], dtype=torch.long) + dist.all_reduce(loss), dist.all_reduce(count) + n = count.item() + loss = loss / n + else: + loss = torch.mean(loss) + + output = output.max(1)[1] + intersection, union, target = intersectionAndUnionGPU(output, target, args.classes, args.ignore_label) + if args.multiprocessing_distributed: + dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target) + intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy() + intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target) + + accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10) + loss_meter.update(loss.item(), input.size(0)) + batch_time.update(time.time() - end) + end = time.time() + if ((i + 1) % args.print_freq == 0) and main_process(): + logger.info('Test: [{}/{}] ' + 'Data {data_time.val:.3f} ({data_time.avg:.3f}) ' + 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) ' + 'Accuracy {accuracy:.4f}.'.format(i + 1, len(val_loader), + data_time=data_time, + batch_time=batch_time, + loss_meter=loss_meter, + accuracy=accuracy)) + + iou_class = intersection_meter.sum / (union_meter.sum + 1e-10) + accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10) + mIoU = np.mean(iou_class) + mAcc = np.mean(accuracy_class) + allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10) + if 
main_process(): + logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc)) + for i in range(args.classes): + logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i])) + logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<') + return loss_meter.avg, mIoU, mAcc, allAcc + + +if __name__ == '__main__': + main() diff --git a/3d-2d-distillation/exp/scannet/ade20k/train.sh b/3d-2d-distillation/exp/scannet/ade20k/train.sh new file mode 100644 index 0000000..26ffa38 --- /dev/null +++ b/3d-2d-distillation/exp/scannet/ade20k/train.sh @@ -0,0 +1,25 @@ +#!/bin/sh +PARTITION=gpu +PYTHON=python + +dataset=$1 +exp_name=$2 +exp_dir=exp/${dataset}/${exp_name} +model_dir=${exp_dir}/model +result_dir=${exp_dir}/result +config=config/${dataset}/${dataset}_${exp_name}.yaml +now=$(date +"%Y%m%d_%H%M%S") + +mkdir -p ${model_dir} ${result_dir} +cp tool/train.sh tool/train.py ${config} ${exp_dir} + +export PYTHONPATH=./ +#sbatch -p $PARTITION --gres=gpu:8 -c16 --job-name=train \ +$PYTHON -u tool/train.py \ + --config=${config} \ + 2>&1 | tee ${model_dir}/train-$now.log + +#sbatch -p $PARTITION --gres=gpu:1 -c2 --job-name=test \ +$PYTHON -u tool/test.py \ + --config=${config} \ + 2>&1 | tee ${result_dir}/test-$now.log diff --git a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201356.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201356.log new file mode 100644 index 0000000..e69de29 diff --git a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201420.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201420.log new file mode 100644 index 0000000..23477e6 --- /dev/null +++ b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201420.log @@ -0,0 +1,318 @@ +[2021-07-17 20:14:23,127 INFO test.py line 74 23548] arch: psp +aux_weight: 0.4 +base_lr: 0.01 +base_size: 1297 +batch_size: 16 +batch_size_val: 8 +classes: 40 +colors_path: dataset/scannet/scannet_colors.txt +data_root: +dist_backend: nccl +dist_url: tcp://127.0.0.1:6789 +epochs: 50 +evaluate: False +has_prediction: False +ignore_label: 255 +index_start: 0 +index_step: 0 +keep_batchnorm_fp32: None +layers: 50 +loss_scale: None +manual_seed: None +model_path: exp/scannet/pspnet50/model/train_epoch_50.pth +momentum: 0.9 +multiprocessing_distributed: True +names_path: dataset/scannet/scannet_names.txt +opt_level: O0 +power: 0.9 +print_freq: 10 +rank: 0 +resume: None +rotate_max: 10 +rotate_min: -10 +save_folder: exp/scannet/pspnet50/result/val +save_freq: 1 +save_path: exp/ade20k/pspnet50/model +scale_max: 2.0 +scale_min: 0.5 +scales: [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] +split: val +start_epoch: 0 +sync_bn: True +test_gpu: [0] +test_h: 473 +test_list: list_val +test_w: 473 +train_gpu: [0, 1, 2, 3] +train_h: 473 +train_list: list +train_w: 473 +use_apex: True +val_list: val +weight: None +weight_decay: 0.0001 +workers: 16 +world_size: 1 +zoom_factor: 8 +[2021-07-17 20:14:23,128 INFO test.py line 75 23548] => creating model ... +[2021-07-17 20:14:23,128 INFO test.py line 76 23548] Classes: 40 +Totally 2635 samples in val set. +Starting Checking image&label pair val list... +Checking image&label pair val list done! +/home/zzliu/anaconda3/envs/pyhton3/lib/python3.7/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. 
+ warnings.warn(warning.format(ret)) +[2021-07-17 20:14:23,655 INFO test.py line 108 23548] PSPNet( + (criterion): CrossEntropyLoss() + (criterion_reg): MSELoss() + (layer0): Sequential( + (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (5): ReLU(inplace=True) + (6): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (8): ReLU(inplace=True) + (9): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + ) + (layer1): Sequential( + (0): Bottleneck( + (conv1): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer2): Sequential( + (0): Bottleneck( + (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( 
+ (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (3): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer3): Sequential( + (0): Bottleneck( + (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (3): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (4): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (5): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer4): Sequential( + (0): Bottleneck( + (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (ppm): PPM( + (features): ModuleList( + (0): Sequential( + (0): AdaptiveAvgPool2d(output_size=1) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (1): Sequential( + (0): AdaptiveAvgPool2d(output_size=2) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (2): Sequential( + (0): AdaptiveAvgPool2d(output_size=3) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (3): Sequential( + (0): AdaptiveAvgPool2d(output_size=6) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + ) + ) + (cls3): Sequential( + (0): Conv2d(4096, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1)) + (4): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (5): ReLU(inplace=True) + ) + (cls2): Sequential( + (0): Conv2d(288, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Dropout2d(p=0.1, inplace=False) + (4): Conv2d(96, 40, kernel_size=(1, 1), stride=(1, 1)) + ) + (reg): Sequential( + (0): Conv2d(4096, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(512, 96, kernel_size=(1, 1), stride=(1, 1)) + ) + (reg2): Sequential( + (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1)) + (4): ReLU(inplace=True) + ) + (reg3): Sequential( + (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1)) + (4): ReLU(inplace=True) + ) + (bn): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (bn2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) +) +Traceback (most recent call last): + File "tool/test.py", line 282, in <module> + main() + File "tool/test.py", line 117, in main + raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path)) +RuntimeError: => no checkpoint found at 'exp/scannet/pspnet50/model/train_epoch_50.pth' diff --git a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201523.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201523.log new
file mode 100644 index 0000000..1ee173d --- /dev/null +++ b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201523.log @@ -0,0 +1,318 @@ +[2021-07-17 20:15:26,437 INFO test.py line 74 23702] arch: psp +aux_weight: 0.4 +base_lr: 0.01 +base_size: 1297 +batch_size: 16 +batch_size_val: 8 +classes: 40 +colors_path: dataset/scannet/scannet_colors.txt +data_root: +dist_backend: nccl +dist_url: tcp://127.0.0.1:6789 +epochs: 50 +evaluate: False +has_prediction: False +ignore_label: 255 +index_start: 0 +index_step: 0 +keep_batchnorm_fp32: None +layers: 50 +loss_scale: None +manual_seed: None +model_path: exp/scannet/pspnet50/model/train_epoch_50.pth +momentum: 0.9 +multiprocessing_distributed: True +names_path: dataset/scannet/scannet_names.txt +opt_level: O0 +power: 0.9 +print_freq: 10 +rank: 0 +resume: None +rotate_max: 10 +rotate_min: -10 +save_folder: exp/scannet/pspnet50/result/val +save_freq: 1 +save_path: exp/ade20k/pspnet50/model +scale_max: 2.0 +scale_min: 0.5 +scales: [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] +split: val +start_epoch: 0 +sync_bn: True +test_gpu: [0] +test_h: 473 +test_list: list_val +test_w: 473 +train_gpu: [0, 1, 2, 3] +train_h: 473 +train_list: list +train_w: 473 +use_apex: True +val_list: val +weight: None +weight_decay: 0.0001 +workers: 16 +world_size: 1 +zoom_factor: 8 +[2021-07-17 20:15:26,438 INFO test.py line 75 23702] => creating model ... +[2021-07-17 20:15:26,438 INFO test.py line 76 23702] Classes: 40 +Totally 2635 samples in val set. +Starting Checking image&label pair val list... +Checking image&label pair val list done! +/home/zzliu/anaconda3/envs/pyhton3/lib/python3.7/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. + warnings.warn(warning.format(ret)) +[2021-07-17 20:15:26,917 INFO test.py line 108 23702] PSPNet( + (criterion): CrossEntropyLoss() + (criterion_reg): MSELoss() + (layer0): Sequential( + (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (5): ReLU(inplace=True) + (6): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (8): ReLU(inplace=True) + (9): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + ) + (layer1): Sequential( + (0): Bottleneck( + (conv1): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer2): Sequential( + (0): Bottleneck( + (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (3): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer3): Sequential( + (0): Bottleneck( + (conv1): Conv2d(512, 256, kernel_size=(1, 1), 
stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (3): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (4): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (5): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, 
affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer4): Sequential( + (0): Bottleneck( + (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (ppm): PPM( + (features): ModuleList( + (0): Sequential( + (0): AdaptiveAvgPool2d(output_size=1) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (1): Sequential( + (0): AdaptiveAvgPool2d(output_size=2) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (2): Sequential( + (0): AdaptiveAvgPool2d(output_size=3) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (3): Sequential( + (0): AdaptiveAvgPool2d(output_size=6) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + ) + ) + (cls3): Sequential( + (0): Conv2d(4096, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1)) + (4): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (5): ReLU(inplace=True) + ) + (cls2): Sequential( + (0): Conv2d(288, 96, kernel_size=(3, 
3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Dropout2d(p=0.1, inplace=False) + (4): Conv2d(96, 40, kernel_size=(1, 1), stride=(1, 1)) + ) + (reg): Sequential( + (0): Conv2d(4096, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(512, 96, kernel_size=(1, 1), stride=(1, 1)) + ) + (reg2): Sequential( + (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1)) + (4): ReLU(inplace=True) + ) + (reg3): Sequential( + (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1)) + (4): ReLU(inplace=True) + ) + (bn): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (bn2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) +) +Traceback (most recent call last): + File "tool/test.py", line 282, in <module> + main() + File "tool/test.py", line 117, in main + raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path)) +RuntimeError: => no checkpoint found at 'exp/scannet/pspnet50/model/train_epoch_50.pth' diff --git a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201611.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201611.log new file mode 100644 index 0000000..d409ceb --- /dev/null +++ b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_201611.log @@ -0,0 +1,312 @@ +[2021-07-17 20:16:21,405 INFO test.py line 74 23867] arch: psp +aux_weight: 0.4 +base_lr: 0.01 +base_size: 1297 +batch_size: 8 +batch_size_val: 8 +classes: 40 +colors_path: dataset/scannet/scannet_colors.txt +data_root: +dist_backend: nccl +dist_url: tcp://127.0.0.1:6789 +epochs: 50 +evaluate: False +has_prediction: False +ignore_label: 255 +index_start: 0 +index_step: 0 +keep_batchnorm_fp32: None +layers: 50 +loss_scale: None +manual_seed: None +model_path: exp/scannet/pspnet50/model/train_epoch_50.pth +momentum: 0.9 +multiprocessing_distributed: True +names_path: dataset/scannet/scannet_names.txt +opt_level: O0 +power: 0.9 +print_freq: 10 +rank: 0 +resume: None +rotate_max: 10 +rotate_min: -10 +save_folder: exp/scannet/pspnet50/result/val +save_freq: 1 +save_path: exp/ade20k/pspnet50/model +scale_max: 2.0 +scale_min: 0.5 +scales: [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] +split: val +start_epoch: 0 +sync_bn: True +test_gpu: [0] +test_h: 473 +test_list: list_val +test_w: 473 +train_gpu: [0, 1] +train_h: 473 +train_list: list +train_w: 473 +use_apex: True +val_list: val +weight: None +weight_decay: 0.0001 +workers: 8 +world_size: 1 +zoom_factor: 8 +[2021-07-17 20:16:21,405 INFO test.py line 75 23867] => creating model ... +[2021-07-17 20:16:21,405 INFO test.py line 76 23867] Classes: 40 +Totally 2635 samples in val set. +Starting Checking image&label pair val list... +Checking image&label pair val list done!
+/home/zzliu/anaconda3/envs/pyhton3/lib/python3.7/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead. + warnings.warn(warning.format(ret)) +[2021-07-17 20:16:21,888 INFO test.py line 108 23867] PSPNet( + (criterion): CrossEntropyLoss() + (criterion_reg): MSELoss() + (layer0): Sequential( + (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (5): ReLU(inplace=True) + (6): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (8): ReLU(inplace=True) + (9): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + ) + (layer1): Sequential( + (0): Bottleneck( + (conv1): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer2): Sequential( + (0): Bottleneck( + (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): 
Sequential( + (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (3): Bottleneck( + (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer3): Sequential( + (0): Bottleneck( + (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + 
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (3): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (4): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (5): Bottleneck( + (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), bias=False) + (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (layer4): Sequential( + (0): Bottleneck( + (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + (downsample): Sequential( + (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + ) + (1): Bottleneck( + (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + (2): Bottleneck( + (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn1): BatchNorm2d(512, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), bias=False) + (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) + (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (relu): ReLU(inplace=True) + ) + ) + (ppm): PPM( + (features): ModuleList( + (0): Sequential( + (0): AdaptiveAvgPool2d(output_size=1) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (1): Sequential( + (0): AdaptiveAvgPool2d(output_size=2) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (2): Sequential( + (0): AdaptiveAvgPool2d(output_size=3) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + (3): Sequential( + (0): AdaptiveAvgPool2d(output_size=6) + (1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) + (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (3): ReLU(inplace=True) + ) + ) + ) + (cls3): Sequential( + (0): Conv2d(4096, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(512, 192, kernel_size=(1, 1), stride=(1, 1)) + (4): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (5): ReLU(inplace=True) + ) + (cls2): Sequential( + (0): Conv2d(288, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Dropout2d(p=0.1, inplace=False) + (4): Conv2d(96, 40, kernel_size=(1, 1), stride=(1, 1)) + ) + (reg): Sequential( + (0): Conv2d(4096, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(512, 96, kernel_size=(1, 1), stride=(1, 1)) + ) + (reg2): Sequential( + (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1)) + (4): ReLU(inplace=True) + ) + (reg3): Sequential( + (0): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): ReLU(inplace=True) + (3): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1)) + (4): ReLU(inplace=True) + ) + (bn): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (bn2): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) +) diff --git a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_202317.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210717_202317.log new file mode 100644 index 0000000..e69de29 diff --git 
a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210718_160238.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210718_160238.log
new file mode 100644
index 0000000..3a9a458
--- /dev/null
+++ b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210718_160238.log
@@ -0,0 +1,318 @@
+[2021-07-18 16:02:46,168 INFO test.py line 74 6102] arch: psp
+aux_weight: 0.4
+base_lr: 0.01
+base_size: 1297
+batch_size: 1
+batch_size_val: 8
+classes: 40
+colors_path: dataset/scannet/scannet_colors.txt
+data_root:
+dist_backend: nccl
+dist_url: tcp://127.0.0.1:6789
+epochs: 50
+evaluate: False
+has_prediction: False
+ignore_label: 255
+index_start: 0
+index_step: 0
+keep_batchnorm_fp32: None
+layers: 50
+loss_scale: None
+manual_seed: None
+model_path: exp/scannet/pspnet50/model/train_epoch_50.pth
+momentum: 0.9
+multiprocessing_distributed: True
+names_path: dataset/scannet/scannet_names.txt
+opt_level: O0
+power: 0.9
+print_freq: 10
+rank: 0
+resume: None
+rotate_max: 10
+rotate_min: -10
+save_folder: exp/scannet/pspnet50/result/val
+save_freq: 1
+save_path: exp/ade20k/pspnet50/model
+scale_max: 2.0
+scale_min: 0.5
+scales: [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
+split: val
+start_epoch: 0
+sync_bn: True
+test_gpu: [0]
+test_h: 473
+test_list: list_val
+test_w: 473
+train_gpu: [0]
+train_h: 473
+train_list: list
+train_w: 473
+use_apex: True
+val_list: val
+weight: None
+weight_decay: 0.0001
+workers: 1
+world_size: 1
+zoom_factor: 8
+[2021-07-18 16:02:46,169 INFO test.py line 75 6102] => creating model ...
+[2021-07-18 16:02:46,169 INFO test.py line 76 6102] Classes: 40
+Totally 2635 samples in val set.
+Starting Checking image&label pair val list...
+Checking image&label pair val list done!
+/home/zzliu/anaconda3/envs/pyhton3/lib/python3.7/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead.
+  warnings.warn(warning.format(ret))
+[2021-07-18 16:02:46,653 INFO test.py line 108 6102] PSPNet(
+    [... module-by-module printout identical to the PSPNet dump in test-20210717_201611.log above; omitted ...]
+)
+Traceback (most recent call last):
+  File "tool/test.py", line 282, in <module>
+    main()
+  File "tool/test.py", line 117, in main
+    raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
+RuntimeError: => no checkpoint found at 'exp/scannet/pspnet50/model/train_epoch_50.pth'
diff --git a/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210718_160506.log b/3d-2d-distillation/exp/scannet/pspnet50/result/test-20210718_160506.log
new file mode 100644
index 0000000..e69de29
diff --git a/3d-2d-distillation/exp/scannet/pspnet50/scannet_pspnet50.yaml b/3d-2d-distillation/exp/scannet/pspnet50/scannet_pspnet50.yaml
index f71f009..b66f255 100644
--- a/3d-2d-distillation/exp/scannet/pspnet50/scannet_pspnet50.yaml
+++ b/3d-2d-distillation/exp/scannet/pspnet50/scannet_pspnet50.yaml
@@ -17,9 +17,9 @@ TRAIN:
   zoom_factor: 8 # zoom factor for final prediction during training, be in [1, 2, 4, 8]
   ignore_label: 255
   aux_weight: 0.4
-  train_gpu: [0,1,2,3]
-  workers: 16 # data loader workers
-  batch_size: 16 # batch size for training
+  train_gpu: [0]
+  workers: 2 # data loader workers
+  batch_size: 2 # batch size for training
   batch_size_val: 8 # batch size for validation during training, memory and speed tradeoff
   base_lr: 0.01
   epochs: 50
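
Editor's note: both test runs above abort at the same point. The printed args show model_path: exp/scannet/pspnet50/model/train_epoch_50.pth while save_path is still exp/ade20k/pspnet50/model, so training most likely wrote its checkpoints under the ADE20K path and tool/test.py found nothing at the ScanNet path. Below is a minimal sketch of the guard that raises this RuntimeError, assuming the usual semseg-style loading code; the helper name load_checkpoint is illustrative, not from the repository.

import os
import torch

def load_checkpoint(model, model_path):
    # Fail loudly when the checkpoint file is absent -- the branch both
    # test runs above hit, because nothing was saved at the ScanNet path.
    if not os.path.isfile(model_path):
        raise RuntimeError("=> no checkpoint found at '{}'".format(model_path))
    # Load onto CPU first so a single-GPU test run does not require
    # the devices the model was trained on.
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    return model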
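
The UserWarning from torch/nn/_reduction.py in both logs fires when a loss module is constructed with the deprecated size_average/reduce arguments. A hedged equivalent using the current reduction keyword is sketched below; ignore_index=255 mirrors the configs' ignore_label: 255, and the exact constructor arguments in the repository may differ.

import torch.nn as nn

# Legacy style that triggers the warning seen in the logs:
#   nn.CrossEntropyLoss(ignore_index=255, size_average=True)
# Current equivalent:
criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='mean')
criterion_reg = nn.MSELoss(reduction='mean')  # the model also registers (criterion_reg): MSELoss()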