# forked from PeihaoChen/WS-MGMap
#!/usr/bin/env python3
# Entry-point script: parses CLI args and launches train / eval / inference
# for a VLN-CE trainer registered in habitat_baselines' registry.
import argparse
import random
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
from typing import List
# Non-interactive backend so any plotting works on headless (no-display) nodes.
plt.switch_backend('agg')
# Quiet down glog and Magnum (Habitat's renderer) output.
# NOTE(review): these env vars are set before the habitat/torch imports below,
# presumably so the native libraries read them at import time — keep this order.
os.environ['GLOG_minloglevel'] = '2'
os.environ['MAGNUM_LOG'] = 'quiet'
warnings.filterwarnings("ignore")
import torch
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from vlnce_baselines.config.default import get_config, refine_config, set_saveDir_GPUs
from vlnce_baselines.common.utils import check_exist_file, save_sh_n_codes, save_config
def main():
    """Parse command-line options and hand them straight to ``run_exp``."""
    # Each entry is (flag strings, add_argument keyword options); declaring the
    # CLI as data keeps every option visible at a glance.
    arg_specs = [
        (("--run-type",),
         dict(choices=["train", "eval", "inference"], default="train",
              help="run type of the experiment (train, eval, inference)")),
        (("-c", "--exp-config"),
         dict(type=str, required=True,
              help="path to config yaml containing info about experiment")),
        (("-e", "--model-dir"),
         dict(default=None,
              help="path to save checkpoint, log and others")),
        (("--note",),
         dict(default='base',
              help="add extra note for running file")),
        (("-g", "--gpus"),
         dict(default=None, nargs="+", type=int,
              help="GPU id to run experiments")),
        (("opts",),
         dict(default=None, nargs=argparse.REMAINDER,
              help="Modify config options from command line")),
        (("--local_rank",),
         dict(default=-1, type=int,
              help='node rank for distributed training')),
    ]
    parser = argparse.ArgumentParser()
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    # Forward every parsed option to run_exp as keyword arguments.
    run_exp(**vars(parser.parse_args()))
def run_exp(exp_config: str,
            run_type: str,
            model_dir: str,
            note: str,
            gpus: List[int],
            opts=None,
            local_rank=-1) -> None:
    """Run an experiment for the given mode and config.

    Args:
        exp_config: path to the experiment's config yaml file.
        run_type: one of "train", "eval" or "inference".
        model_dir: directory where checkpoints, logs and other output are saved.
        note: extra note tagged onto the running file for bookkeeping.
        gpus: GPU ids to run the experiment on.
        opts: list of strings of additional config options.
        local_rank: node rank for distributed training (-1 when not distributed).

    Returns:
        None.
    """
    # Assemble the final config: load yaml + CLI overrides, then apply
    # save-dir/GPU/rank bookkeeping, then project-specific refinement.
    cfg = get_config(exp_config, opts)
    cfg = set_saveDir_GPUs(cfg, run_type, model_dir, note, gpus, local_rank)
    cfg = refine_config(cfg, local_rank)

    # Only rank 0 snapshots code and config, so distributed workers don't
    # race on the same files.
    if local_rank == 0:
        check_exist_file(cfg)
        save_sh_n_codes(
            cfg, run_type,
            ignore_dir=['habitat-lab', 'data', 'result', 'habitat-sim', 'temp'],
        )
        save_config(cfg, run_type)

    logger.add_filehandler(cfg.LOG_FILE)

    # Seed every RNG from the same config seed and pin cuDNN to its
    # deterministic, non-benchmarking mode for reproducibility.
    seed = cfg.TASK_CONFIG.SEED
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # Look up the trainer class registered under TRAINER_NAME and dispatch.
    trainer_cls = baseline_registry.get_trainer(cfg.TRAINER_NAME)
    assert trainer_cls is not None, f"{cfg.TRAINER_NAME} is not supported"
    trainer = trainer_cls(cfg)

    # Unknown run_type values fall through silently, matching the original
    # if/elif chain (argparse already restricts the value anyway).
    action = {
        "train": trainer.train,
        "eval": trainer.eval,
        "inference": trainer.inference,
    }.get(run_type)
    if action is not None:
        action()
# Standard entry-point guard: run only when executed as a script, not on import.
if __name__ == "__main__":
    main()