benchmark_runner.py
from __future__ import print_function

import argparse
import logging
from ast import literal_eval

from utils import metrics_manager
from utils import data_manager

# Support both Python 2 (ConfigParser) and Python 3 (configparser).
try:
    import ConfigParser
    config = ConfigParser.ConfigParser()
except ImportError:
    import configparser
    config = configparser.ConfigParser()

# Example arguments:
# --metrics-policy metrics_parameters_images --task-name custom.p316xlarge.fp32.bs32 \
# --metrics-suffix nightly --num-gpus 8 --command-to-execute "Hello world"

CONFIG_TEMPLATE = './task_config_template.cfg'
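
# For illustration only: a hypothetical policy section in task_config_template.cfg.
# The section name, option names, and values below are assumptions based on how
# run_benchmark() parses the file (every value must be a Python literal that
# ast.literal_eval can read); the real template may differ.
#
# [metrics_parameters_images]
# patterns = ['Speed:\s+(\d+\.?\d*) samples/sec', 'Validation-accuracy=(\d+\.\d+)']
# metrics = ['throughput', 'validation_accuracy']
# compute_method = ['average', 'last']
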
def run_benchmark(args):
    # Download the ImageNet data if the task benchmarks against it.
    if 'imagenet' in args.data_set:
        data_manager.getImagenetData(args.data_set)

    # Read the metric patterns, metric names, and compute methods from the
    # requested policy section of the metrics template.
    config.read(args.metrics_template)
    for name, value in config.items(args.metrics_policy):
        if name == 'patterns':
            metric_patterns = literal_eval(value)
        elif name == 'metrics':
            metric_names = literal_eval(value)
        else:
            metric_compute_methods = literal_eval(value)

    # Record system uptime, then execute the benchmark command and
    # collect the metrics it prints.
    metrics_manager.BenchmarkResultManager.uptime()
    metrics_manager.benchmark(
        command_to_execute=args.command_to_execute,
        metric_patterns=metric_patterns,
        metric_names=metric_names,
        metric_compute_methods=metric_compute_methods,
        num_gpus=args.num_gpus,
        task_name=args.task_name,
        suffix=args.metrics_suffix,
        framework=args.framework
    )

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Run a benchmark task.")
    parser.add_argument('--framework', type=str, help='Framework, e.g. mxnet')
    parser.add_argument('--metrics-policy', type=str, help='Metrics policy section name, e.g. metrics_parameters_images')
    parser.add_argument('--task-name', type=str, help='Task name, e.g. resnet50_cifar10_symbolic')
    parser.add_argument('--metrics-suffix', type=str, help='Metrics suffix, e.g. --metrics-suffix daily')
    parser.add_argument('--num-gpus', type=int, help='Number of GPUs, e.g. --num-gpus 8')
    parser.add_argument('--command-to-execute', type=str, help='The script command that performs the benchmarking')
    parser.add_argument('--data-set', type=str, help='The data set to use for benchmarking, e.g. imagenet, imagenet-480px-256px-q95')
    parser.add_argument('--metrics-template', type=str, help='The template file to use for metrics patterns', default=CONFIG_TEMPLATE)
    args = parser.parse_args()

    # Write all log output to a per-task log file.
    log_file_location = args.task_name + ".log"
    logging.basicConfig(filename=log_file_location, level=logging.DEBUG)

    try:
        run_benchmark(args)
    except Exception:
        logging.exception("Fatal error in run_benchmark")
        exit()
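
# For illustration only: a hypothetical end-to-end invocation. The training command,
# framework, and data set chosen below are assumptions, not values confirmed by this
# repository; only the flag names come from the argument parser above.
#
# python benchmark_runner.py --framework mxnet \
#     --metrics-policy metrics_parameters_images \
#     --task-name custom.p316xlarge.fp32.bs32 \
#     --metrics-suffix nightly \
#     --num-gpus 8 \
#     --data-set imagenet-480px-256px-q95 \
#     --command-to-execute "python train_imagenet.py --gpus 0,1,2,3,4,5,6,7"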