main.py
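"""MkDocs macros for the MLPerf inference documentation: generate nested
tabbed Markdown (category -> framework -> device -> scenario) together with
the corresponding ``cm run`` command for each benchmark implementation."""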
def define_env(env):

    @env.macro
    def mlperf_inference_implementation_readme(spaces, model, implementation):
        # Indentation prefixes: f_pre_space for the outermost lines,
        # pre_space (one extra space) for the nested tab content.
        pre_space = " " * (spaces - 1)
        f_pre_space = pre_space
        pre_space += " "

        content = ""
        # Default to empty lists so an unknown implementation simply
        # produces no tabs instead of raising a NameError below.
        devices = []
        frameworks = []

        # Device/framework matrix for each implementation.
        if implementation == "reference":
            devices = ["CPU", "CUDA", "ROCm"]
            if model.lower() == "resnet50":
                frameworks = ["Onnxruntime", "Tensorflow", "Deepsparse"]
            elif model.lower() == "retinanet":
                frameworks = ["Onnxruntime", "Pytorch"]
        elif implementation == "nvidia":
            devices = ["CUDA"]
            frameworks = ["TensorRT"]
        elif implementation == "intel":
            devices = ["CPU"]
            frameworks = ["Pytorch"]
        elif implementation == "qualcomm":
            devices = ["QAIC"]
            frameworks = ["Glow"]
        elif implementation == "cpp":
            devices = ["CPU", "CUDA"]
            frameworks = ["Onnxruntime"]

        for category in ["Edge", "Datacenter"]:
            # Each category has its own set of mandatory scenarios.
            if category == "Edge":
                scenarios = ["Offline", "SingleStream"]
                if model.lower() in ["resnet50", "retinanet"]:
                    scenarios.append("Multistream")
            elif category == "Datacenter":
                scenarios = ["Offline", "Server"]

            # Every nesting level of a MkDocs content tab adds four spaces
            # of indentation (hence the spaces + 16 offset passed to the
            # innermost run command, four levels down).
            content += f"{pre_space}=== \"{category.lower()}\"\n\n"
            cur_space = pre_space + "    "
            scenarios_string = ", ".join(scenarios)
            content += (
                f"{cur_space}#### {category} category\n\n"
                f"{cur_space}In the {category.lower()} category, {model} has "
                f"{scenarios_string} scenarios, all of which are mandatory "
                f"for a closed division submission.\n\n"
            )

            for framework in frameworks:
                cur_space1 = cur_space + "    "
                content += f"{cur_space}=== \"{framework}\"\n"
                content += f"{cur_space1}##### {framework} framework\n\n"

                for device in devices:
                    # The Deepsparse backend runs on CPU only.
                    if framework.lower() == "deepsparse" and device.lower() != "cpu":
                        continue
                    cur_space2 = cur_space1 + "    "
                    content += f"{cur_space1}=== \"{device}\"\n"
                    content += f"{cur_space2}###### {device} device\n\n"

                    for scenario in scenarios:
                        cur_space3 = cur_space2 + "    "
                        content += f"{cur_space2}=== \"{scenario}\"\n{cur_space3}####### {scenario}\n"
                        content += mlperf_inference_run_command(
                            spaces + 16, model, implementation, framework.lower(),
                            category.lower(), scenario, device.lower(), "valid")

                    content += f"{cur_space2}=== \"All Scenarios\"\n{cur_space3}####### All Scenarios\n"
                    content += mlperf_inference_run_command(
                        spaces + 16, model, implementation, framework.lower(),
                        category.lower(), "All Scenarios", device.lower(), "valid")

        return content
    @env.macro
    def mlperf_inference_run_command(spaces, model, implementation, framework,
                                     category, scenario, device="cpu",
                                     execution_mode="test", test_query_count="20"):
        # Same indentation convention as above: f_pre_space for the code
        # fence lines, pre_space for the command's continuation lines.
        pre_space = " " * (spaces - 1)
        f_pre_space = pre_space
        pre_space += " "

        if scenario == "All Scenarios":
            # The _all-scenarios variation tag replaces an explicit
            # --scenario option.
            scenario_variation_tag = ",_all-scenarios"
            scenario_option = ""
        else:
            scenario_variation_tag = ""
            scenario_option = f"\\\n {pre_space} --scenario={scenario}"

        cmd_suffix = f" \\\n {pre_space} --docker"
        # cmd_suffix = ""  # use this instead to drop the --docker option
        if execution_mode == "test":
            cmd_suffix += f" \\\n {pre_space} --test_query_count={test_query_count}"

        return (
            f"\n{f_pre_space} ```bash\n"
            f"{f_pre_space} cm run script --tags=run-mlperf,inference{scenario_variation_tag} \\\n"
            f" {pre_space} --model={model} \\\n"
            f" {pre_space} --implementation={implementation} \\\n"
            f" {pre_space} --framework={framework} \\\n"
            f" {pre_space} --category={category} {scenario_option} \\\n"
            f" {pre_space} --execution-mode={execution_mode} \\\n"
            f" {pre_space} --device={device} {cmd_suffix}\n"
            f"{f_pre_space} ```\n"
        )
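

# Hypothetical usage sketch (not part of the original file): the two macros
# above are registered through the mkdocs-macros plugin, so documentation
# pages invoke them as Jinja expressions, e.g.
#   {{ mlperf_inference_implementation_readme(4, "resnet50", "reference") }}
# The snippet below exercises the macro outside MkDocs with a minimal stub
# standing in for the plugin's env object.
if __name__ == "__main__":
    class _StubEnv:
        """Stand-in for the mkdocs-macros env: `macro` registers a function
        as an attribute, mirroring how the plugin exposes macros to pages."""
        def macro(self, fn):
            setattr(self, fn.__name__, fn)
            return fn

    env = _StubEnv()
    define_env(env)
    # Print the generated tabbed Markdown for one model/implementation pair.
    print(env.mlperf_inference_implementation_readme(4, "resnet50", "reference"))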