npf_run.py
#!/usr/bin/env python3
"""
Main NPF test runner program
"""
import argparse
import errno
import multiprocessing
import sys

from npf import npf
from npf.pipeline import pypost
from npf.regression import *
from npf.statistics import Statistics
from npf.test import Test, ScriptInitException
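

# Parse arguments, resolve the repository and the versions to build, then run
# every test for each version, comparing against and graphing older results.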
def main():
    parser = argparse.ArgumentParser(description='NPF Test runner')

    v = npf.add_verbosity_options(parser)

    b = npf.add_building_options(parser)
    b.add_argument('--allow-old-build',
                   help='Re-build and run tests for old versions (compare-version and graph-version) without results. '
                        'By default, building is done only for the regression versions (see --history or --version).',
                   dest='allow_oldbuild', action='store_true', default=False)
    b.add_argument('--force-old-build',
                   help='Force rebuilding the old versions. Ignored if --allow-old-build is not set',
                   dest='force_oldbuild', action='store_true', default=False)

    t = npf.add_testing_options(parser, True)

    g = parser.add_argument_group('Versioning options')
    g.add_argument('--regress',
                   help='Do a regression comparison against an old version of the software',
                   dest='compare', action='store_true', default=False)
    gf = g.add_mutually_exclusive_group()
    gf.add_argument('--history',
                    help='Number of commits in the history on which to execute the regression tests. By default '
                         'this is 1, meaning that the regression test is done on HEAD and compared against HEAD~1. '
                         'This parameter allows starting at commit HEAD~N as if it were HEAD, doing the regression '
                         'test for each commit up to now. The difference with --allow-old-build is that the '
                         'regression test is done for each commit instead of just graphing the results, so the '
                         'error messages and return code will reflect any regression between HEAD and HEAD~N. '
                         'Ignored if --version is given.',
                    dest='history', metavar='N',
                    nargs='?', type=int, default=1)
    g.add_argument('--branch', help='Branch', type=str, nargs='?', default=None)
    g.add_argument('--compare-version', dest='compare_version', metavar='version', type=str, nargs='?',
                   help='A version to compare against the last version. Default is the first parent of the '
                        'last version containing some results.')
    af = g.add_mutually_exclusive_group()
    af.add_argument('--regress-version', '--graph-version', dest='graph_version', metavar='version', type=str, nargs='*',
                    help='Versions to compare against. Alternative to --regress-history to identify the versions '
                         'to compare against.')
    af.add_argument('--regress-history', '--graph-num', dest='graph_num', metavar='N', type=int, nargs='?', default=-1,
                    help='Number of old versions to graph after --compare-version, unused if --graph-version is '
                         'given. Default is 0, or 8 if --regress is given.')

    a = npf.add_graph_options(parser)
    # a.add_argument('--graph-allvariables', help='Graph only the latest variables (useful when you restrict variables '
    #                                             'with tags)', dest='graph_newonly', action='store_true', default=False)
    # a.add_argument('--graph-serie', dest='graph_serie', metavar='variable', type=str, nargs=1, default=[None],
    #                help='Set which variable will be used as serie when creating graph')

    parser.add_argument('repo', metavar='repo name', type=str, nargs='?',
                        help='name of the repo/group of builds', default=None)

    args = parser.parse_args()
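
    # Parse the node (cluster) definitions referenced by the options.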
    npf.parse_nodes(args)

    if args.force_oldbuild and not args.allow_oldbuild:
        print("--force-old-build needs --allow-old-build")
        parser.print_help()
        return 1

    if args.repo:
        repo = Repository.get_instance(args.repo, args)
    else:
        if os.path.exists(args.test_files) and os.path.isfile(args.test_files):
            tmptest = Test(args.test_files, options=args)
            if "default_repo" in tmptest.config and tmptest.config["default_repo"] is not None:
                repo = Repository.get_instance(tmptest.config["default_repo"], args)
            else:
                print("This npf script has no default repository")
                sys.exit(1)
        else:
            print("Please specify a repository on the command line, or a single test with a default_repo")
            sys.exit(1)
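
    # -1 means --graph-num was not set: default to graphing 8 older versions
    # when regressing, and none otherwise.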
    if args.graph_num == -1:
        args.graph_num = 8 if args.compare else 0

    tags = args.tags
    tags += repo.tags

    # Overwrite config if a build folder is given
    if args.use_local:
        repo.url = None
        repo._build_path = args.use_local + '/'
        versions = ['local']
    elif repo.url:
        versions = repo.method.get_last_versions(limit=args.history, branch=args.branch)
    else:
        versions = ['local']

    # Builds of the regression versions
    builds: List[Build] = []
    for version in versions:
        builds.append(Build(repo, version, args.result_path))

    last_rebuilds = []

    last_build = None
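    # Find the version to regress against: either the explicit
    # --compare-version, or the most recent ancestor that has stored
    # results (re-building it when --allow-old-build is set).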
    if args.compare:
        if args.compare_version and len(args.compare_version):
            compare_version = args.compare_version
            last_build = Build(repo, compare_version)
        else:
            old_versions = repo.method.get_history(versions[-1], 100)
            for i, version in enumerate(old_versions):
                last_build = Build(repo, version)
                if last_build.hasResults():
                    break
                elif args.allow_oldbuild:
                    last_rebuilds.append(last_build)
                    break
                if i > 100:
                    last_build = None
                    break
        if last_build:
            print("Comparison version is %s" % last_build.version)
    graph_builds = []
    if args.graph_version and len(args.graph_version) > 0:
        for g in args.graph_version:
            graph_builds.append(Build(repo, g))
    else:
        if args.graph_num > 1 and repo.url:
            old_versions = repo.method.get_history(last_build.version if last_build else builds[-1].version, 100)
            for i, version in enumerate(old_versions):
                g_build = Build(repo, version)
                if g_build in builds or g_build == last_build:
                    continue
                i += 1
                if g_build.hasResults() and not args.force_oldbuild:
                    graph_builds.append(g_build)
                elif args.allow_oldbuild:
                    last_rebuilds.append(g_build)
                    graph_builds.append(g_build)
                if len(graph_builds) > args.graph_num:
                    break

    tests = Test.expand_folder(test_path=args.test_files, options=args, tags=tags)
    if not tests:
        sys.exit(errno.ENOENT)

    npf.override(args, tests)
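
    # Old builds without stored results are re-executed first, so they can
    # serve as comparison and graphing points.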
    for b in last_rebuilds:
        print("Last version %s had no result. Re-executing tests for it." % b.version)
        did_something = False
        for test in tests:
            prev_results = b.load_results(test)
            print("Executing test %s" % test.filename)
            try:
                all_results, time_results, init_done = test.execute_all(b, options=args, prev_results=prev_results)
                if all_results is None and time_results is None:
                    continue
            except ScriptInitException:
                continue
            else:
                did_something = True
            b.writeversion(test, all_results, allow_overwrite=True)
        if did_something:
            b.writeResults()
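
    # Main loop: run every test for every regression build, oldest first,
    # comparing against the previous version and graphing the results.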
    returncode = 0
    for build in reversed(builds):
        if len(builds) > 1 or repo.version:
            if build.version == "local":
                print("Starting tests")
            else:
                print("Starting tests for version %s" % build.version)

        nok = 0
        ntests = 0
        for test in tests:
            print("Executing test %s" % test.filename)
            regression = Regression(test)
            if test.get_title() != test.filename:
                print(test.get_title())

            old_all_results = None
            if last_build:
                try:
                    old_all_results = last_build.load_results(test)
                except FileNotFoundError:
                    print("Previous build %s could not be found, we will not compare!" % last_build.version)
                    last_build = None

            try:
                prev_results = build.load_results(test)
                prev_time_results = build.load_results(test, kind=True)
            except FileNotFoundError:
                prev_results = None
                prev_time_results = None

            all_results = None
            time_results = None
            try:
                if all_results is None and time_results is None:
                    all_results, time_results, init_done = test.execute_all(
                        build, prev_results=prev_results, prev_time_results=prev_time_results,
                        do_test=args.do_test, options=args)
                if not all_results and not time_results:
                    returncode += 1
                    continue
            except ScriptInitException:
                continue

            if args.compare:
                variables_passed, variables_total = regression.compare(
                    test, test.variables, all_results, build, old_all_results, last_build)
                if variables_passed == variables_total:
                    nok += 1
                else:
                    returncode += 1
                ntests += 1
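
            # Persist the fresh results for this build before post-processing.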
            if all_results and len(all_results) > 0:
                build.writeResults()

            # Filtered results are results only for the given current variables
            filtered_results = {}
            for v in test.variables:
                run = Run(v)
                if run in all_results:
                    filtered_results[run] = all_results[run]

            if args.statistics:
                Statistics.run(build, filtered_results, test,
                               max_depth=args.statistics_maxdepth, filename=args.statistics_filename)
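
            # Assemble the series to graph: the current run plus the
            # comparison build and any older builds that have results.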
            grapher = Grapher()

            g_series = []
            if last_build and old_all_results and args.compare:
                g_series.append((test, last_build, old_all_results))

            for g_build in graph_builds:
                try:
                    g_all_results = g_build.load_results(test)
                    if g_all_results and len(g_all_results) > 0:
                        g_series.append((test, g_build, g_all_results))
                except FileNotFoundError:
                    print("Previous build %s could not be found, we will not graph it!" % g_build.version)

            filename = args.graph_filename or build.result_path(test.filename, 'pdf')

            series_with_history = [(test, build, all_results)] + g_series
            pypost.execute_pypost(series=series_with_history)
            grapher.graph(series=series_with_history,
                          title=test.get_title(),
                          filename=filename,
                          # graph_variables=[Run(x) for x in test.variables],
                          options=args)
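
            # Results sampled over time are graphed separately, one file per
            # time series, with the series name used as the file prefix.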
            if time_results:
                for time_ns, results in time_results.items():
                    if not results:
                        continue
                    series_with_history = [(test, build, results)]
                    grapher.graph(series=series_with_history,
                                  title=test.get_title(),
                                  filename=filename,
                                  fileprefix=time_ns,
                                  options=args)

        if last_build and args.graph_num > 0:
            graph_builds = [last_build] + graph_builds[:-1]
        last_build = build

        if args.compare:
            print("[%s] Finished run for %s, %d/%d tests passed" % (repo.name, build.version, nok, ntests))

    sys.exit(returncode)
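

# 'forkserver' gives worker processes a clean interpreter state instead of a
# fork of the (possibly threaded) main process.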
if __name__ == "__main__":
    multiprocessing.set_start_method('forkserver')
    main()