From 982c9fb609331a1caf01e37e6cabc5ec1046c280 Mon Sep 17 00:00:00 2001
From: Vasileios Karakasis
Date: Mon, 25 Sep 2017 10:04:36 +0200
Subject: [PATCH] Reframe 2.6.1 public release

---
 ci-scripts/ci-runner.bash | 7 +
 reframe/__init__.py | 5 +-
 reframe/core/debug.py | 59 +++
 reframe/core/environments.py | 155 ++++----
 reframe/core/exceptions.py | 28 +-
 reframe/core/fields.py | 97 ++---
 reframe/core/launchers.py | 24 +-
 reframe/core/logging.py | 68 ++--
 reframe/core/modules.py | 24 +-
 reframe/core/pipeline.py | 247 ++++++++----
 reframe/core/schedulers.py | 354 +++++++++++++-----
 reframe/core/shell.py | 23 +-
 reframe/core/systems.py | 48 ++-
 reframe/frontend/argparse.py | 37 +-
 reframe/frontend/cli.py | 47 +--
 reframe/frontend/executors/__init__.py | 42 ++-
 reframe/frontend/executors/policies.py | 50 +--
 reframe/frontend/loader.py | 35 +-
 reframe/frontend/printer.py | 42 +--
 reframe/frontend/resources.py | 13 +-
 reframe/frontend/statistics.py | 45 ++-
 reframe/settings.py | 99 ++---
 reframe/utility/functions.py | 21 +-
 reframe/utility/os.py | 56 +--
 reframe/utility/parsers.py | 44 +--
 reframe/utility/sandbox.py | 3 +-
 unittests/fixtures.py | 89 ++---
 unittests/resources/badchecks/badargs.py | 3 +-
 unittests/resources/badchecks/badentry.py | 3 +-
 .../resources/badchecks/invalid_check.py | 3 +-
 unittests/resources/badchecks/noentry.py | 1 +
 unittests/resources/emptycheck.py | 2 +-
 unittests/resources/frontend_checks.py | 71 ++--
 unittests/resources/hellocheck.py | 13 +-
 unittests/resources/hellocheck_make.py | 19 +-
 unittests/resources/src/sleep_deeply.sh | 6 +
 unittests/test_argparser.py | 4 +-
 unittests/test_cli.py | 62 ++-
 unittests/test_core.py | 85 +++--
 unittests/test_fields.py | 227 +++++------
 unittests/test_launchers.py | 16 +-
 unittests/test_loader.py | 63 ++--
 unittests/test_logging.py | 26 +-
 unittests/test_parsers.py | 70 +---
 unittests/test_pipeline.py | 344 +++++++++--------
 unittests/test_policies.py | 81 ++--
 unittests/test_schedulers.py | 219 ++++++++---
 unittests/test_utility.py | 55 +--
 48 files changed, 1690 insertions(+), 1445 deletions(-)
 create mode 100644 reframe/core/debug.py
 create mode 100755 unittests/resources/src/sleep_deeply.sh

diff --git a/ci-scripts/ci-runner.bash b/ci-scripts/ci-runner.bash
index b30b340e3a..7cbf3312e6 100644
--- a/ci-scripts/ci-runner.bash
+++ b/ci-scripts/ci-runner.bash
@@ -179,6 +179,13 @@ if [ $CI_EXITCODE -eq 0 ]; then
     swap_files reframe/settings.public.py reframe/settings.py
 fi
 
+# FIXME: Temporary workaround for the PE upgrade on Daint
+if [[ $(hostname) == daint* ]]; then
+    # Do not test modfied tests on Daint
+    exit $CI_EXITCODE
+fi
+
+
 # Find modified or added user checks
 userchecks=( $(git log --name-status --oneline --no-merges -1 | \
               grep -e '^[AM][[:space:]]*checks/.*\.py$' | \
diff --git a/reframe/__init__.py b/reframe/__init__.py
index 1a1740c872..055d32ae7a 100644
--- a/reframe/__init__.py
+++ b/reframe/__init__.py
@@ -29,11 +29,12 @@
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True)
     if re.search('Unknown shell type', _completed.stderr, re.MULTILINE):
-        sys.stderr.write('Python is not supported by this modules framework.\n')
+        sys.stderr.write(
+            'Python is not supported by this modules framework.\n')
         sys.exit(1)
 except OSError:
     # modulecmd was not found
-    sys.stderr.write("Could not run modulecmd. Tried `%s' and failed.\n" % \
+    sys.stderr.write("Could not run modulecmd. Tried `%s' and failed.\n" %
                      MODULECMD_PYTHON)
     sys.exit(1)
 
diff --git a/reframe/core/debug.py b/reframe/core/debug.py
new file mode 100644
index 0000000000..2e9de2cfcb
--- /dev/null
+++ b/reframe/core/debug.py
@@ -0,0 +1,59 @@
+#
+# Internal debug utilities for the framework
+#
+
+import builtins
+import threading
+
+# Current indentation levels per thread
+_depth = {}
+
+
+def _gettid():
+    tid = threading.get_ident()
+    _depth.setdefault(tid, 0)
+    return tid
+
+
+def _increase_indent():
+    tid = _gettid()
+    _depth[tid] += 1
+    return _depth[tid]
+
+
+def _decrease_indent():
+    tid = _gettid()
+    _depth[tid] -= 1
+    return _depth[tid]
+
+
+def repr(obj, indent=4, max_depth=2):
+    """Return a generic representation string for object `obj`.
+
+    Keyword arguments:
+    indent -- indentation width
+    max_depth -- maximum depth for expanding nested objects
+    """
+    if not hasattr(obj, '__dict__'):
+        # Delegate to the builtin repr() for builtin types
+        return builtins.repr(obj)
+
+    tid = _gettid()
+    indent_width = _increase_indent() * indent
+
+    # Attribute representation
+    if _depth[tid] == max_depth:
+        attr_list = ['...']
+    else:
+        attr_list = ['%s%s=%r' % (indent_width * ' ', attr, val)
+                     for attr, val in sorted(obj.__dict__.items())]
+
+    repr_fmt = '%(module_name)s.%(class_name)s(%(attr_repr)s)@0x%(addr)x'
+    ret = repr_fmt % {
+        'module_name': obj.__module__,
+        'class_name': obj.__class__.__name__,
+        'attr_repr': ',\n'.join(attr_list),
+        'addr': id(obj)
+    }
+    _decrease_indent()
+    return ret
diff --git a/reframe/core/environments.py b/reframe/core/environments.py
index 2f5598ad5a..0977f64773 100644
--- a/reframe/core/environments.py
+++ b/reframe/core/environments.py
@@ -2,8 +2,11 @@
 import shutil
 import subprocess
 import reframe.utility.os as os_ext
+import reframe.core.debug as debug
 
-from reframe.core.exceptions import ReframeError, CommandError, CompilationError
+from reframe.core.exceptions import (ReframeError,
+                                     CommandError,
+                                     CompilationError)
 from reframe.core.fields import *
 from reframe.core.modules import *
 
@@ -13,22 +16,21 @@ class Environment:
     modules = TypedListField('modules', str)
     variables = TypedDictField('variables', str, str)
 
-    def __init__(self, name, modules = [], variables = {}, **kwargs):
+    def __init__(self, name, modules=[], variables={}, **kwargs):
         self.name = name
-        self.modules = copy.deepcopy(modules)
-        self.variables = copy.deepcopy(variables)
+        self.modules = list(modules)
+        self.variables = dict(variables)
         self.loaded = False
         self._saved_variables = {}
         self._conflicted = []
         self._preloaded = set()
-
+        self._load_stmts = []
 
     def add_module(self, name):
         """Add module to the list of modules to be loaded."""
         self.modules.append(name)
 
-
     def set_variable(self, name, value):
         """Set environment variable to name.
@@ -36,7 +38,6 @@ def set_variable(self, name, value): saved internally and restored when Restore() is called.""" self.variables[name] = value - def load(self): """Load environment.""" @@ -46,6 +47,10 @@ def load(self): self._preloaded.add(m) self._conflicted += module_force_load(m) + for conflict in self._conflicted: + self._load_stmts += ['module unload %s' % conflict] + + self._load_stmts += ['module load %s' % m] for k, v in self.variables.items(): if k in os.environ: @@ -55,7 +60,6 @@ def load(self): self.loaded = True - def unload(self): """Restore environment to its previous state.""" if not self.loaded: @@ -69,7 +73,7 @@ def unload(self): # Unload modules in reverse order for m in reversed(self.modules): - if not m in self._preloaded: + if m not in self._preloaded: module_unload(m) # Reload the conflicted packages, previously removed @@ -78,20 +82,14 @@ def unload(self): self.loaded = False - - # FIXME: Does not correspond to the actual process in load() def emit_load_instructions(self, builder): """Emit shell instructions for loading this environment.""" - for m in self._conflicted: - builder.verbatim('module unload %s' % m) - - for m in self.modules: - builder.verbatim('module load %s' % m) + for stmt in self._load_stmts: + builder.verbatim(stmt) for k, v in self.variables.items(): builder.set_variable(k, v, export=True) - # FIXME: Does not correspond to the actual process in unload() def emit_unload_instructions(self, builder): """Emit shell instructions for loading this environment.""" @@ -104,32 +102,21 @@ def emit_unload_instructions(self, builder): for m in self._conflicted: builder.verbatim('module load %s' % m) - def __eq__(self, other): - return \ - other != None and \ - self.name == other.name and \ - set(self.modules) == set(other.modules) and \ - self.variables == other.variables - + return (other is not None and + self.name == other.name and + set(self.modules) == set(other.modules) and + self.variables == other.variables) def __ne__(self, other): return not self.__eq__(other) - def __repr__(self): - return self.__str__() - - - def __hash__(self): - return self.name.__hash__() - + return debug.repr(self) def __str__(self): - return \ - 'Name: %s\n' % self.name + \ - 'Modules: %s\n' % str(self.modules) + \ - 'Environment: %s' % str(self.variables) + return ('Name: %s\nModules: %s\nEnvironment: %s' % + (self.name, modules, self.variables)) def swap_environments(src, dst): @@ -145,21 +132,17 @@ def __init__(self, name='env_snapshot'): self.variables = dict(os.environ) self._conflicted = [] - def add_module(self, name): raise RuntimeError('environment snapshot is read-only') - def set_variable(self, name, value): raise RuntimeError('environment snapshot is read-only') - def load(self): os.environ.clear() os.environ.update(self.variables) self.loaded = True - def unload(self): raise RuntimeError('cannot unload an environment snapshot') @@ -167,16 +150,16 @@ def unload(self): class ProgEnvironment(Environment): def __init__(self, name, - modules = [], - variables = {}, - cc = 'cc', - cxx = 'CC', - ftn = 'ftn', - cppflags = None, - cflags = None, - cxxflags = None, - fflags = None, - ldflags = None, + modules=[], + variables={}, + cc='cc', + cxx='CC', + ftn='ftn', + cppflags=None, + cflags=None, + cxxflags=None, + fflags=None, + ldflags=None, **kwargs): super().__init__(name, modules, variables) self.cc = cc @@ -190,32 +173,29 @@ def __init__(self, self.include_search_path = [] self.propagate = True - def guess_language(self, filename): ext = filename.split('.')[-1] - if ext 
in [ 'c' ]: + if ext in ['c']: return 'C' - if ext in [ 'cc', 'cp', 'cxx', 'cpp', 'CPP', 'c++', 'C' ]: + if ext in ['cc', 'cp', 'cxx', 'cpp', 'CPP', 'c++', 'C']: return 'C++' - if ext in [ 'f', 'for', 'ftn', 'F', 'FOR', 'fpp', 'FPP', 'FTN', - 'f90', 'f95', 'f03', 'f08', 'F90', 'F95', 'F03', 'F08' ]: + if ext in ['f', 'for', 'ftn', 'F', 'FOR', 'fpp', 'FPP', 'FTN', + 'f90', 'f95', 'f03', 'f08', 'F90', 'F95', 'F03', 'F08']: return 'Fortran' - if ext in [ 'cu' ]: + if ext in ['cu']: return 'CUDA' - - def compile(self, sourcepath, makefile = None, executable = None, - lang = None, options = ''): + def compile(self, sourcepath, makefile=None, executable=None, + lang=None, options=''): if os.path.isdir(sourcepath): return self._compile_dir(sourcepath, makefile, options) else: return self._compile_file(sourcepath, executable, lang, options) - def _compile_file(self, source_file, executable, lang, options): if not executable: # default executable, same as source_file without the extension @@ -226,13 +206,13 @@ def _compile_file(self, source_file, executable, lang, options): lang = self.guess_language(source_file) # Replace None's with empty strings - cppflags = self.cppflags if self.cppflags else '' - cflags = self.cflags if self.cflags else '' - cxxflags = self.cxxflags if self.cxxflags else '' - fflags = self.fflags if self.fflags else '' - ldflags = self.ldflags if self.ldflags else '' + cppflags = self.cppflags or '' + cflags = self.cflags or '' + cxxflags = self.cxxflags or '' + fflags = self.fflags or '' + ldflags = self.ldflags or '' - flags = [ cppflags ] + flags = [cppflags] if lang == 'C': compiler = self.cc flags.append(cflags) @@ -249,19 +229,18 @@ def _compile_file(self, source_file, executable, lang, options): raise ReframeError('Unknown language') # Append include search path - flags += [ '-I' + d for d in self.include_search_path ] - cmd = '%s %s %s -o %s %s %s' % \ - (compiler, ' '.join(flags), source_file, - executable, ldflags, options) + flags += ['-I' + d for d in self.include_search_path] + cmd = ('%s %s %s -o %s %s %s' % (compiler, ' '.join(flags), + source_file, executable, + ldflags, options)) try: return os_ext.run_command(cmd, check=True) except CommandError as e: - raise CompilationError(command = e.command, - stdout = e.stdout, - stderr = e.stderr, - exitcode = e.exitcode, - environ = self) - + raise CompilationError(command=e.command, + stdout=e.stdout, + stderr=e.stderr, + exitcode=e.exitcode, + environ=self) def _compile_dir(self, source_dir, makefile, options): if makefile: @@ -271,25 +250,25 @@ def _compile_dir(self, source_dir, makefile, options): # Pass a set of predefined options to the Makefile if self.propagate: - flags = [ "CC='%s'" % self.cc, - "CXX='%s'" % self.cxx, - "FC='%s'" % self.ftn ] + flags = ["CC='%s'" % self.cc, + "CXX='%s'" % self.cxx, + "FC='%s'" % self.ftn] # Explicitly check against None here; the user may explicitly want # to clear the flags - if self.cppflags != None: + if self.cppflags is not None: flags.append("CPPFLAGS='%s'" % self.cppflags) - if self.cflags != None: + if self.cflags is not None: flags.append("CFLAGS='%s'" % self.cflags) - if self.cxxflags != None: + if self.cxxflags is not None: flags.append("CXXFLAGS='%s'" % self.cxxflags) - if self.fflags != None: + if self.fflags is not None: flags.append("FFLAGS='%s'" % self.fflags) - if self.ldflags != None: + if self.ldflags is not None: flags.append("LDFLAGS='%s'" % self.ldflags) cmd += ' '.join(flags) @@ -297,8 +276,8 @@ def _compile_dir(self, source_dir, makefile, options): try: 
return os_ext.run_command(cmd, check=True) except CommandError as e: - raise CompilationError(command = e.command, - stdout = e.stdout, - stderr = e.stderr, - exitcode = e.exitcode, - environ = self) + raise CompilationError(command=e.command, + stdout=e.stdout, + stderr=e.stderr, + exitcode=e.exitcode, + environ=self) diff --git a/reframe/core/exceptions.py b/reframe/core/exceptions.py index 1b8bd7ad80..2a2af930f5 100644 --- a/reframe/core/exceptions.py +++ b/reframe/core/exceptions.py @@ -2,13 +2,20 @@ # Base regression exceptions # +import reframe.core.debug as debug + + class ReframeError(Exception): """ Base exception for regression errors. """ - def __init__(self, msg = ''): + + def __init__(self, msg=''): self.message = msg + def __repr__(self): + return debug.repr(self) + def __str__(self): return self.message @@ -42,7 +49,7 @@ def __init__(self, command, stdout, stderr, exitcode, timeout=0): else: super().__init__( - "Command `%s' failed with exit code: %d" % \ + "Command `%s' failed with exit code: %d" % (self.command, exitcode)) self.stdout = stdout @@ -50,14 +57,13 @@ def __init__(self, command, stdout, stderr, exitcode, timeout=0): self.exitcode = exitcode self.timeout = timeout - def __str__(self): - ret = '\n' + super().__str__() + \ - "\n=== STDOUT ===\n" + \ - self.stdout + \ - "\n=== STDERR ===\n" + \ - self.stderr - return ret + return ('\n' + + super().__str__() + + '\n=== STDOUT ===\n' + + self.stdout + + '\n=== STDERR ===\n' + + self.stderr) class CompilationError(CommandError): @@ -68,3 +74,7 @@ def __init__(self, command, stdout, stderr, exitcode, environ): class JobSubmissionError(CommandError): pass + + +class JobResourcesError(ReframeError): + pass diff --git a/reframe/core/fields.py b/reframe/core/fields.py index 33f49febcd..e04a3ab0eb 100644 --- a/reframe/core/fields.py +++ b/reframe/core/fields.py @@ -2,27 +2,32 @@ # Useful descriptors for advanced operations on fields # -from reframe.core.exceptions import FieldError - import copy import re +import reframe.core.debug as debug + +from reframe.core.exceptions import FieldError + -class Field(object): +class Field: """Base class for fields""" + def __init__(self, fieldname): self.name = fieldname + def __repr__(self): + return debug.repr(self) def __get__(self, obj, objtype): return obj.__dict__[self.name] - def __set__(self, obj, value): obj.__dict__[self.name] = value -class ForwardField(object): +class ForwardField: """Simple field that forwards set/get to a target object.""" + def __init__(self, obj, attr): self.target = obj self.attr = attr @@ -30,24 +35,23 @@ def __init__(self, obj, attr): def __get__(self, obj, objtype): return self.target.__dict__[self.attr] - def __set__(self, obj, value): self.target.__dict__[self.attr] = value class TypedField(Field): """Stores a field of predefined type""" - def __init__(self, fieldname, fieldtype, allow_none = False): + + def __init__(self, fieldname, fieldtype, allow_none=False): super().__init__(fieldname) self.fieldtype = fieldtype self.allow_none = allow_none - def __set__(self, obj, value): - if (value != None or not self.allow_none) and \ - not isinstance(value, self.fieldtype): + if ((value is not None or not self.allow_none) and + not isinstance(value, self.fieldtype)): raise FieldError('attempt to set a field of different type. 
' - 'Required: %s, got: %s' % \ + 'Required: %s, got: %s' % (self.fieldtype, value.__class__.__name__)) super().__set__(obj, value) @@ -66,29 +70,27 @@ class AggregateTypeField(Field): seq_type := list | tuple map_type := dict type := | callable""" - def __init__(self, fieldname, typespec, allow_none = False): + + def __init__(self, fieldname, typespec, allow_none=False): super().__init__(fieldname) self.typespec = typespec self.allow_none = allow_none - def __set__(self, obj, value): if not self._check_type_ext(value): raise FieldError('attempt to set an aggregate field ' - 'of different type. Required typespec: %s' % \ + 'of different type. Required typespec: %s' % str(self.typespec)) super().__set__(obj, value) - def _check_type_ext(self, value): # Checks also value against None if that's allowed - if value == None and self.allow_none: + if value is None and self.allow_none: return True return self._check_type(value, self.typespec) - def _extract_typeinfo(self, typespec): """Check if a typespec is of the form (type, None) @@ -100,22 +102,21 @@ def _extract_typeinfo(self, typespec): if len(typespec) != 2: raise FieldError('invalid typespec: %s' % str(typespec)) - if typespec[1] != None: + if typespec[1] is not None: return (typespec, False) else: return (typespec[0], True) - def _check_type(self, value, typespec): # Extract the type information and check if None is allowed typespec, allow_none = self._extract_typeinfo(typespec) - if value == None and allow_none: + if value is None and allow_none: return True if not isinstance(typespec, tuple): # we need to make a special check if typespec == callable - return callable(value) if typespec == callable else \ - isinstance(value, typespec) + return (callable(value) + if typespec == callable else isinstance(value, typespec)) if len(typespec) != 2: raise FieldError('invalid typespec: %s' % str(typespec)) @@ -124,7 +125,7 @@ def _check_type(self, value, typespec): if not isinstance(value, container_type): return False - if container_type in [ tuple, list ]: + if container_type in [tuple, list]: if isinstance(element_type, tuple) and len(element_type) == 1: # non-uniformly typed container elem_types = element_type[0] @@ -140,14 +141,14 @@ def _check_type(self, value, typespec): if not self._check_type(v, element_type): return False - elif container_type in [ set, frozenset ]: + elif container_type in [set, frozenset]: for v in value: if not self._check_type(v, element_type): return False elif container_type == dict: if len(element_type) != 2: - raise FieldError('invalid mapping typespec: %s' % \ + raise FieldError('invalid mapping typespec: %s' % str(element_type)) key_type, value_type = element_type @@ -164,12 +165,12 @@ def _check_type(self, value, typespec): class AlphanumericField(TypedField): """Stores an alphanumeric string ([A-Za-z0-9_])""" + def __init__(self, fieldname, allow_none=False): super().__init__(fieldname, str, allow_none) - def __set__(self, obj, value): - if value != None: + if value is not None: if not isinstance(value, str): raise FieldError('attempt to set an alphanumeric field ' 'with a non-string value') @@ -184,12 +185,12 @@ def __set__(self, obj, value): class NonWhitespaceField(TypedField): """Stores a string without any whitespace""" + def __init__(self, fieldname, allow_none=False): super().__init__(fieldname, str, allow_none) - def __set__(self, obj, value): - if value != None: + if value is not None: if not isinstance(value, str): raise FieldError('Attempt to set a string field ' 'with a non-string value') @@ 
-203,48 +204,56 @@ def __set__(self, obj, value): class StringField(TypedField): """Stores a standard string object""" + def __init__(self, fieldname, allow_none=False): super().__init__(fieldname, str, allow_none) class IntegerField(TypedField): """Stores an integer object""" + def __init__(self, fieldname, allow_none=False): super().__init__(fieldname, int, allow_none) class BooleanField(TypedField): """Stores a boolean object""" + def __init__(self, fieldname, allow_none=False): super().__init__(fieldname, bool, allow_none) class TypedListField(AggregateTypeField): """Stores a list of objects of the same type""" + def __init__(self, fieldname, elemtype): super().__init__(fieldname, (list, elemtype)) class TypedSetField(AggregateTypeField): """Stores a list of objects of the same type""" + def __init__(self, fieldname, elemtype): super().__init__(fieldname, (set, elemtype)) class TypedDictField(AggregateTypeField): """Stores a list of objects of the same type""" + def __init__(self, fieldname, keytype, valuetype): super().__init__(fieldname, (dict, (keytype, valuetype))) class CopyOnWriteField(Field): """Holds a copy of the variable that is assigned to it the first time""" + def __set__(self, obj, value): super().__set__(obj, copy.deepcopy(value)) class ReadOnlyField(Field): """Holds a read-only field. Attempts to set it will raise an exception""" + def __init__(self, value): super().__init__('_readonly_') self.value = value @@ -262,6 +271,7 @@ class SanityPatternField(AggregateTypeField): This is a special dictionary that allows a special entry for calling a callback function when eof is matched """ + def __init__(self, fieldname, allow_none=False): # The type of the outer dictionary self.outer_typespec = (dict, (str, object)) @@ -272,9 +282,8 @@ def __init__(self, fieldname, allow_none=False): ) super().__init__(fieldname, self.inner_typespec, allow_none) - def __set__(self, obj, value): - if value == None and self.allow_none: + if value is None and self.allow_none: # Call directly Field's __set__() method; no need for further type # checking Field.__set__(self, obj, value) @@ -305,7 +314,7 @@ def __set__(self, obj, value): 'with an invalid dictionary object') finally: # Restore '\e' - if eof_handler != None: + if eof_handler is not None: v['\e'] = eof_handler # All type checking is done; just set the value @@ -314,15 +323,15 @@ def __set__(self, obj, value): class TimerField(AggregateTypeField): """Stores a timer in the form of a tuple '(hh, mm, ss)'""" + def __init__(self, fieldname, allow_none=False): super().__init__(fieldname, (tuple, ((int, int, int),)), allow_none) - def __set__(self, obj, value): if not self._check_type_ext(value): raise FieldError('attempt to set a timer field with a wrong type') - if value != None: + if value is not None: # Check also the values for minutes and seconds h, m, s = value if h < 0 or m < 0 or s < 0: @@ -341,6 +350,7 @@ class ScopedDict(dict): """This is a special dict that imposes scopes on its keys. 
When a key is not found it will be searched up in the scope hierarchy.""" + def __init__(self, mapping={}, scope_sep=':', global_scope='*'): """Initialize a ScopedDict @@ -363,16 +373,15 @@ def __init__(self, mapping={}, scope_sep=':', global_scope='*'): for k, v in mapping.items(): self._check_scope_type(k, v) + # We need deep copy here, since mapping is a two-level dictionary self.scopes = copy.deepcopy(mapping) self.scope_sep = scope_sep self.global_scope = global_scope - def __str__(self): # just print the internal dictionary return str(self.scopes) - def _check_scope_type(self, key, value): if not isinstance(key, str): raise TypeError('scope keys in a scoped dict must be strings') @@ -384,7 +393,6 @@ def _check_scope_type(self, key, value): if not isinstance(k, str): raise TypeError('keys must be strings') - def add_scopes(self, scopes={}): if not isinstance(scopes, dict): raise TypeError('scopes is not a dictionary') @@ -396,7 +404,6 @@ def add_scopes(self, scopes={}): self.scopes[k] = copy.deepcopy(v) - def _keyinfo(self, key): key_parts = key.rsplit(self.scope_sep, maxsplit=1) if len(key_parts) == 2: @@ -404,12 +411,10 @@ def _keyinfo(self, key): else: return (self.global_scope, key_parts[0]) - def _parent_scope(self, scope): scope_parts = scope.rsplit(':', maxsplit=1)[:-1] return scope_parts[0] if scope_parts else self.global_scope - def __getitem__(self, key): scope, lookup_key = self._keyinfo(key) while scope != self.global_scope: @@ -424,16 +429,14 @@ def __getitem__(self, key): else: return self.__missing__(key) - def __setitem__(self, key, value): scope, lookup_key = self._keyinfo(key) - if not scope in self.scopes: + if scope not in self.scopes: # create the scope if does not exist self.scopes[scope] = {} self.scopes[scope][lookup_key] = value - def __delitem__(self, key): """Deletes either a key or a scope if key refers to a scope. @@ -447,7 +450,6 @@ def __delitem__(self, key): scope, lookup_key = self._keyinfo(key) del self.scopes[scope][lookup_key] - def __missing__(self, key): raise KeyError(str(key)) @@ -456,17 +458,18 @@ class ScopedDictField(AggregateTypeField): """Stores a ScopedDict with a specific type It also handles implicit conversions from ordinary dicts.""" + def __init__(self, fieldname, valuetype, allow_none=False): super().__init__( fieldname, (dict, (str, (dict, (str, valuetype)))), allow_none ) - def __set__(self, obj, value): if not self._check_type_ext(value): raise FieldError('attempt to set a ScopedDict ' - 'of different type. Required typespec: %s' % \ + 'of different type. 
Required typespec: %s' % str(self.typespec)) # Call Field's __set__() method, type checking is already performed - Field.__set__(self, obj, ScopedDict(value) if value != None else value) + Field.__set__(self, obj, + ScopedDict(value) if value is not None else value) diff --git a/reframe/core/launchers.py b/reframe/core/launchers.py index d1ba5c3647..242dce4a27 100644 --- a/reframe/core/launchers.py +++ b/reframe/core/launchers.py @@ -1,4 +1,5 @@ -from math import ceil +import math +import reframe.core.debug as debug class JobLauncher: @@ -6,6 +7,9 @@ def __init__(self, job, options=[]): self.job = job self.options = options + def __repr__(self): + return debug.repr(self) + @property def executable(self): raise NotImplementedError('Attempt to call an abstract method') @@ -16,7 +20,7 @@ def fixed_options(self): def emit_run_command(self, target_executable, builder, **builder_opts): options = ' '.join(self.fixed_options + self.options) - return builder.verbatim('%s %s %s' % \ + return builder.verbatim('%s %s %s' % (self.executable, options, target_executable), **builder_opts) @@ -34,11 +38,12 @@ def executable(self): @property def fixed_options(self): - return [ '-B' ] + return ['-B'] class LauncherWrapper(JobLauncher): """Wrap a launcher object so that its invocation may be modified.""" + def __init__(self, target_launcher, wrapper_command, wrapper_options=[]): super().__init__(target_launcher.job, target_launcher.options) self.target_launcher = target_launcher @@ -51,8 +56,9 @@ def executable(self): @property def fixed_options(self): - return self.wrapper_options + [ self.target_launcher.executable ] + \ - self.target_launcher.fixed_options + return (self.wrapper_options + + [self.target_launcher.executable] + + self.target_launcher.fixed_options) class LocalLauncher(JobLauncher): @@ -77,10 +83,12 @@ def executable(self): @property def fixed_options(self): options = [] - if self.target_launcher and \ - not isinstance(self.target_launcher, LocalLauncher): - num_nodes = ceil(self.job.num_tasks/self.job.num_tasks_per_node) + if (self.target_launcher and + not isinstance(self.target_launcher, LocalLauncher)): + num_nodes = math.ceil( + self.job.num_tasks / self.job.num_tasks_per_node) options.append('-np %s' % self.job.num_tasks) options.append('-nn %s' % num_nodes) options.append('-l %s' % self.target_launcher.executable) + return options diff --git a/reframe/core/logging.py b/reframe/core/logging.py index d3a0ab1272..11e35a33f7 100644 --- a/reframe/core/logging.py +++ b/reframe/core/logging.py @@ -3,11 +3,12 @@ import logging.handlers import sys import shutil +import reframe.core.debug as debug from datetime import datetime -from reframe.settings import settings from reframe.core.exceptions import ConfigurationError, ReframeError +from reframe.settings import settings # Reframe's log levels CRITICAL = 50 @@ -20,26 +21,27 @@ _log_level_names = { - CRITICAL : 'critical', - ERROR : 'error', - WARNING : 'warning', - INFO : 'info', - VERBOSE : 'verbose', - DEBUG : 'debug', - NOTSET : 'undefined' + CRITICAL: 'critical', + ERROR: 'error', + WARNING: 'warning', + INFO: 'info', + VERBOSE: 'verbose', + DEBUG: 'debug', + NOTSET: 'undefined' } _log_level_values = { - 'critical' : CRITICAL, - 'error' : ERROR, - 'warning' : WARNING, - 'info' : INFO, - 'verbose' : VERBOSE, - 'debug' : DEBUG, - 'undefined' : NOTSET, - 'notset' : NOTSET + 'critical': CRITICAL, + 'error': ERROR, + 'warning': WARNING, + 'info': INFO, + 'verbose': VERBOSE, + 'debug': DEBUG, + 'undefined': NOTSET, + 'notset': NOTSET } + def 
_check_level(level): if isinstance(level, int): ret = level @@ -61,6 +63,9 @@ class Handler(logging.Handler): def setLevel(self, level): self.level = _check_level(level) + def __repr__(self): + return debug.repr(self) + class StreamHandler(Handler, logging.StreamHandler): pass @@ -125,9 +130,7 @@ def _extract_handlers(handlers_dict): basename, datetime.now().strftime(timestamp), ext ) - hdlr = RotatingFileHandler( - filename, mode='a+' if append else 'w+' - ) + hdlr = RotatingFileHandler(filename, mode='a+' if append else 'w+') hdlr.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt)) hdlr.setLevel(level) @@ -138,17 +141,18 @@ def _extract_handlers(handlers_dict): class Logger(logging.Logger): def __init__(self, name, level=logging.NOTSET): - # We will set the logger level ourselves so as to bypass the base class' - # check + # We will set the logger level ourselves so as to bypass the base + # class' check super().__init__(name, logging.NOTSET) self.level = _check_level(level) self.check = None + def __repr__(self): + return debug.repr(self) def setLevel(self, level): self.level = _check_level(level) - def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): # Setup dynamic fields of the check @@ -166,58 +170,52 @@ def makeRecord(self, name, level, fn, lno, msg, args, exc_info, return record - # Override all the convenience logging functions, because we want to make # sure that they map to our level definitions def critical(self, msg, *args, **kwargs): return self.log(CRITICAL, msg, *args, **kwargs) - def error(self, msg, *args, **kwargs): return self.log(ERROR, msg, *args, **kwargs) - def warning(self, msg, *args, **kwargs): return self.log(WARNING, msg, *args, **kwargs) - def info(self, msg, *args, **kwargs): return self.log(INFO, msg, *args, **kwargs) - def verbose(self, message, *args, **kwargs): self.log(VERBOSE, message, *args, **kwargs) - def debug(self, message, *args, **kwargs): self.log(DEBUG, message, *args, **kwargs) class LoggerAdapter(logging.LoggerAdapter): - def __init__(self, logger = None, check = None): + def __init__(self, logger=None, check=None): super().__init__( logger, { - 'check_name' : check.name if check else 'reframe', - 'check_jobid' : '-1' + 'check_name': check.name if check else 'reframe', + 'check_jobid': '-1' } ) if self.logger: self.logger.check = check + def __repr__(self): + return debug.repr(self) def setLevel(self, level): if self.logger: super().setLevel(level) - # Override log() function to treat `None` loggers def log(self, level, msg, *args, **kwargs): if self.logger: super().log(level, msg, *args, **kwargs) - def verbose(self, message, *args, **kwargs): self.log(VERBOSE, message, *args, **kwargs) @@ -228,11 +226,12 @@ def verbose(self, message, *args, **kwargs): _logger = None _frontend_logger = null_logger + def configure_logging(config): global _logger global _frontend_logger - if config == None: + if config is None: _logger = None _frontend_logger = null_logger return @@ -247,6 +246,7 @@ def save_log_files(dest): if isinstance(hdlr, logging.FileHandler): shutil.copy(hdlr.baseFilename, dest, follow_symlinks=True) + def getlogger(logger_kind, *args, **kwargs): if logger_kind == 'frontend': return _frontend_logger diff --git a/reframe/core/modules.py b/reframe/core/modules.py index d1a6666c2e..a428eb9a49 100644 --- a/reframe/core/modules.py +++ b/reframe/core/modules.py @@ -6,15 +6,18 @@ import subprocess import re import reframe +import reframe.core.debug as debug import reframe.utility.os as 
os_ext from reframe.core.exceptions import ModuleError + class Module: """Module wrapper. We basically need it for defining operators for use in standard Python algorithms.""" + def __init__(self, name): if not name: raise ModuleError('no module name specified') @@ -27,7 +30,7 @@ def __init__(self, name): self.version = None def __eq__(self, other): - if other != None and self.name == other.name: + if other is not None and self.name == other.name: if not self.version or not other.version: return True else: @@ -38,10 +41,8 @@ def __eq__(self, other): def __neq__(self, other): return not self.__eq__(other) - def __repr__(self): - return self.__str__() - + return debug.repr(self) def __str__(self): if self.version: @@ -50,7 +51,6 @@ def __str__(self): return self.name - def module_equal(rhs, lhs): return Module(rhs) == Module(lhs) @@ -58,7 +58,7 @@ def module_equal(rhs, lhs): def module_list(): try: # LOADEDMODULES may be defined but empty - return [ m for m in os.environ['LOADEDMODULES'].split(':') if m ] + return [m for m in os.environ['LOADEDMODULES'].split(':') if m] except KeyError: return [] @@ -67,7 +67,7 @@ def module_conflict_list(name): """Return the list of conflicted packages""" conflict_list = [] completed = os_ext.run_command( - cmd = '%s show %s' % (reframe.MODULECMD_PYTHON, name)) + cmd='%s show %s' % (reframe.MODULECMD_PYTHON, name)) # Search for lines starting with 'conflict' for line in completed.stderr.split('\n'): @@ -88,7 +88,7 @@ def module_present(name): def module_load(name): completed = os_ext.run_command( - cmd = '%s load %s' % (reframe.MODULECMD_PYTHON, name)) + cmd='%s load %s' % (reframe.MODULECMD_PYTHON, name)) exec(completed.stdout) if not module_present(name): @@ -118,7 +118,7 @@ def module_force_load(name): def module_unload(name): completed = os_ext.run_command( - cmd = '%s unload %s' % (reframe.MODULECMD_PYTHON, name)) + cmd='%s unload %s' % (reframe.MODULECMD_PYTHON, name)) exec(completed.stdout) if module_present(name): @@ -127,7 +127,7 @@ def module_unload(name): def module_purge(): completed = os_ext.run_command( - cmd = '%s purge' % reframe.MODULECMD_PYTHON) + cmd='%s purge' % reframe.MODULECMD_PYTHON) exec(completed.stdout) @@ -137,7 +137,7 @@ def module_path_add(dirs): """ args = ' '.join(dirs) completed = os_ext.run_command( - cmd = '%s use %s' % (reframe.MODULECMD_PYTHON, args)) + cmd='%s use %s' % (reframe.MODULECMD_PYTHON, args)) exec(completed.stdout) @@ -147,5 +147,5 @@ def module_path_remove(dirs): """ args = ' '.join(dirs) completed = os_ext.run_command( - cmd = '%s unuse %s' % (reframe.MODULECMD_PYTHON, args)) + cmd='%s unuse %s' % (reframe.MODULECMD_PYTHON, args)) exec(completed.stdout) diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index e97ad769d4..ca5f0bd81d 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -7,7 +7,7 @@ import os import shutil -import reframe +import reframe.core.debug as debug import reframe.core.logging as logging import reframe.settings as settings import reframe.utility.os as os_ext @@ -15,7 +15,7 @@ from reframe.core.environments import Environment from reframe.core.exceptions import ReframeFatalError from reframe.core.fields import * -from reframe.core.launchers import * +from reframe.core.launchers import * from reframe.core.logging import getlogger, LoggerAdapter, null_logger from reframe.core.schedulers import * from reframe.core.shell import BashScriptBuilder @@ -23,7 +23,101 @@ from reframe.frontend.resources import ResourcesManager -class RegressionTest(object): +class 
_OutputScanInfo: + """Holds information for the output scanning algorithm.""" + + def __init__(self): + self._scanned_patterns = {} + + def __repr__(self): + return debug.repr(self) + + def set_patterns(self, path, patterns): + self._scanned_patterns.setdefault(path, {}) + for patt in patterns: + self._scanned_patterns[path][patt] = None + + def add_match_pattern(self, path, patt): + self._scanned_patterns[path][patt] = [] + + def add_match_tag(self, path, patt, tag, value, reference, action_result): + self._scanned_patterns[path][patt].append( + (tag, value, reference, action_result)) + + def add_match_eof(self, path, eof_result): + self._scanned_patterns[path]['\e'] = eof_result + + # Routines for querying matches + def matched_pattern(self, path, patt): + return self._scanned_patterns[path][patt] + + def matched_tag(self, path, patt, tag): + for tinfo in self._scanned_patterns[path][patt]: + if tinfo[0] == tag: + return tinfo + return None + + def matched_eof(self, path): + return self._scanned_patterns[path]['\e'] + + # Routines for producing formatted reports + def failure_report(self, full_paths=True): + """Provide information of the whole scan process""" + ret = '' + for path, patterns in self._scanned_patterns.items(): + if not full_paths: + path = os.path.basename(path) + + for patt, taglist in patterns.items(): + if patt == '\e': + # taglist here is actually the result of the eof test + ret += "`%s': eof action failed\n" % path + continue + + if taglist is None: + ret += ("`%s': pattern `%s' was not matched\n" % + (path, patt)) + continue + + for t in taglist: + tag, val, ref, res = t + if not res: + ret += ("%s: pattern `%s': " + "action for tag `%s' failed " + "(value: %s, reference: %s)\n" % + (path, patt, tag, val, ref)) + return ret + + def scan_report(self): + """Provide information of the whole scan process""" + ret = '' + for path, patterns in self._scanned_patterns.items(): + ret += "%s:\n" % path + for patt, taglist in patterns.items(): + if patt == '\e': + # Here taglist refers to the action taken at eof + ret += (' action at end of file: %s' % + 'success' if taglist else 'fail') + ret += '\n' + continue + + ret += " pattern: '%s': " % patt + if taglist is None: + ret += 'not matched\n' + continue + + ret += 'matched\n' + for t in taglist: + tag, val, ref, res = t + ret += (" tag: '%s': %s (value: %s, reference: %s)\n" % + (tag, 'success' if res else 'fail', val, str(ref))) + return ret + + def __str__(self): + return str(self._scanned_patterns) + + +class RegressionTest: """Base class for regression checks providing the implementation of the different phases the regression goes through.""" @@ -52,7 +146,8 @@ class RegressionTest(object): num_gpus_per_node = IntegerField('num_gpus_per_node') num_cpus_per_task = IntegerField('num_cpus_per_task', allow_none=True) num_tasks_per_core = IntegerField('num_tasks_per_core', allow_none=True) - num_tasks_per_socket = IntegerField('num_tasks_per_socket', allow_none=True) + num_tasks_per_socket = IntegerField('num_tasks_per_socket', + allow_none=True) use_multithreading = BooleanField('use_multithreading', allow_none=True) exclusive_access = BooleanField('exclusive_access') local = BooleanField('local') @@ -64,7 +159,8 @@ class RegressionTest(object): logger = TypedField('logger', LoggerAdapter) _perf_logfile = StringField('_perf_logfile', allow_none=True) reference = ScopedDictField('reference', object) - sanity_patterns = SanityPatternField('sanity_patterns', allow_none=True) + sanity_patterns = 
SanityPatternField('sanity_patterns', + allow_none=True) perf_patterns = SanityPatternField('perf_patterns', allow_none=True) modules = TypedListField('modules', str) variables = TypedDictField('variables', str, str) @@ -110,8 +206,8 @@ def __init__(self, name, prefix, system, resources): self.local = False # Static directories of the regression check - self.prefix = os.path.abspath(prefix) - self.sourcesdir = os.path.join(self.prefix, 'src') + self.prefix = os.path.abspath(prefix) + self.sourcesdir = os.path.join(self.prefix, 'src') # Dynamic paths of the regression check; will be set in setup() self.stagedir = None @@ -121,9 +217,11 @@ def __init__(self, name, prefix, system, resources): # Output patterns self.sanity_patterns = None + self.sanity_info = _OutputScanInfo() # Performance patterns: None -> no performance checking self.perf_patterns = None + self.perf_info = _OutputScanInfo() self.reference = {} # Environment setup @@ -136,7 +234,7 @@ def __init__(self, name, prefix, system, resources): # Private fields self._resources = resources - # Compilation task output; not meant to be touched by users + # Compilation task output self._compile_task = None # Check-specific logging @@ -146,6 +244,8 @@ def __init__(self, name, prefix, system, resources): # Type of launcher to use for launching jobs self._launcher_type = None + def __repr__(self): + return debug.repr(self) def supports_system(self, partition_name): if '*' in self.valid_systems: @@ -161,25 +261,21 @@ def supports_system(self, partition_name): return partition_name in self.valid_systems - def supports_progenv(self, env_name): if '*' in self.valid_prog_environs: return True return env_name in self.valid_prog_environs - def is_local(self): return self.local or self.current_partition.scheduler == 'local' - def _sanitize_basename(self, name): """Create a basename safe to be used as path component Replace all path separator characters in `name` with underscores.""" return name.replace(os.sep, '_') - def _setup_environ(self, environ): """Setup the current environment and load it.""" @@ -201,7 +297,6 @@ def _setup_environ(self, environ): self.logger.debug('loading environment %s' % self.current_environ.name) self.current_environ.load() - def _setup_paths(self): """Setup the check's dynamic paths.""" self.logger.debug('setting up paths') @@ -219,7 +314,6 @@ def _setup_paths(self): self.stdout = os.path.join(self.stagedir, '%s.out' % self.name) self.stderr = os.path.join(self.stagedir, '%s.err' % self.name) - def _setup_job(self, **job_opts): """Setup the job related to this check.""" @@ -287,20 +381,20 @@ def _setup_job(self, **job_opts): **job_opts) # Get job options from managed resources and prepend them to - # job_opts. We want any user supplied options to be able to override - # those set by the framework. + # job_opts. We want any user supplied options to be able to + # override those set by the framework. 
resources_opts = [] for r, v in self.job_resources.items(): - resources_opts.extend(self.current_partition.get_resource(r, v)) + resources_opts.extend( + self.current_partition.get_resource(r, v)) - self.job.options = self.current_partition.access + \ - resources_opts + self.job.options + self.job.options = (self.current_partition.access + + resources_opts + self.job.options) # Prepend job path to script name self.job.script_filename = os.path.join(self.stagedir, self.job.script_filename) - # FIXME: This is a temporary solution to address issue #157 def _setup_perf_logging(self): self.logger.debug('setting up performance logging') @@ -312,11 +406,11 @@ def _setup_perf_logging(self): perf_logging_config = { 'level': 'INFO', 'handlers': { - self._perf_logfile : { - 'level' : 'DEBUG', - 'format' : '[%(asctime)s] %(check_name)s ' - '(jobid=%(check_jobid)s): %(message)s', - 'append' : True, + self._perf_logfile: { + 'level': 'DEBUG', + 'format': '[%(asctime)s] %(check_name)s ' + '(jobid=%(check_jobid)s): %(message)s', + 'append': True, } } } @@ -335,29 +429,25 @@ def setup(self, system, environ, **job_opts): self._setup_environ(environ) self._setup_paths() self._setup_job(**job_opts) - if self.perf_patterns != None: + if self.perf_patterns is not None: self._setup_perf_logging() - def _copy_to_stagedir(self, path): self.logger.debug('copying %s to stage directory (%s)' % (path, self.stagedir)) self.logger.debug('symlinking files: %s' % self.readonly_files) os_ext.copytree_virtual(path, self.stagedir, self.readonly_files) - def prebuild(self): for cmd in self.prebuild_cmd: self.logger.debug('executing prebuild command: %s' % cmd) os_ext.run_command(cmd, check=True) - def postbuild(self): for cmd in self.postbuild_cmd: self.logger.debug('executing postbuild command: %s' % cmd) os_ext.run_command(cmd, check=True) - def compile(self, **compile_opts): if not self.current_environ: raise ReframeError('no programming environment set') @@ -375,7 +465,6 @@ def compile(self, **compile_opts): else: includedir = os.path.abspath(self.sourcesdir) - # Add the the correct source directory to the include path self.current_environ.include_search_path.append(includedir) @@ -384,8 +473,8 @@ def compile(self, **compile_opts): compile_opts.pop('executable', None) # Change working dir to stagedir although absolute paths are used - # everywhere in the compilation process. This is done to ensure that any - # other files (besides the executable) generated during the the + # everywhere in the compilation process. This is done to ensure that + # any other files (besides the executable) generated during the the # compilation will remain in the stage directory wd_save = os.getcwd() os.chdir(self.stagedir) @@ -406,7 +495,6 @@ def compile(self, **compile_opts): os.chdir(wd_save) self.logger.debug('compilation finished') - def run(self): if not self.current_system or not self.current_partition: raise ReframeError('no system or system partition is set') @@ -415,33 +503,31 @@ def run(self): (self.executable, ' '.join(self.executable_opts)), workdir=self.stagedir) - msg = 'spawned job (%s=%s)' % \ - ('pid' if self.is_local() else 'jobid', self.job.jobid) + msg = ('spawned job (%s=%s)' % + ('pid' if self.is_local() else 'jobid', self.job.jobid)) self.logger.debug(msg) - def poll(self): """Poll the test's status. 
- Returns `True` if the associated job has finished, `False` otherwise.""" + Returns `True` if the associated job has finished, `False` + otherwise.""" if not self.job: return True return self.job.finished() - def wait(self): self.job.wait() self.logger.debug('spawned job finished') - def check_sanity(self): - return self._match_patterns(self.sanity_patterns, None) - + return self._match_patterns(self.sanity_patterns, None, + self.sanity_info) def check_performance(self): - return self._match_patterns(self.perf_patterns, self.reference) - + return self._match_patterns(self.perf_patterns, self.reference, + self.perf_info) def _copy_to_outputdir(self): """Copy checks interesting files to the output directory.""" @@ -457,7 +543,6 @@ def _copy_to_outputdir(self): f = os.path.join(self.stagedir, f) shutil.copy(f, self.outputdir) - def cleanup(self, remove_files=False, unload_env=True): aliased = os.path.samefile(self.stagedir, self.outputdir) if aliased: @@ -475,18 +560,16 @@ def cleanup(self, remove_files=False, unload_env=True): self.current_environ.unload() self.current_partition.local_env.unload() - - def _match_patterns_file(self, path, patterns, reference): + def _match_patterns_infile(self, path, patterns, reference, scan_info): def _resolve_tag(tag): try: - return reference['%s:%s:%s' % \ - (self.current_system.name, - self.current_partition.name, tag)] + key = '%s:%s' % (self.current_partition.fullname, tag) + return reference[key] except KeyError: raise ReframeError( "tag `%s' could not be resolved " "in perf. references for `%s'" % - (tag, self.current_system.name) + (tag, self.current_partition.fullname) ) matched_patt = set() @@ -501,32 +584,39 @@ def _resolve_tag(tag): continue matched_patt.add(patt) + scan_info.add_match_pattern(path, patt) for td in taglist: - tag, conv, thres = td - ref = _resolve_tag(tag) \ - if reference != None else None - if thres(value=conv(match.group(tag)), - reference=ref, - logger=self._perf_logger): + tag, conv, action = td + val = conv(match.group(tag)) + ref = (_resolve_tag(tag) + if reference is not None else None) + res = action(value=val, reference=ref, + logger=self._perf_logger) + if tag in found_tags: + # At least one match is sufficient + continue + + scan_info.add_match_tag(path, patt, tag, val, ref, res) + if res: found_tags.add(tag) + except (OSError, ValueError) as e: - raise ReframeError('Caught %s: %s' % (type(e).__name__, str(e))) + raise ReframeError('Caught %s: %s' % (type(e).__name__, e)) finally: if file: file.close() return (matched_patt, found_tags) - - def _match_patterns(self, multi_patterns, reference): + def _match_patterns(self, multi_patterns, reference, scan_info): if not multi_patterns: return True for file_patt, patterns in multi_patterns.items(): if file_patt == '-' or file_patt == '&1': - files = [ self.stdout ] + files = [self.stdout] elif file_patt == '&2': - files = [ self.stderr ] + files = [self.stderr] else: files = glob.glob(os.path.join(self.stagedir, file_patt)) @@ -542,45 +632,48 @@ def _match_patterns(self, multi_patterns, reference): else: eof_handler = None + required_patterns = patterns.keys() required_tags = frozenset( - [ td[0] for taglist in patterns.values() for td in taglist ] + [td[0] for taglist in patterns.values() for td in taglist] ) ret = True for filename in files: - matched_patt, found_tags = self._match_patterns_file( - filename, patterns, reference + scan_info.set_patterns(filename, required_patterns) + matched_patt, found_tags = self._match_patterns_infile( + filename, patterns, reference, 
scan_info ) - if matched_patt != patterns.keys() or \ - found_tags != required_tags: + if (matched_patt != required_patterns or + found_tags != required_tags): ret = False # We need eof_handler to be called anyway that's why we do not # combine this check with the above and we delay the breaking # out of the loop here - if eof_handler and not eof_handler(logger=self._perf_logger): - ret = False - break + if eof_handler: + eof_result = eof_handler(logger=self._perf_logger) + scan_info.add_match_eof(filename, eof_result) + if not eof_result: + ret = False if eof_handler: # Restore the handler patterns['\e'] = eof_handler + self.logger.debug('output scan info:\n' + scan_info.scan_report()) return ret - def __str__(self): - return '%s (%s)\n' \ - ' tags: [ %s ], maintainers: [ %s ]' % \ + return ('%s (%s)\n' + ' tags: [%s], maintainers: [%s]' % (self.name, self.descr, - ', '.join(self.tags), ', '.join(self.maintainers)) + ', '.join(self.tags), ', '.join(self.maintainers))) class RunOnlyRegressionTest(RegressionTest): def compile(self, **compile_opts): pass - def run(self): # The sourcesdir can be set to None by the user; then we don't copy. if self.sourcesdir: @@ -595,14 +688,12 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.local = True - # No need to setup the job for compile-only checks def setup(self, system, environ, **job_opts): self.current_partition = system self._setup_environ(environ) self._setup_paths() - def compile(self, **compile_opts): super().compile(**compile_opts) @@ -612,10 +703,8 @@ def compile(self, **compile_opts): with open(self.stderr, 'w') as f: f.write(self._compile_task.stderr) - def run(self): pass - def wait(self): pass diff --git a/reframe/core/schedulers.py b/reframe/core/schedulers.py index e6e4bfd9ff..3850c8af7e 100644 --- a/reframe/core/schedulers.py +++ b/reframe/core/schedulers.py @@ -5,19 +5,26 @@ import itertools import os import re +import signal import stat import subprocess import time +import reframe.core.debug as debug import reframe.utility.os as os_ext from datetime import datetime -from reframe.core.exceptions import ReframeError, JobSubmissionError +from reframe.core.exceptions import (ReframeError, + JobSubmissionError, + JobResourcesError) from reframe.core.launchers import LocalLauncher -from reframe.core.shell import BashScriptBuilder from reframe.settings import settings +class _TimeoutExpired(ReframeError): + pass + + class Job: def __init__(self, job_name, @@ -32,56 +39,57 @@ def __init__(self, launcher_options=[], **kwargs): self.name = job_name - self.environs = job_environ_list if job_environ_list else [] + self.environs = job_environ_list or [] self.script_builder = job_script_builder self.num_tasks = num_tasks - self.script_filename = script_filename \ - if script_filename else '%s.sh' % self.name + self.script_filename = script_filename or '%s.sh' % self.name self.options = options self.launcher = launcher(self, launcher_options) - self.stdout = stdout if stdout else '%s.out' % self.name - self.stderr = stderr if stderr else '%s.err' % self.name + self.stdout = stdout or '%s.out' % self.name + self.stderr = stderr or '%s.err' % self.name # Commands to be run before and after the job is launched self.pre_run = [] self.post_run = [] # Live job information; to be filled during job's lifetime - self.jobid = -1 - self.state = None + self.jobid = -1 + self.state = None self.exitcode = None + self._is_cancelling = False + def __repr__(self): + return debug.repr(self) def emit_preamble(self, builder): + for e 
in self.environs: + e.emit_load_instructions(self.script_builder) + for stmt in self.pre_run: builder.verbatim(stmt) - def emit_postamble(self, builder): for stmt in self.post_run: builder.verbatim(stmt) - def _submit(self, script): raise NotImplementedError('Attempt to call an abstract method') - def wait(self): """Wait for the job to finish.""" raise NotImplementedError('Attempt to call an abstract method') - def finished(self): """Status of the job.""" raise NotImplementedError('Attempt to call an abstract method') + def cancel(self): + """Cancel this job.""" + raise NotImplementedError('Attempt to call an abstract method') - def submit(self, cmd, workdir = '.'): + def submit(self, cmd, workdir='.'): # Build the submission script and submit it self.emit_preamble(self.script_builder) - for e in self.environs: - e.emit_load_instructions(self.script_builder) - self.script_builder.verbatim('cd %s' % workdir) self.launcher.emit_run_command(cmd, self.script_builder) self.emit_postamble(self.script_builder) @@ -96,15 +104,15 @@ class JobState: def __init__(self, state): self.state = state + def __repr__(self): + return debug.repr(self) def __eq__(self, other): - return other != None and self.state == other.state - + return other is not None and self.state == other.state def __ne__(self, other): return not self.__eq__(other) - def __str__(self): return self.state @@ -112,11 +120,14 @@ def __str__(self): class JobResources: """Managed job resources. - Custom resources usually configured per system by the system administrators. - """ + Custom resources usually configured per system by the system + administrators.""" + def __init__(self, resources): self.resources = resources + def __repr__(self): + return debug.repr(self) def get(self, name, **kwargs): """Get resource option string for the resource `name'""" @@ -125,12 +136,8 @@ def get(self, name, **kwargs): except KeyError: return None - def getall(self, resources_spec): - """ - Return a list of resource option strings for all the resources specified in - `resourse_spec' - """ + """Get all resource option strings for resources in `resource_spec`.""" ret = [] for opt, kwargs in resources_spec.items(): opt_str = self.get(opt, **kwargs) @@ -145,6 +152,7 @@ class LocalJobState(JobState): def __init__(self, state): super().__init__(state) + LOCAL_JOB_SUCCESS = LocalJobState('SUCCESS') LOCAL_JOB_FAILURE = LocalJobState('FAILURE') LOCAL_JOB_TIMEOUT = LocalJobState('TIMEOUT') @@ -152,59 +160,142 @@ def __init__(self, state): class LocalJob(Job): def __init__(self, - time_limit = (0, 10, 0), + time_limit=(0, 10, 0), **kwargs): super().__init__(num_tasks=1, launcher=LocalLauncher, **kwargs) - # Process launched - self.proc = None + # Launched process self.time_limit = time_limit - + self.cancel_grace_period = 2 + self._wait_poll_secs = 0.1 + self._proc = None def _submit(self, script): # `chmod +x' first, because we will execute the script locally - os.chmod(script.name, os.stat(script.name).st_mode | stat.S_IEXEC); + os.chmod(script.name, os.stat(script.name).st_mode | stat.S_IEXEC) # Run from the absolute path - self._stdout = open(self.stdout, 'w+') - self._stderr = open(self.stderr, 'w+') - self.proc = os_ext.run_command_async(os.path.abspath(script.name), - stdout=self._stdout, - stderr=self._stderr) + self._f_stdout = open(self.stdout, 'w+') + self._f_stderr = open(self.stderr, 'w+') + + # The new process starts also a new session (session leader), so that + # we can later kill any other processes that this might spawn by just + # killing this one. 
+ self._proc = os_ext.run_command_async(os.path.abspath(script.name), + stdout=self._f_stdout, + stderr=self._f_stderr, + start_new_session=True) + # Update job info + self.jobid = self._proc.pid + + def _kill_all(self): + """Send SIGKILL to all the processes of the spawned job.""" + try: + os.killpg(self.jobid, signal.SIGKILL) + except (ProcessLookupError, PermissionError): + # The process group may already be dead or assigned to a different + # group, so ignore this error + pass - # update job info - self.jobid = self.proc.pid + def _term_all(self): + """Send SIGTERM to all the processes of the spawned job.""" + os.killpg(self.jobid, signal.SIGTERM) + def _wait_all(self, timeout=0): + """Wait for all the processes of spawned job to finish. - def wait(self): - # convert timeout to seconds - h, m, s = self.time_limit - timeout = h * 3600 + m * 60 + s - # wait for spawned process to finish + Keyword arguments: + + timeout -- Timeout period for this wait call in seconds (may be a real + number, too). If `None` or `0`, no timeout will be set. + """ + t_wait = datetime.now() + self._proc.wait(timeout=timeout or None) + t_wait = datetime.now() - t_wait try: - self.proc.wait(timeout=timeout) - self.exitcode = self.proc.returncode + # Wait for all processes in the process group to finish + while not timeout or t_wait.total_seconds() < timeout: + t_poll = datetime.now() + os.killpg(self.jobid, 0) + time.sleep(self._wait_poll_secs) + t_poll = datetime.now() - t_poll + t_wait += t_poll + + # Final check + os.killpg(self.jobid, 0) + raise _TimeoutExpired + except (ProcessLookupError, PermissionError): + # Ignore also EPERM errors in case this process id is assigned + # elsewhere and we cannot query its status + return + + def cancel(self): + """Cancel the current job. + + The SIGTERM signal will be sent first to all the processes of this job + and after a grace period (default 2s) the SIGKILL signal will be send. + + This function waits for the spawned process tree to finish. + """ + if self.jobid == -1: + return + + self._term_all() + + # Set the time limit to the grace period and let wait() do the final + # killing + self.time_limit = (0, 0, self.cancel_grace_period) + self.wait() + + def wait(self, timeout=None): + """Wait for the spawned job to finish. + + As soon as the parent job process finishes, all of its spawned + subprocesses will be forced to finish, too. + + Upon return, the whole process tree of the spawned job process will be + cleared, unless any of them has called `setsid()`. + + Keyword arguments: + + timeout -- Timeout period for this wait call in seconds. If `None` the + `self.time_limit` will be used. 
+ """ + if self.state is not None: + # Job has been already waited for + return + + if timeout is None: + # Convert time_limit to seconds + h, m, s = self.time_limit + timeout = h * 3600 + m * 60 + s + + try: + self._wait_all(timeout=timeout) + self.exitcode = self._proc.returncode if self.exitcode != 0: self.state = LOCAL_JOB_FAILURE else: self.state = LOCAL_JOB_SUCCESS - except subprocess.TimeoutExpired: - self.proc.kill() - # we need the wait to avoid zombie processes - self.proc.wait() + except (_TimeoutExpired, subprocess.TimeoutExpired): self.state = LOCAL_JOB_TIMEOUT - - # close stdout/stderr finally: - self._stdout.close() - self._stderr.close() - + # Cleanup all the processes of this job + self._kill_all() + self._wait_all() + self._f_stdout.close() + self._f_stderr.close() def finished(self): - # poll spawned process - self.proc.poll() - if self.proc.returncode == None: + """Check if the spawned process has finished. + + This function does not wait the process. It just queries its state. If + the process has finished, you *must* call wait() to properly cleanup + after it. + """ + self._proc.poll() + if self._proc.returncode is None: return False return True @@ -233,18 +324,18 @@ def __init__(self, state): class SlurmJob(Job): def __init__(self, - time_limit = (0, 10, 0), - use_smt = None, - nodelist = None, - exclude = None, - partition = None, - reservation = None, - account = None, + time_limit=(0, 10, 0), + use_smt=None, + nodelist=None, + exclude=None, + partition=None, + reservation=None, + account=None, num_tasks_per_node=None, num_cpus_per_task=None, num_tasks_per_core=None, num_tasks_per_socket=None, - exclusive_access = False, + exclusive_access=False, **kwargs): super().__init__(**kwargs) self.partition = partition @@ -263,33 +354,53 @@ def __init__(self, self.num_cpus_per_task = num_cpus_per_task self.num_tasks_per_core = num_tasks_per_core self.num_tasks_per_socket = num_tasks_per_socket - self.completion_states = [ SLURM_JOB_BOOT_FAIL, - SLURM_JOB_CANCELLED, - SLURM_JOB_COMPLETED, - SLURM_JOB_FAILED, - SLURM_JOB_NODE_FAILED, - SLURM_JOB_PREEMPTED, - SLURM_JOB_TIMEOUT ] + self.completion_states = [SLURM_JOB_BOOT_FAIL, + SLURM_JOB_CANCELLED, + SLURM_JOB_COMPLETED, + SLURM_JOB_FAILED, + SLURM_JOB_NODE_FAILED, + SLURM_JOB_PREEMPTED, + SLURM_JOB_TIMEOUT] + self.pending_states = [SLURM_JOB_CONFIGURING, + SLURM_JOB_PENDING] + # Reasons to cancel a pending job: if the job is expected to remain + # pending for a much longer time then usual (mostly if a sysadmin + # intervention is required) + self.cancel_reasons = ['FrontEndDown', + 'Licenses', # May require sysadmin + 'NodeDown', + 'PartitionDown', + 'PartitionInactive', + 'PartitionNodeLimit', + 'QOSJobLimit', + 'QOSResourceLimit', + 'ReqNodeNotAvail', # Inaccurate SLURM doc + 'QOSUsageThreshold'] def emit_preamble(self, builder): builder.verbatim('%s --job-name="%s"' % (self.prefix, self.name)) - builder.verbatim('%s --time=%s' % (self.prefix, - '%d:%d:%d' % self.time_limit)) + builder.verbatim('%s --time=%s' % + (self.prefix, '%d:%d:%d' % self.time_limit)) builder.verbatim('%s --ntasks=%d' % (self.prefix, self.num_tasks)) if self.num_tasks_per_node: - builder.verbatim('%s --ntasks-per-node=%d' % (self.prefix, - self.num_tasks_per_node)) + builder.verbatim('%s --ntasks-per-node=%d' % + (self.prefix, self.num_tasks_per_node)) + if self.num_cpus_per_task: - builder.verbatim('%s --cpus-per-task=%d' % (self.prefix, - self.num_cpus_per_task)) + builder.verbatim('%s --cpus-per-task=%d' % + (self.prefix, self.num_cpus_per_task)) 
+ if self.num_tasks_per_core: - builder.verbatim('%s --ntasks-per-core=%d' % (self.prefix, - self.num_tasks_per_core)) + builder.verbatim('%s --ntasks-per-core=%d' % + (self.prefix, self.num_tasks_per_core)) + if self.num_tasks_per_socket: - builder.verbatim('%s --ntasks-per-socket=%d' % (self.prefix, - self.num_tasks_per_socket)) + builder.verbatim('%s --ntasks-per-socket=%d' % + (self.prefix, self.num_tasks_per_socket)) + if self.partition: - builder.verbatim('%s --partition=%s' % (self.prefix, self.partition)) + builder.verbatim('%s --partition=%s' % + (self.prefix, self.partition)) if self.exclusive_access: builder.verbatim('%s --exclusive' % self.prefix) @@ -306,9 +417,9 @@ def emit_preamble(self, builder): builder.verbatim( '%s --exclude=%s' % (self.prefix, self.exclude)) - if self.use_smt != None: + if self.use_smt is not None: hint = 'multithread' if self.use_smt else 'nomultithread' - builder.verbatim('%s --hint=%s'%(self.prefix, hint)) + builder.verbatim('%s --hint=%s' % (self.prefix, hint)) if self.reservation: builder.verbatim('%s --reservation=%s' % (self.prefix, @@ -324,7 +435,6 @@ def emit_preamble(self, builder): super().emit_preamble(builder) - def _submit(self, script): cmd = 'sbatch %s' % script.name completed = os_ext.run_command( @@ -346,25 +456,23 @@ def _submit(self, script): if not self.stderr: self.stderr = self.stdout - def _update_state(self): - """ - Check the status of the job. - """ + """Check the status of the job.""" intervals = itertools.cycle(settings.job_init_poll_intervals) state_match = None - while not state_match and \ - self.job_init_poll_num_tries < settings.job_init_poll_max_tries: + max_tries = settings.job_init_poll_max_tries + while (not state_match and + self.job_init_poll_num_tries < max_tries): # Query job state persistently. When you first submit, the job may # not be yet registered in the database; so try some times We # restrict the `sacct' query to today (`-S' option), so as to avoid # possible older and stale slurm database entries. completed = os_ext.run_command( - 'sacct -S %s -P -j %s -o jobid,state,exitcode' % \ + 'sacct -S %s -P -j %s -o jobid,state,exitcode' % (datetime.now().strftime('%F'), self.jobid), check=True) state_match = re.search( - '^(?P\d+)\|(?P\S+)\|' + '^(?P\d+)\|(?P\S+)([^\|]*)\|' '(?P\d+)\:(?P\d+)', completed.stdout, re.MULTILINE) if not state_match: @@ -374,16 +482,44 @@ def _update_state(self): if not state_match: raise ReframeError('Querying initial job state timed out') - if state_match.group('jobid') != self.jobid: - # this shouldn't happen - raise ReframeFatalError( - 'Oops: job ids do not match. Expected %s, got %s' % \ - (self.jobid, state_match.group('jobid'))) + assert self.jobid == state_match.group('jobid') - self.state = JobState(state_match.group('state')) + self.state = SlurmJobState(state_match.group('state')) self.exitcode = int(state_match.group('exitcode')) self.signal = int(state_match.group('signal')) + def _cancel_if_blocked(self): + if self._is_cancelling or self.state not in self.pending_states: + return + + completed = os_ext.run_command('squeue -j %s -o "%%i|%%T|%%r" ' % + self.jobid, check=True) + # Note: the reason may given as "ReqNodeNotAvail, + # UnavailableNodes:nid00[408,411-415]" by squeue. In this case, + # we take only the string up to the comma. + state_match = re.search( + '^(?P\d+)\|(?P\S+)\|' + '(?P\w+)(\W+(?P.*))?', + completed.stdout, re.MULTILINE) + # If squeue does not return any job info (state_match is empty), + # it means normally that the job has finished meanwhile. 
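# A standalone sketch of the `sacct -P -o jobid,state,exitcode` parsing done
# in _update_state() above, with the regex group names written out
# explicitly (they are inferred from the state_match.group(...) calls in the
# surrounding code); the sample output line is hypothetical.
import re

sample = '12345|COMPLETED|0:0'
m = re.search(r'^(?P<jobid>\d+)\|(?P<state>\S+)([^|]*)\|'
              r'(?P<exitcode>\d+):(?P<signal>\d+)',
              sample, re.MULTILINE)
assert m.group('state') == 'COMPLETED'
assert (m.group('exitcode'), m.group('signal')) == ('0', '0')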
So we + # can exit this function. + if not state_match: + return + + assert self.jobid == state_match.group('jobid') + # Assure that the job is still in a pending state + state = SlurmJobState(state_match.group('state')) + reason = state_match.group('reason') + if state in self.pending_states and reason in self.cancel_reasons: + self.cancel() + reason_msg = ('job canceled because it was blocked in pending ' + 'state due to the following SLURM reason: ' + reason) + reason_details = state_match.group('reason_details') + if reason_details: + reason_msg += ', ' + reason_details + raise JobResourcesError(reason_msg) + def wait(self): intervals = itertools.cycle(settings.job_state_poll_intervals) @@ -392,9 +528,23 @@ def wait(self): return self._update_state() - while not self.state in self.completion_states: + self._cancel_if_blocked() + while self.state not in self.completion_states: time.sleep(next(intervals)) self._update_state() + self._cancel_if_blocked() + + def cancel(self): + """Cancel job execution. + + This call waits until the job has finished.""" + if self.jobid == -1: + return + + os_ext.run_command('scancel %s' % self.jobid, + check=True, timeout=settings.job_submit_timeout) + self._is_cancelling = True + self.wait() def finished(self): self._update_state() diff --git a/reframe/core/shell.py b/reframe/core/shell.py index 528287e56c..3993da66b4 100644 --- a/reframe/core/shell.py +++ b/reframe/core/shell.py @@ -3,9 +3,11 @@ # import string +import reframe.core.debug as debug + class ShellScriptBuilder: - def __init__(self, name = 'default', login=False): + def __init__(self, name='default', login=False): self.name = name if login: self.header = '#!/bin/sh -l' @@ -14,20 +16,21 @@ def __init__(self, name = 'default', login=False): self.statements = [] + def __repr__(self): + return debug.repr(self) def verbatim(self, stmt, suppress=False): - """ - Append statement stmt verbatim. If suppress=True, stmt will not be in the - generated script file but it will be returned from this function. This - feature is useful when you want only the command that would be generated - but you don't want it to be actually generated in the scipt file. - """ + """Append statement stmt verbatim. + + If suppress=True, stmt will not be in the generated script file but it + will be returned from this function. 
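# A minimal sketch of the polling pattern used by wait() above: cycle over a
# list of back-off intervals (the values mirror
# settings.job_state_poll_intervals) until the job reaches a completion
# state; `job` is any object exposing the finished() interface shown here.
import itertools
import time

def wait_for(job, intervals=(1, 2, 3)):
    poll = itertools.cycle(intervals)
    while not job.finished():
        time.sleep(next(poll))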
This feature is useful when you + want only the command that would be generated but you don't want it to + be actually generated in the scipt file.""" if not suppress: self.statements.append(stmt) return stmt - def set_variable(self, name, value, export=False, suppress=False): if export: export_keyword = 'export ' @@ -38,17 +41,15 @@ def set_variable(self, name, value, export=False, suppress=False): '%s%s=%s' % (export_keyword, name, value), suppress ) - def unset_variable(self, name, suppress=False): return self.verbatim('unset %s' % name, suppress) - def finalise(self): return '%s\n' % self.header + '\n'.join(self.statements) + '\n' class BashScriptBuilder(ShellScriptBuilder): - def __init__(self, name = 'bash', login=False): + def __init__(self, name='bash', login=False): super().__init__(name, login) if login: self.header = '#!/bin/bash -l' diff --git a/reframe/core/systems.py b/reframe/core/systems.py index e2007c59ff..3528f044f6 100644 --- a/reframe/core/systems.py +++ b/reframe/core/systems.py @@ -1,9 +1,11 @@ import copy +import reframe.core.debug as debug import reframe.utility.os as os_ext from reframe.core.environments import * from reframe.core.exceptions import ReframeError + class SystemPartition: name = NonWhitespaceField('name') descr = StringField('descr') @@ -31,26 +33,23 @@ def __init__(self, name, system): self.system = system self.max_jobs = 1 - @property def fullname(self): """Return fully-qualified name for this partition.""" return '%s:%s' % (self.system.name, self.name) - def get_resource(self, name, value): """Instantiate managed resource `name' with `value'""" ret = [] for r in self.resources.get(name, []): try: - args = { name : value } + args = {name: value} ret.append(r.format(**args)) except KeyError: pass return ret - def environment(self, name): for e in self.environs: if e.name == name: @@ -58,24 +57,24 @@ def environment(self, name): return None - def __eq__(self, other): - return other != None and \ - self.name == other.name and \ - self.scheduler == other.scheduler and \ - self.access == other.access and \ - self.environs == other.environs and \ - self.resources == other.resources and \ - self.local_env == other.local_env - + return (other is not None and + self.name == other.name and + self.scheduler == other.scheduler and + self.access == other.access and + self.environs == other.environs and + self.resources == other.resources and + self.local_env == other.local_env) def __ne__(self, other): return not self.__eq__(other) - def __str__(self): return self.name + def __repr__(self): + return debug.repr(self) + class System: """System configuration.""" @@ -90,7 +89,6 @@ def __init__(self, name): self.hostnames = [] self.partitions = [] - def partition(self, name): """Retrieve partition with name""" for p in self.partitions: @@ -99,22 +97,18 @@ def partition(self, name): return None - def __eq__(self, other): - return other != None and \ - self.name == other.name and \ - self.hostnames == other.hostnames and \ - self.partitions == other.partitions - + return (other is not None and + self.name == other.name and + self.hostnames == other.hostnames and + self.partitions == other.partitions) def __ne__(self, other): return not self.__eq__(other) + def __repr__(self): + return debug.repr(self) def __str__(self): - ret = self.name + ' (partitions: [ ' - for p in self.partitions: - ret += str(p) + ' ' - - ret += '])' - return ret + return '%s (partitions: %s)' % (self.name, + [str(p) for p in self.partitions]) diff --git a/reframe/frontend/argparse.py 
b/reframe/frontend/argparse.py index 7895d2c0c7..d4127ee3f7 100644 --- a/reframe/frontend/argparse.py +++ b/reframe/frontend/argparse.py @@ -11,9 +11,9 @@ # are of an "unknown" type to the users of the `argparse` module, since they # inherit from an internal private class. # -# For this reason, we base our design on composition by implementing wrappers of -# both the argument group and the argument parser. These wrappers provide the -# same public interface as their `argparse` counterparts (currently we only +# For this reason, we base our design on composition by implementing wrappers +# of both the argument group and the argument parser. These wrappers provide +# the same public interface as their `argparse` counterparts (currently we only # implement the part of the interface that matters for Reframe), delegating the # parsing work to them. For these "shadow" data structures for argument groups # and the parser, we follow a similar design as in the `argparse` module: both @@ -27,7 +27,7 @@ # -class _ArgumentHolder(object): +class _ArgumentHolder: def __init__(self, holder): self._holder = holder self._defaults = argparse.Namespace() @@ -37,22 +37,20 @@ def __init__(self, holder): if m[0] != '_': setattr(self.__class__, m, ForwardField(self._holder, m)) - def _attr_from_flag(self, *flags): if not flags: raise ValueError('could not infer a dest name: no flags defined') return flags[-1].lstrip('-').replace('-', '_') - def _extract_default(self, *flags, **kwargs): attr = kwargs.get('dest', self._attr_from_flag(*flags)) action = kwargs.get('action', None) if action == 'store_true' or action == 'store_false': # These actions imply a default; we will convert them to their # 'const' action equivalent and add an explicit default value - kwargs['action'] = 'store_const' - kwargs['const'] = True if action == 'store_true' else False + kwargs['action'] = 'store_const' + kwargs['const'] = True if action == 'store_true' else False kwargs['default'] = False if action == 'store_true' else True try: @@ -63,7 +61,6 @@ def _extract_default(self, *flags, **kwargs): finally: return kwargs - def add_argument(self, *flags, **kwargs): return self._holder.add_argument( *flags, **self._extract_default(*flags, **kwargs) @@ -81,37 +78,35 @@ class ArgumentParser(_ArgumentHolder): `argparse.ArgumenParser`. In fact, it uses such a parser internally, delegating all the calls to it. The key difference is how newly parsed options are combined with existing namespaces in `parse_args()`.""" + def __init__(self, **kwargs): super().__init__(argparse.ArgumentParser(**kwargs)) self._groups = [] - def add_argument_group(self, *args, **kwargs): - group = _ArgumentGroup(self._holder.add_argument_group(*args, **kwargs)) + group = _ArgumentGroup( + self._holder.add_argument_group(*args, **kwargs)) self._groups.append(group) return group def _resolve_attr(self, attr, namespaces): for ns in namespaces: - if ns == None: + if ns is None: continue val = ns.__dict__.setdefault(attr, None) - if val != None: + if val is not None: return val return None - def _update_defaults(self): for g in self._groups: self._defaults.__dict__.update(g._defaults.__dict__) - def print_help(self): self._holder.print_help() - def parse_args(self, args=None, namespace=None): """Convert argument strings to objects and return them as attributes of a namespace. @@ -120,9 +115,9 @@ def parse_args(self, args=None, namespace=None): `argparse.ArgumentParser.parse_args()`. 
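# A usage sketch of the namespace-merging behaviour this parse_args()
# implements: an option missing from the command line is resolved first from
# the caller's namespace and only then from the add_argument() default.
import argparse
from reframe.frontend.argparse import ArgumentParser

parser = ArgumentParser()
group = parser.add_argument_group('demo options')
group.add_argument('--keep-stage-files', action='store_true')

ns = argparse.Namespace(keep_stage_files=True)
options = parser.parse_args([], namespace=ns)
# options.keep_stage_files is True (taken from `ns`); without `ns` it would
# fall back to the implied store_true default, i.e. False.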
If `namespace` is not `None` and an attribute has not been assigned a - value during the parsing process of argument strings `args`, a value for - it will be looked up first in `namespace` and if not found there, it - will be assigned the default value as specified in its corresponding + value during the parsing process of argument strings `args`, a value + for it will be looked up first in `namespace` and if not found there, + it will be assigned the default value as specified in its corresponding `add_argument()` call. If no default value was specified either, the attribute will be set to `None`.""" @@ -136,9 +131,9 @@ def parse_args(self, args=None, namespace=None): # Update parser's defaults with groups' defaults self._update_defaults() for attr, val in options.__dict__.items(): - if val == None: + if val is None: options.__dict__[attr] = self._resolve_attr( - attr, [ namespace, self._defaults ] + attr, [namespace, self._defaults] ) return options diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index e728f21857..5ff795f595 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -11,10 +11,11 @@ from reframe.core.logging import getlogger from reframe.frontend.argparse import ArgumentParser from reframe.frontend.executors import Runner -from reframe.frontend.executors.policies import SerialExecutionPolicy, \ - AsynchronousExecutionPolicy -from reframe.frontend.loader import RegressionCheckLoader, \ - SiteConfiguration, autodetect_system +from reframe.frontend.executors.policies import (SerialExecutionPolicy, + AsynchronousExecutionPolicy) +from reframe.frontend.loader import (RegressionCheckLoader, + SiteConfiguration, + autodetect_system) from reframe.frontend.printer import PrettyPrinter from reframe.frontend.resources import ResourcesManager from reframe.settings import settings @@ -40,7 +41,8 @@ def main(): argparser = ArgumentParser() output_options = argparser.add_argument_group( 'Options controlling regression directories') - locate_options = argparser.add_argument_group('Options for locating checks') + locate_options = argparser.add_argument_group( + 'Options for locating checks') select_options = argparser.add_argument_group( 'Options for selecting checks') action_options = argparser.add_argument_group( @@ -146,7 +148,7 @@ def main(): help='Skip prog. environment check') run_options.add_argument( '--exec-policy', metavar='POLICY', action='store', - choices=[ 'serial', 'async' ], default='serial', + choices=['serial', 'async'], default='serial', help='Specify the execution policy for running the regression tests. ' 'Available policies: "serial" (default), "async"') run_options.add_argument( @@ -219,7 +221,6 @@ def main(): list_supported_systems(site_config.systems.values(), printer) sys.exit(1) - if options.mode: try: mode_key = '%s:%s' % (system.name, options.mode) @@ -232,7 +233,6 @@ def main(): printer.error("no such execution mode: `%s'" % (options.mode)) sys.exit(1) - # Setup the check loader if options.checkpath: load_path = [] @@ -276,14 +276,13 @@ def main(): stage_prefix=system.stagedir, log_prefix=system.logdir, timestamp=options.timestamp) - if os_ext.samefile(resources.stage_prefix, resources.output_prefix) and \ - not options.keep_stage_files: + if (os_ext.samefile(resources.stage_prefix, resources.output_prefix) and + not options.keep_stage_files): printer.error('stage and output refer to the same directory. 
' 'If this is on purpose, please use also the ' "`--keep-stage-files' option.") sys.exit(1) - printer.log_config(options) # Print command line @@ -296,7 +295,7 @@ def main(): printer.info('Reframe paths') printer.info('=============') printer.info(' Check prefix : %s' % loader.prefix) - printer.info('%03s Check search path : %s' % \ + printer.info('%03s Check search path : %s' % ('(R)' if loader.recurse else '', "'%s'" % ':'.join(loader.load_path))) printer.info(' Stage dir prefix : %s' % resources.stage_prefix) @@ -308,7 +307,8 @@ def main(): # Filter checks by name checks_matched = filter( - lambda c: c if c.name not in options.exclude_names else None, + lambda c: + c if c.name not in options.exclude_names else None, checks_found ) @@ -328,9 +328,9 @@ def main(): # Filter checks by prgenv if not options.skip_prgenv_check: checks_matched = filter( - lambda c: c \ - if sum([ c.supports_progenv(p) - for p in options.prgenv ]) == len(options.prgenv) + lambda c: c + if sum([c.supports_progenv(p) + for p in options.prgenv]) == len(options.prgenv) else None, checks_matched ) @@ -352,18 +352,17 @@ def main(): checks_matched ) - - checks_matched = [ c for c in checks_matched ] + checks_matched = [c for c in checks_matched] # Act on checks # Unload regression's module and load user-specified modules - module_unload(settings.module_name); + module_unload(settings.module_name) for m in options.user_modules: try: module_force_load(m) except ModuleError: - printer.info("Could not load module `%s': Skipping..." % m) + printer.info("Could not load module `%s': Skipping..." % m) success = True if options.list: @@ -377,13 +376,15 @@ def main(): elif options.exec_policy == 'async': exec_policy = AsynchronousExecutionPolicy() else: - # This should not happen, since choices are handled by argparser + # This should not happen, since choices are handled by + # argparser printer.error("unknown execution policy `%s': Exiting...") sys.exit(1) exec_policy.skip_system_check = options.skip_system_check exec_policy.force_local = options.force_local - exec_policy.relax_performance_check = options.relax_performance_check + exec_policy.relax_performance_check = ( + options.relax_performance_check) exec_policy.skip_environ_check = options.skip_prgenv_check exec_policy.skip_sanity_check = options.skip_sanity_check exec_policy.skip_performance_check = options.skip_performance_check @@ -421,7 +422,7 @@ def main(): printer.error("`%s': %s" % (e.filename, e.strerror)) sys.exit(1) except Exception as e: - printer.error('fatal error: %s\n' % str(e)) + printer.error('fatal error: %s\n' % e) traceback.print_exc() sys.exit(1) finally: diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index f825a29249..56fb7531e1 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -1,4 +1,5 @@ import sys +import reframe.core.debug as debug from reframe.core.environments import EnvironmentSnapshot from reframe.core.exceptions import ReframeFatalError, ReframeError @@ -8,7 +9,8 @@ from reframe.frontend.statistics import TestStats from reframe.utility.sandbox import Sandbox -class TestCase(object): + +class TestCase: """Test case result placeholder class.""" STATE_SUCCESS = 0 STATE_FAILURE = 1 @@ -19,13 +21,16 @@ def __init__(self, executor): self.failed_stage = None self.exc_info = None + def __repr__(self): + return debug.repr(self) + def valid(self): - return self.result != None + return self.result is not None def success(self): self.result = 
TestCase.STATE_SUCCESS - def fail(self, exc_info = None): + def fail(self, exc_info=None): self.result = TestCase.STATE_FAILURE self.failed_stage = self.executor.current_stage self.exc_info = exc_info @@ -34,11 +39,11 @@ def failed(self): return self.result == TestCase.STATE_FAILURE -class RegressionTestExecutor(object): +class RegressionTestExecutor: """Responsible for the execution of `RegressionTest`'s pipeline stages. - Keeps track of the current stage and implements relaxed performance checking - logic.""" + Keeps track of the current stage and implements relaxed performance + checking logic.""" check = TypedField('check', RegressionTest) current_stage = StringField('current_stage') @@ -47,6 +52,9 @@ def __init__(self, check): self.check = check self.relax_performance_check = False + def __repr__(self): + return debug.repr(self) + def setup(self, system, environ, **job_opts): self.current_stage = 'setup' self.check.setup(system, environ, **job_opts) @@ -88,17 +96,20 @@ def cleanup(self, remove_files=False, unload_env=True): self.current_stage = 'completed' -class Runner(object): +class Runner: """Responsible for executing a set of regression tests based on an execution policy.""" - def __init__(self, policy, printer = None): - self.printer = PrettyPrinter() if not printer else printer + + def __init__(self, policy, printer=None): + self.printer = printer or PrettyPrinter() self.policy = policy self.policy.printer = self.printer self.policy.runner = self self.sandbox = Sandbox() self.stats = None + def __repr__(self): + return debug.repr(self) def runall(self, checks, system): try: @@ -113,20 +124,18 @@ def runall(self, checks, system): num_failures = self.stats.num_failures() self.printer.status( 'FAILED' if num_failures else 'PASSED', - 'Ran %d test case(s) from %d check(s) (%d failure(s))' % \ + 'Ran %d test case(s) from %d check(s) (%d failure(s))' % (self.stats.num_cases(), len(checks), num_failures), just='center' ) self.printer.timestamp('Finished on', 'short double line') - def _partition_supported(self, check, partition): if self.policy.skip_system_check: return True return check.supports_system(partition.name) - def _environ_supported(self, check, environ): precond = True if self.policy.only_environs: @@ -137,7 +146,6 @@ def _environ_supported(self, check, environ): else: return precond and check.supports_progenv(environ.name) - def _runall(self, checks, system): self.policy.enter() for c in checks: @@ -153,7 +161,7 @@ def _runall(self, checks, system): for e in p.environs: if not self._environ_supported(c, e): self.printer.status('SKIP', - 'skipping %s for %s' % \ + 'skipping %s for %s' % (e.name, p.fullname), just='center') continue @@ -178,10 +186,11 @@ def _runall(self, checks, system): self.policy.exit() -class ExecutionPolicy(object): +class ExecutionPolicy: """Base abstract class for execution policies. 
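# A condensed sketch of how an execution policy drives a single test case
# through the check pipeline. setup() and cleanup() appear above; the
# intermediate stage methods are elided from this hunk and their names are
# assumed here. Error handling and the TestCase bookkeeping are omitted.
executor = RegressionTestExecutor(check)   # `check` comes from the loader
executor.setup(partition, environ)         # partition/environ from the runner loop
executor.compile()
executor.run()
executor.wait()
executor.check_sanity()
executor.check_performance()
executor.cleanup(remove_files=False)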
An execution policy implements the regression check pipeline.""" + def __init__(self): # Options controlling the check execution self.skip_system_check = False @@ -203,6 +212,9 @@ def __init__(self): self.sched_exclude_nodelist = None self.sched_options = [] + def __repr__(self): + return debug.repr(self) + def enter(self): pass diff --git a/reframe/frontend/executors/policies.py b/reframe/frontend/executors/policies.py index 935af30942..9b40f6131b 100644 --- a/reframe/frontend/executors/policies.py +++ b/reframe/frontend/executors/policies.py @@ -1,12 +1,13 @@ import itertools import time import sys +import reframe.core.debug as debug from reframe.core.exceptions import ReframeFatalError from reframe.core.logging import getlogger -from reframe.frontend.executors import ExecutionPolicy, \ - RegressionTestExecutor, \ - TestCase +from reframe.frontend.executors import (ExecutionPolicy, + RegressionTestExecutor, + TestCase) from reframe.frontend.statistics import TestStats from reframe.settings import settings from reframe.core.environments import EnvironmentSnapshot @@ -17,14 +18,12 @@ def __init__(self): super().__init__() self.test_cases = [] - def getstats(self): return TestStats(self.test_cases) - def run_check(self, check, partition, environ): self.printer.status( - 'RUN', "%s on %s using %s" % \ + 'RUN', "%s on %s using %s" % (check.name, partition.fullname, environ.name) ) try: @@ -77,7 +76,7 @@ def run_check(self, check, partition, environ): self.environ_snapshot.load() -class RunningTestCase(object): +class RunningTestCase: def __init__(self, testcase, environ): self.testcase = testcase self.environ = environ @@ -85,12 +84,16 @@ def __init__(self, testcase, environ): # Test case has finished, but has not been waited for yet self.zombie = False + def __repr__(self): + return debug.repr(self) + class WaitError(BaseException): """Mark wait errors during the asynchronous execution of test cases. 
It stores the `RunningTestCase` that has failed during waiting and the associated exception info.""" + def __init__(self, running_testcase, exc_info): self.running_case = running_testcase self.exc_info = exc_info @@ -116,7 +119,6 @@ def __init__(self): self.logger = getlogger('frontend') - def _compile_run_testcase(self, testcase): try: executor = testcase.executor @@ -135,7 +137,6 @@ def _compile_run_testcase(self, testcase): executor.check.current_environ, not testcase.failed()) - def _finalize_testcase(self, ready_testcase): try: ready_testcase.environ.load() @@ -171,7 +172,6 @@ def _finalize_testcase(self, ready_testcase): executor.check.current_environ, not testcase.failed()) - def _failall(self): """Mark all tests as failures""" for rc in self.running_cases: @@ -181,17 +181,14 @@ def _failall(self): for rc in ready_list: rc.testcase.fail(sys.exc_info()) - def enter_partition(self, c, p): self.running_cases_counts.setdefault(p.fullname, 0) self.ready_cases.setdefault(p.fullname, []) self.max_jobs.setdefault(p.fullname, p.max_jobs) - def getstats(self): return TestStats(self.test_cases) - def _print_executor_status(self, status, executor): checkname = executor.check.name partname = executor.check.current_partition.fullname @@ -200,7 +197,6 @@ def _print_executor_status(self, status, executor): self.logger.debug('%s %s' % (status.lower(), msg)) self.printer.status(status, msg) - def run_check(self, check, partition, environ): try: executor = RegressionTestExecutor(check) @@ -228,7 +224,7 @@ def run_check(self, check, partition, environ): if self.running_cases_counts[partname] < partition.max_jobs: # Test's environment is already loaded; no need to be reloaded - self._reschedule(ready_testcase, load_env=False) + self._reschedule(ready_testcase, load_env=False) else: self._print_executor_status('HOLD', executor) self.ready_cases[partname].append(ready_testcase) @@ -242,8 +238,8 @@ def run_check(self, check, partition, environ): raise except: # Here we are sure that test case has failed during setup, since - # _compile_and_run() handles already non-fatal exceptions. Though we - # check again the testcase, just in case. + # _compile_and_run() handles already non-fatal exceptions. Though + # we check again the testcase, just in case. 
if not testcase.failed(): testcase.fail(sys.exc_info()) finally: @@ -256,7 +252,6 @@ def run_check(self, check, partition, environ): self.test_cases.append(testcase) self.environ_snapshot.load() - def _update_running_counts(self): """Update the counts of running checks per partition.""" freed_slots = {} @@ -273,7 +268,6 @@ def _update_running_counts(self): for p, ns in freed_slots.items(): self.logger.debug('freed %s slot(s) on partition %s' % (ns, p)) - def _reschedule(self, ready_testcase, load_env=True): testcase = ready_testcase.testcase executor = testcase.executor @@ -289,7 +283,6 @@ def _reschedule(self, ready_testcase, load_env=True): self.running_cases_counts[partname] += 1 self.running_cases.append(ready_testcase) - def _reschedule_all(self): self._update_running_counts() for partname, num_jobs in self.running_cases_counts.items(): @@ -308,7 +301,6 @@ def _reschedule_all(self): ready_case.environ.load() self._reschedule(ready_case) - def _waitany(self): intervals = itertools.cycle(settings.job_state_poll_intervals) while True: @@ -336,8 +328,8 @@ def _waitany(self): partname = running_check.current_partition.fullname self.running_cases_counts[partname] -= 1 - # This is just for completeness; the case is no more - # a zombie, since it has been waited for + # This is just for completeness; the case is no + # more a zombie, since it has been waited for running.zombie = False if testcase.valid(): @@ -349,7 +341,6 @@ def _waitany(self): time.sleep(next(intervals)) - def exit(self): self.printer.separator( 'short single line', 'waiting for spawned checks' @@ -372,14 +363,3 @@ def exit(self): self.printer.separator( 'short single line', 'all spawned checks finished' ) - - -class DebugAsynchronousExecutionPolicy(AsynchronousExecutionPolicy): - def __init__(self): - super().__init__() - self.keep_stage_files = True - self.checks = [] - - def exit_environ(self, c, p, e): - super().exit_environ(c, p, e) - self.checks.append(c) diff --git a/reframe/frontend/loader.py b/reframe/frontend/loader.py index a64e5dc917..90193bd61e 100644 --- a/reframe/frontend/loader.py +++ b/reframe/frontend/loader.py @@ -6,6 +6,7 @@ import os import logging import sys +import reframe.core.debug as debug import reframe.utility.os as os_ext from importlib.machinery import SourceFileLoader @@ -25,18 +26,20 @@ def valid(self): return self._validated def visit_FunctionDef(self, node): - if node.name == '_get_checks' and \ - node.col_offset == 0 and \ - node.args.kwarg: + if (node.name == '_get_checks' and + node.col_offset == 0 and + node.args.kwarg): self._validated = True class RegressionCheckLoader: - def __init__(self, load_path, prefix = '', recurse = False): + def __init__(self, load_path, prefix='', recurse=False): self.load_path = load_path - self.prefix = prefix if prefix != None else '' + self.prefix = prefix or '' self.recurse = recurse + def __repr__(self): + return debug.repr(self) def _module_name(self, filename): """Figure out a module name from filename. @@ -49,7 +52,6 @@ def _module_name(self, filename): else: return (os.path.splitext(filename)[0]).replace('/', '.') - def _validate_source(self, filename): """Check if `filename` is a valid Reframe source file. @@ -65,7 +67,6 @@ def _validate_source(self, filename): validator.visit(source_tree) return validator.valid - def load_from_module(self, module, **check_args): """Load user checks from module. 
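# A minimal check file of the shape the AST validator above accepts: a
# module-level `_get_checks(**kwargs)` returning RegressionTest instances
# (this mirrors unittests/resources/emptycheck.py).
import os

from reframe.core.pipeline import RegressionTest


class MyCheck(RegressionTest):
    def __init__(self, **kwargs):
        super().__init__('mycheck', os.path.dirname(__file__), **kwargs)


def _get_checks(**kwargs):
    return [MyCheck(**kwargs)]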
@@ -77,11 +78,10 @@ def load_from_module(self, module, **check_args): # already validated candidates = module._get_checks(**check_args) if isinstance(candidates, list): - return [ c for c in candidates if isinstance(c, RegressionTest) ] + return [c for c in candidates if isinstance(c, RegressionTest)] else: return [] - def load_from_file(self, filename, **check_args): module_name = self._module_name(filename) try: @@ -92,10 +92,9 @@ def load_from_file(self, filename, **check_args): return self.load_from_module(loader.load_module(), **check_args) except OSError as e: raise ReframeError( - "Could not load module `%s' from file `%s': %s" % \ + "Could not load module `%s' from file `%s': %s" % (module_name, filename, e.strerror)) - def load_from_dir(self, dirname, recurse=False, **check_args): checks = [] for entry in os.scandir(dirname): @@ -104,16 +103,15 @@ def load_from_dir(self, dirname, recurse=False, **check_args): self.load_from_dir(entry.path, recurse, **check_args) ) - if entry.name.startswith('.') or \ - not entry.name.endswith('.py') or \ - not entry.is_file(): + if (entry.name.startswith('.') or + not entry.name.endswith('.py') or + not entry.is_file()): continue checks.extend(self.load_from_file(entry.path, **check_args)) return checks - def load_all(self, **check_args): """Load all checks in self.load_path. @@ -125,7 +123,7 @@ def load_all(self, **check_args): continue if os.path.isdir(d): checks.extend(self.load_from_dir(d, self.recurse, - **check_args)) + **check_args)) else: checks.extend(self.load_from_file(d, **check_args)) @@ -140,6 +138,8 @@ def __init__(self): self.systems = {} self.modes = ScopedDict({}) + def __repr__(self): + return debug.repr(self) def load_from_dict(self, site_config): if not isinstance(site_config, dict): @@ -191,7 +191,6 @@ def create_env(system, partition, name): except KeyError: raise ConfigurationError("no type specified for `%s'" % name) - # Populate the systems directory for sysname, config in sysconfig.items(): if not isinstance(config, dict): @@ -242,7 +241,7 @@ def create_env(system, partition, name): variables=partconfig.get('variables', {}) ) partition.environs = [ - create_env(sysname, partname, e) \ + create_env(sysname, partname, e) for e in partconfig.get('environs', []) ] partition.access = partconfig.get('access', []) diff --git a/reframe/frontend/printer.py b/reframe/frontend/printer.py index ac00ff7493..1ee3f15ab7 100644 --- a/reframe/frontend/printer.py +++ b/reframe/frontend/printer.py @@ -1,10 +1,14 @@ import datetime import sys +import reframe.core.debug as debug from reframe.core.logging import LoggerAdapter, load_from_dict, getlogger class Colorizer: + def __repr__(self): + return debug.repr(self) + def colorize(string, foreground, background): raise NotImplementedError('attempt to call an abstract method') @@ -28,13 +32,13 @@ class AnsiColorizer(Colorizer): white = '7m' default = '9m' - def colorize(string, foreground, background = None): - return AnsiColorizer.escape_seq + \ - AnsiColorizer.fgcolor + foreground + string + \ - AnsiColorizer.escape_seq + AnsiColorizer.reset_term + def colorize(string, foreground, background=None): + return (AnsiColorizer.escape_seq + + AnsiColorizer.fgcolor + foreground + string + + AnsiColorizer.escape_seq + AnsiColorizer.reset_term) -class PrettyPrinter(object): +class PrettyPrinter: """Pretty printing facility for the framework. 
Final printing is delegated to an internal logger, which is responsible for @@ -46,8 +50,10 @@ def __init__(self): self.status_width = 10 self._logger = getlogger('frontend') + def __repr__(self): + return debug.repr(self) - def separator(self, linestyle, msg = ''): + def separator(self, linestyle, msg=''): if linestyle == 'short double line': line = self.status_width * '=' elif linestyle == 'short single line': @@ -57,8 +63,7 @@ def separator(self, linestyle, msg = ''): self.info('[%s] %s' % (line, msg)) - - def status(self, status, message = '', just=None): + def status(self, status, message='', just=None): if just == 'center': status = status.center(self.status_width - 2) elif just == 'right': @@ -70,23 +75,22 @@ def status(self, status, message = '', just=None): status_stripped = status.strip().lower() if status_stripped == 'skip': status = AnsiColorizer.colorize(status, AnsiColorizer.yellow) - elif status_stripped in [ 'fail', 'failed' ]: + elif status_stripped in ['fail', 'failed']: status = AnsiColorizer.colorize(status, AnsiColorizer.red) else: status = AnsiColorizer.colorize(status, AnsiColorizer.green) self.info('[ %s ] %s' % (status, message)) - def result(self, check, partition, environ, success): if success: result_str = 'OK' else: result_str = 'FAIL' - self.status(result_str, '%s on %s using %s' % \ - (check.name, partition.fullname, environ.name), just='right') - + self.status( + result_str, '%s on %s using %s' % + (check.name, partition.fullname, environ.name), just='right') def timestamp(self, msg='', separator=None): msg = '%s %s' % (msg, datetime.datetime.today().strftime('%c %Z')) @@ -95,18 +99,14 @@ def timestamp(self, msg='', separator=None): else: self.info(msg) - def error(self, msg): self._logger.error('%s: %s' % (sys.argv[0], msg)) - - def info(self, msg = ''): + def info(self, msg=''): self._logger.info(msg) - def log_config(self, options): - config_str = 'configuration\n' - for attr, val in sorted(options.__dict__.items()): - config_str += ' %s=%s\n' % (attr, str(val)) + opt_list = [' %s=%s' % (attr, val) + for attr, val in sorted(options.__dict__.items())] - self._logger.debug(config_str) + self._logger.debug('configuration\n%s' % '\n'.join(opt_list)) diff --git a/reframe/frontend/resources.py b/reframe/frontend/resources.py index fdf0eb7e06..bdbfadbbde 100644 --- a/reframe/frontend/resources.py +++ b/reframe/frontend/resources.py @@ -3,15 +3,17 @@ # import os +import reframe.core.debug as debug from datetime import datetime + class ResourcesManager: - def __init__(self, prefix = '.', output_prefix = None, stage_prefix = None, - log_prefix = None, timestamp = None): + def __init__(self, prefix='.', output_prefix=None, stage_prefix=None, + log_prefix=None, timestamp=None): # Get the timestamp - time = datetime.now().strftime(timestamp) if timestamp else '' + time = datetime.now().strftime(timestamp or '') self.prefix = os.path.abspath(prefix) if output_prefix: @@ -34,20 +36,19 @@ def __init__(self, prefix = '.', output_prefix = None, stage_prefix = None, else: self.log_prefix = os.path.abspath(log_prefix) + def __repr__(self): + return debug.repr(self) def _makedir(self, *dirs): ret = os.path.join(*dirs) os.makedirs(ret, exist_ok=True) return ret - def stagedir(self, *dirs): return self._makedir(self.stage_prefix, *dirs) - def outputdir(self, *dirs): return self._makedir(self.output_prefix, *dirs) - def logdir(self, *dirs): return self._makedir(self.log_prefix, *dirs) diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py index 
47a4365fd2..d0e473414f 100644 --- a/reframe/frontend/statistics.py +++ b/reframe/frontend/statistics.py @@ -1,11 +1,13 @@ import traceback +import reframe.core.debug as debug from reframe.core.exceptions import ReframeError class TestStats: """Stores test case statistics.""" - def __init__(self, test_cases = []): + + def __init__(self, test_cases=[]): if not isinstance(test_cases, list): raise TypeError('TestStats is expecting a list of TestCase') @@ -18,8 +20,10 @@ def __init__(self, test_cases = []): tclist = self.test_cases_bypart.setdefault(partname, []) tclist.append(t) + def __repr__(self): + return debug.repr(self) - def num_failures(self, partition = None): + def num_failures(self, partition=None): num_fails = 0 if partition: num_fails += len([ @@ -28,20 +32,18 @@ def num_failures(self, partition = None): else: # count all failures for tclist in self.test_cases_bypart.values(): - num_fails += len([ t for t in tclist if t.failed() ]) + num_fails += len([t for t in tclist if t.failed()]) return num_fails - def num_failures_stage(self, stage): num_fails = 0 for tclist in self.test_cases_bypart.values(): - num_fails += len([ t for t in tclist if t.failed_stage == stage ]) + num_fails += len([t for t in tclist if t.failed_stage == stage]) return num_fails - - def num_cases(self, partition = None): + def num_cases(self, partition=None): num_cases = 0 if partition: num_cases += len(self.test_cases_bypart[partition]) @@ -52,17 +54,16 @@ def num_cases(self, partition = None): return num_cases - def failure_report(self): line_width = 78 - report = line_width*'=' + '\n' + report = line_width * '=' + '\n' report += 'SUMMARY OF FAILURES\n' for partname, tclist in self.test_cases_bypart.items(): - for tf in [ t for t in tclist if t.failed() ]: + for tf in [t for t in tclist if t.failed()]: check = tf.executor.check - environ_name = check.current_environ.name \ - if check.current_environ else 'None' - report += line_width*'-' + '\n' + environ_name = (check.current_environ.name + if check.current_environ else 'None') + report += line_width * '-' + '\n' report += 'FAILURE INFO for %s\n' % check.name report += ' * System partition: %s\n' % partname report += ' * Environment: %s\n' % environ_name @@ -81,15 +82,19 @@ def failure_report(self): elif isinstance(value, KeyboardInterrupt): report += 'cancelled by user\n' else: - report += 'caught unexpected exception: %s (%s)\n' % \ - (etype.__name__, value) + report += ('caught unexpected exception: %s (%s)\n' % + (etype.__name__, value)) report += ''.join( traceback.format_exception(*tf.exc_info)) + elif tf.failed_stage == 'sanity': + report += ('Sanity check failure\n' + + check.sanity_info.failure_report()) + elif tf.failed_stage == 'performance': + report += ('Performance check failure\n' + + check.perf_info.failure_report()) else: - report += "sanity/performance check failure " \ - "(performance log kept in `%s')\n" % \ - check._perf_logfile - + # This shouldn't happen... + report += 'Unknown error.' 
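# A brief usage sketch of TestStats with the statistics gathered by a
# policy's getstats(); the partition name is illustrative and must match one
# present in the collected test cases.
stats = TestStats(test_cases)
print('total failures       :', stats.num_failures())
print('failures on login    :', stats.num_failures('testsys:login'))
print('setup-stage failures :', stats.num_failures_stage('setup'))
if stats.num_failures():
    print(stats.failure_report())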
- report += line_width*'-' + '\n' + report += line_width * '-' + '\n' return report diff --git a/reframe/settings.py b/reframe/settings.py index c8fe58ab5b..e90b0f4cc3 100644 --- a/reframe/settings.py +++ b/reframe/settings.py @@ -7,69 +7,70 @@ from reframe.core.fields import ReadOnlyField + class RegressionSettings: - version = ReadOnlyField('2.6') + version = ReadOnlyField('2.6.1') module_name = ReadOnlyField('reframe') - job_state_poll_intervals = ReadOnlyField([ 1, 2, 3 ]) - job_init_poll_intervals = ReadOnlyField([ 1 ]) + job_state_poll_intervals = ReadOnlyField([1, 2, 3]) + job_init_poll_intervals = ReadOnlyField([1]) job_init_poll_max_tries = ReadOnlyField(30) job_submit_timeout = ReadOnlyField(60) prefix_apps = ReadOnlyField('/apps/common/regression/resources') - checks_path = ReadOnlyField([ 'checks/' ]) + checks_path = ReadOnlyField(['checks/']) checks_path_recurse = ReadOnlyField(True) site_configuration = ReadOnlyField({ - 'systems' : { + 'systems': { # Generic system used for cli unit tests - 'generic' : { - 'descr' : 'Generic example system', - 'partitions' : { - 'login' : { - 'scheduler' : 'local', - 'modules' : [], - 'access' : [], - 'environs' : [ 'builtin-gcc' ], - 'descr' : 'Login nodes' + 'generic': { + 'descr': 'Generic example system', + 'partitions': { + 'login': { + 'scheduler': 'local', + 'modules': [], + 'access': [], + 'environs': ['builtin-gcc'], + 'descr': 'Login nodes' } } } }, - 'environments' : { - '*' : { - 'PrgEnv-cray' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-cray' ], + 'environments': { + '*': { + 'PrgEnv-cray': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-cray'], }, - 'PrgEnv-gnu' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-gnu' ], + 'PrgEnv-gnu': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-gnu'], }, - 'PrgEnv-intel' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-intel' ], + 'PrgEnv-intel': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-intel'], }, - 'PrgEnv-pgi' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-pgi' ], + 'PrgEnv-pgi': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-pgi'], }, - 'builtin' : { - 'type' : 'ProgEnvironment', - 'cc' : 'cc', - 'cxx' : '', - 'ftn' : '', + 'builtin': { + 'type': 'ProgEnvironment', + 'cc': 'cc', + 'cxx': '', + 'ftn': '', }, - 'builtin-gcc' : { - 'type' : 'ProgEnvironment', - 'cc' : 'gcc', - 'cxx' : 'g++', - 'ftn' : 'gfortran', + 'builtin-gcc': { + 'type': 'ProgEnvironment', + 'cc': 'gcc', + 'cxx': 'g++', + 'ftn': 'gfortran', } } } @@ -78,22 +79,22 @@ class RegressionSettings: logging_config = { 'level': 'DEBUG', 'handlers': { - 'reframe.log' : { - 'level' : 'DEBUG', - 'format' : '[%(asctime)s] %(levelname)s: ' - '%(check_name)s: %(message)s', - 'append' : False, + 'reframe.log': { + 'level': 'DEBUG', + 'format': '[%(asctime)s] %(levelname)s: ' + '%(check_name)s: %(message)s', + 'append': False, }, # Output handling '&1': { - 'level' : 'INFO', - 'format' : '%(message)s' + 'level': 'INFO', + 'format': '%(message)s' }, - 'reframe.out' : { - 'level' : 'INFO', - 'format' : '%(message)s', - 'append' : False, + 'reframe.out': { + 'level': 'INFO', + 'format': '%(message)s', + 'append': False, } } } diff --git a/reframe/utility/functions.py b/reframe/utility/functions.py index 55cfe282ac..126417d454 100644 --- a/reframe/utility/functions.py +++ b/reframe/utility/functions.py @@ -4,6 +4,7 @@ from reframe.core.exceptions import ReframeError + def _expect_interval(val, interval, valdescr=None): lower, upper = interval if val < lower or val > 
upper: @@ -11,15 +12,15 @@ def _expect_interval(val, interval, valdescr=None): valdescr = 'value' raise ReframeError('%s (%s) not in [%s,%s]' % - (valdescr, val, lower, upper)) + (valdescr, val, lower, upper)) def _bound(refval, thres): # Upper/lower bounds computation is common, since lower bounds are negative - return (refval + abs(refval)*thres) if refval else thres + return (refval + abs(refval) * thres) if refval else thres -def standard_threshold(value, reference, logger = None): +def standard_threshold(value, reference, logger=None): try: refval, thres_lower, thres_upper = reference except (ValueError, TypeError): @@ -29,26 +30,26 @@ def standard_threshold(value, reference, logger = None): logger.info('value: %s, reference: %s' % (str(value), reference)) # sanity checking of user input - if refval == None: + if refval is None: raise ReframeError( 'No reference value specified for calculating tolerance') - if thres_lower == None and thres_upper == None: + if thres_lower is None and thres_upper is None: return True - if thres_lower == None: + if thres_lower is None: _expect_interval(thres_upper, (0, 1), 'reference upper threshold') return value <= _bound(refval, thres_upper) - if thres_upper == None: + if thres_upper is None: _expect_interval(thres_lower, (-1, 0), 'reference lower threshold') return value >= _bound(refval, thres_lower) - _expect_interval(thres_upper, ( 0, 1), 'reference upper threshold') + _expect_interval(thres_upper, (0, 1), 'reference upper threshold') _expect_interval(thres_lower, (-1, 0), 'reference lower threshold') - return value >= _bound(refval, thres_lower) and \ - value <= _bound(refval, thres_upper) + return (value >= _bound(refval, thres_lower) and + value <= _bound(refval, thres_upper)) def always_true(value, reference, **kwargs): diff --git a/reframe/utility/os.py b/reframe/utility/os.py index 8815fa3d8e..e929f36c92 100644 --- a/reframe/utility/os.py +++ b/reframe/utility/os.py @@ -10,6 +10,7 @@ from reframe.core.exceptions import * + def run_command(cmd, check=False, timeout=None): try: return subprocess.run(shlex.split(cmd), @@ -19,30 +20,30 @@ def run_command(cmd, check=False, timeout=None): timeout=timeout, check=check) except subprocess.CalledProcessError as e: - raise CommandError(command = e.cmd, - stdout = e.stdout, - stderr = e.stderr, - exitcode = e.returncode) + raise CommandError(command=e.cmd, + stdout=e.stdout, + stderr=e.stderr, + exitcode=e.returncode) except subprocess.TimeoutExpired as e: - raise CommandError(command = e.cmd, - stdout = e.stdout, - stderr = e.stderr, - exitcode = None, - timeout = e.timeout) + raise CommandError(command=e.cmd, + stdout=e.stdout, + stderr=e.stderr, + exitcode=None, + timeout=e.timeout) -def grep_command_output(cmd, pattern, where = 'stdout'): +def grep_command_output(cmd, pattern, where='stdout'): completed = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) if where == 'stdout': - outlist = [ completed.stdout ] + outlist = [completed.stdout] elif where == 'stderr': - outlist = [ completed.stderr ] + outlist = [completed.stderr] else: - outlist = [ completed.stdout, completed.stderr ] + outlist = [completed.stdout, completed.stderr] for out in outlist: if re.search(pattern, out, re.MULTILINE): @@ -51,13 +52,17 @@ def grep_command_output(cmd, pattern, where = 'stdout'): return False -def run_command_async(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, bufsize=1): +def run_command_async(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, 
+ bufsize=1, + **popen_args): return subprocess.Popen(args=shlex.split(cmd), stdout=stdout, stderr=stderr, universal_newlines=True, - bufsize=bufsize) + bufsize=bufsize, + **popen_args) def copytree(src, dst, symlinks=False, ignore=None, copy_function=shutil.copy2, @@ -80,8 +85,8 @@ def copytree_virtual(src, dst, file_links=[], If `file_links` is empty, this is equivalent to `copytree()`. The rest of the arguments are passed as-is to `copytree()`. Paths in `file_links` must - be relative to `src`. If you try to pass `.` in `file_links`, `OSError` will - be raised.""" + be relative to `src`. If you try to pass `.` in `file_links`, `OSError` + will be raised.""" # Work with absolute paths src = os.path.abspath(src) @@ -109,15 +114,16 @@ def copytree_virtual(src, dst, file_links=[], link_targets.add(os.path.abspath(target)) - if not file_links: ignore = None else: - ignore = lambda dir, contents: \ - [ c for c in contents if os.path.join(dir, c) in link_targets ] + def ignore(dir, contents): + return [c for c in contents + if os.path.join(dir, c) in link_targets] # Copy to dst ignoring the file_links - copytree(src, dst, symlinks, ignore, copy_function, ignore_dangling_symlinks) + copytree(src, dst, symlinks, ignore, + copy_function, ignore_dangling_symlinks) # Now create the symlinks for f in link_targets: @@ -156,9 +162,9 @@ def samefile(path1, path2): """Check if paths refer to the same file. If paths exist, this is equivalent to `os.path.samefile()`. If only one of - the paths exists, it will be followed if it is a symbolic link and its final - target will be compared to the other path. If both paths do not exist, a - simple string comparison will be performed (after they have been + the paths exists, it will be followed if it is a symbolic link and its + final target will be compared to the other path. If both paths do not + exist, a simple string comparison will be performed (after they have been normalized).""" # normalise the paths first diff --git a/reframe/utility/parsers.py b/reframe/utility/parsers.py index 9856341408..a190d8518f 100644 --- a/reframe/utility/parsers.py +++ b/reframe/utility/parsers.py @@ -3,32 +3,35 @@ # sanity patterns output # +import reframe.core.debug as debug + from reframe.utility.functions import always_true class StatefulParser: """Basic stateful parser""" + def __init__(self, callback=None): """Create a basic StatefulParser callback -- callable to be called when matching criteria are met (default None, which is equivalent to reframe.utility.always_true)""" self.is_on = False - self.callback = callback if callback != None else always_true + self.callback = callback or always_true + def __repr__(self): + return debug.repr(self) def on(self, **kwargs): """Switch on the parser""" self.is_on = True return True - def off(self, **kwargs): """Switch off the parser""" self.is_on = False return True - def match(self, value, reference, **kwargs): """To be called when a match is found""" if self.is_on: @@ -36,20 +39,17 @@ def match(self, value, reference, **kwargs): else: return False - def match_eof(self, **kwargs): """To be called at the end of an perf. or sanity input file""" self.clear() return True - def _match(self, value, reference, **kwargs): """The actual state changing function. 
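# A small standalone illustration of the parser protocol defined above: the
# callback fires only while the parser is switched on. Here the callback is
# standard_threshold from reframe.utility.functions, so the reference tuple
# is (reference value, lower tolerance, upper tolerance).
from reframe.utility.functions import standard_threshold
from reframe.utility.parsers import StatefulParser

p = StatefulParser(callback=standard_threshold)
print(p.match(10.0, (10.0, -0.1, 0.1)))   # False: the parser is still off
p.on()
print(p.match(10.5, (10.0, -0.1, 0.1)))   # True: within -10%/+10% of 10.0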
It is only called when the parser is on.""" return self.callback(value, reference, **kwargs) - def clear(self, **kwargs): """Clear the parser state.""" self.is_on = False @@ -57,12 +57,12 @@ def clear(self, **kwargs): class SingleOccurrenceParser(StatefulParser): """Parser for checking for the nth occurrence of a match""" + def __init__(self, nth_occurrence, callback=None): super().__init__(callback) self.count = 0 self.nth_occurrence = nth_occurrence - def _match(self, value, reference, **kwargs): self.count += 1 if self.count == self.nth_occurrence: @@ -70,7 +70,6 @@ def _match(self, value, reference, **kwargs): else: return False - def clear(self, **kwargs): super().clear() self.count = 0 @@ -93,28 +92,25 @@ def __init__(self, num_matches, exact=False, callback=None): self.num_matches = num_matches self.exact = exact - def _match(self, value, reference, **kwargs): self.count += 1 if self.num_matches < 0: self.last_match = (value, reference) return True else: - return self.count == self.num_matches and \ - self.callback(value, reference, **kwargs) - + return (self.count == self.num_matches and + self.callback(value, reference, **kwargs)) def match_eof(self, **kwargs): if self.num_matches < 0: - retvalue = self.callback(*self.last_match, **kwargs) \ - if self.last_match != None else True + retvalue = (self.callback(*self.last_match, **kwargs) + if self.last_match is not None else True) else: retvalue = self.count == self.num_matches if self.exact else True super().match_eof() return retvalue - def clear(self, **kwargs): super().clear() self.count = 0 @@ -124,17 +120,16 @@ def clear(self, **kwargs): class UniqueOccurrencesParser(StatefulParser): """Parser for counting the unique occurrences of the values associated with a match""" + def __init__(self, num_matches, callback=None): super().__init__(callback) self.num_matches = num_matches self.matched = set() - def _match(self, value, reference, **kwargs): self.matched.add((value, reference)) return True - def match_eof(self, **kwargs): retvalue = True if len(self.matched) != self.num_matches: @@ -147,7 +142,6 @@ def match_eof(self, **kwargs): super().match_eof() return retvalue - def clear(self, **kwargs): super().clear() self.matched.clear() @@ -161,9 +155,8 @@ def __init__(self, callback=None): self.value = None self.reference = None - def _match(self, value, reference, **kwargs): - if self.value == None: + if self.value is None: self.value = value else: self._apply_operator(value) @@ -171,23 +164,20 @@ def _match(self, value, reference, **kwargs): self.reference = reference return True - def _apply_operator(self, value): """The reduction operator To be implemented by subclasses""" raise NotImplementedError('attempt to call an abstract method') - def match_eof(self, **kwargs): - if self.value == None: + if self.value is None: return True retvalue = self.callback(self.value, self.reference, **kwargs) super().match_eof() return retvalue - def clear(self, **kwargs): super().clear() self.value = None @@ -214,18 +204,15 @@ def __init__(self, callback=None): super().__init__(callback) self.count = 0 - def _match(self, value, reference, **kwargs): self.count += 1 return super()._match(value, reference, **kwargs) - def _apply_operator(self, value): self.value += value - def match_eof(self, **kwargs): - if self.value == None: + if self.value is None: return True retvalue = self.callback(self.value / self.count, @@ -233,7 +220,6 @@ def match_eof(self, **kwargs): super().match_eof() return retvalue - def clear(self, **kwargs): super().clear() 
self.count = 0 diff --git a/reframe/utility/sandbox.py b/reframe/utility/sandbox.py index df305dff50..c3b300f486 100644 --- a/reframe/utility/sandbox.py +++ b/reframe/utility/sandbox.py @@ -1,6 +1,7 @@ from reframe.core.fields import CopyOnWriteField -class Sandbox(object): + +class Sandbox: """Sandbox class for manipulating shared resources.""" environ = CopyOnWriteField('environ') system = CopyOnWriteField('system') diff --git a/unittests/fixtures.py b/unittests/fixtures.py index 7adfa0fcb7..7eed2f1bff 100644 --- a/unittests/fixtures.py +++ b/unittests/fixtures.py @@ -11,63 +11,63 @@ TEST_MODULES = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'modules') TEST_SITE_CONFIG = { - 'systems' : { - 'testsys' : { - 'descr' : 'Fake system for unit tests', - 'hostnames' : [ 'testsys' ], - 'prefix' : '/foo/bar', - 'partitions' : { - 'login' : { - 'scheduler' : 'local', - 'modules' : [], - 'access' : [], - 'resources' : {}, - 'environs' : [ 'PrgEnv-cray', 'PrgEnv-gnu', 'builtin-gcc' ], - 'descr' : 'Login nodes' + 'systems': { + 'testsys': { + 'descr': 'Fake system for unit tests', + 'hostnames': ['testsys'], + 'prefix': '/foo/bar', + 'partitions': { + 'login': { + 'scheduler': 'local', + 'modules' : [], + 'access' : [], + 'resources': {}, + 'environs' : ['PrgEnv-cray', 'PrgEnv-gnu', 'builtin-gcc'], + 'descr' : 'Login nodes' }, - 'gpu' : { - 'scheduler' : 'nativeslurm', - 'modules' : [], - 'resources' : { - 'num_gpus_per_node' : [ + 'gpu': { + 'scheduler': 'nativeslurm', + 'modules' : [], + 'resources': { + 'num_gpus_per_node': [ '--gres=gpu:{num_gpus_per_node}' ], }, - 'access' : [], - 'environs' : [ 'PrgEnv-gnu', 'builtin-gcc' ], - 'descr' : 'GPU partition', + 'access' : [], + 'environs': ['PrgEnv-gnu', 'builtin-gcc'], + 'descr' : 'GPU partition', } } } }, - 'environments' : { - 'testsys:login' : { - 'PrgEnv-gnu' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-gnu' ], - 'cc' : 'gcc', - 'cxx' : 'g++', - 'ftn' : 'gfortran', + 'environments': { + 'testsys:login': { + 'PrgEnv-gnu': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-gnu'], + 'cc' : 'gcc', + 'cxx': 'g++', + 'ftn': 'gfortran', }, }, - '*' : { - 'PrgEnv-gnu' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-gnu' ], + '*': { + 'PrgEnv-gnu': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-gnu'], }, - 'PrgEnv-cray' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-cray' ], + 'PrgEnv-cray': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-cray'], }, 'builtin-gcc' : { - 'type' : 'ProgEnvironment', - 'cc' : 'gcc', - 'cxx' : 'g++', - 'ftn' : 'gfortran', + 'type': 'ProgEnvironment', + 'cc' : 'gcc', + 'cxx' : 'g++', + 'ftn' : 'gfortran', } } } @@ -80,6 +80,7 @@ def force_remove_file(filename): except FileNotFoundError: pass + def guess_system(): site_config = SiteConfiguration() site_config.load_from_dict(settings.site_configuration) @@ -88,11 +89,11 @@ def guess_system(): # FIXME: This may conflict in the unlikely situation that a user defines a # system named `kesch` with a partition named `pn`. -def system_with_scheduler(sched_type, skip_partitions = [ 'kesch:pn' ]): +def system_with_scheduler(sched_type, skip_partitions=['kesch:pn']): """Retrieve a partition from the current system with a specific scheduler. - If `sched_type == None`, the first partition with a non-local scheduler will - be returned. + If `sched_type` is `None`, the first partition with a non-local scheduler + will be returned. Partitions in `skip_partitions` will be skipped from searching. 
Items of `skip_partitions` are of the form `:`.""" @@ -105,7 +106,7 @@ def system_with_scheduler(sched_type, skip_partitions = [ 'kesch:pn' ]): if canon_name in skip_partitions: continue - if sched_type == None and p.scheduler != 'local': + if sched_type is None and p.scheduler != 'local': return p if p.scheduler == sched_type: diff --git a/unittests/resources/badchecks/badargs.py b/unittests/resources/badchecks/badargs.py index 76e0e835f9..717f8f2520 100644 --- a/unittests/resources/badchecks/badargs.py +++ b/unittests/resources/badchecks/badargs.py @@ -2,10 +2,11 @@ from reframe.core.pipeline import RegressionTest + class EmptyTest(RegressionTest): def __init__(self, **kwargs): super().__init__('emptycheck', os.path.dirname(__file__), **kwargs) def _get_checks(): - return [ EmptyTest() ] + return [EmptyTest()] diff --git a/unittests/resources/badchecks/badentry.py b/unittests/resources/badchecks/badentry.py index da9b44622e..a832c17c1a 100644 --- a/unittests/resources/badchecks/badentry.py +++ b/unittests/resources/badchecks/badentry.py @@ -2,9 +2,10 @@ from reframe.core.pipeline import RegressionTest + class EmptyTest(RegressionTest): def __init__(self, **kwargs): super().__init__('emptycheck', os.path.dirname(__file__), **kwargs) def _get_checks(**kwargs): - return [ self ] + return [self] diff --git a/unittests/resources/badchecks/invalid_check.py b/unittests/resources/badchecks/invalid_check.py index 442c138a06..8635bc91d2 100644 --- a/unittests/resources/badchecks/invalid_check.py +++ b/unittests/resources/badchecks/invalid_check.py @@ -12,5 +12,6 @@ class InvalidTest: def __init__(self, **kwargs): pass + def _get_checks(**kwargs): - return [ EmptyTest(**kwargs), InvalidTest(**kwargs) ] + return [EmptyTest(**kwargs), InvalidTest(**kwargs)] diff --git a/unittests/resources/badchecks/noentry.py b/unittests/resources/badchecks/noentry.py index efc30eac4a..5ec6d4e39d 100644 --- a/unittests/resources/badchecks/noentry.py +++ b/unittests/resources/badchecks/noentry.py @@ -2,6 +2,7 @@ from reframe.core.pipeline import RegressionTest + class EmptyTest(RegressionTest): def __init__(self, **kwargs): super().__init__('emptycheck', os.path.dirname(__file__), **kwargs) diff --git a/unittests/resources/emptycheck.py b/unittests/resources/emptycheck.py index 57b65595c5..642c140d14 100644 --- a/unittests/resources/emptycheck.py +++ b/unittests/resources/emptycheck.py @@ -9,4 +9,4 @@ def __init__(self, **kwargs): def _get_checks(**kwargs): - return [ EmptyTest(**kwargs) ] + return [EmptyTest(**kwargs)] diff --git a/unittests/resources/frontend_checks.py b/unittests/resources/frontend_checks.py index fb9d320b35..82e142e187 100644 --- a/unittests/resources/frontend_checks.py +++ b/unittests/resources/frontend_checks.py @@ -16,19 +16,18 @@ def __init__(self, name, **kwargs): self.local = True self.executable = 'echo hello' self.sanity_patterns = { - '-' : { 'hello' : [] } + '-' : {'hello' : []} } - self.tags = { self.name } - self.maintainers = [ 'VK' ] + self.tags = {self.name} + self.maintainers = ['VK'] class BadSetupCheck(BaseFrontendCheck): def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] - + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] def setup(self, system, environ, **job_opts): super().setup(system, environ, **job_opts) @@ -39,9 +38,8 @@ class BadSetupCheckEarly(BaseFrontendCheck): def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = 
[ '*' ] - self.valid_prog_environs = [ '*' ] - + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] def setup(self, system, environ, **job_opts): raise ReframeError('Setup failure') @@ -50,30 +48,30 @@ def setup(self, system, environ, **job_opts): class NoSystemCheck(BaseFrontendCheck): def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_prog_environs = [ '*' ] + self.valid_prog_environs = ['*'] class NoPrgEnvCheck(BaseFrontendCheck): def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] + self.valid_systems = ['*'] class SanityFailureCheck(BaseFrontendCheck): def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] self.sanity_patterns = { - '-' : { 'foo' : [] } + '-' : {'foo' : []} } class PerformanceFailureCheck(BaseFrontendCheck): def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] self.perf_patterns = { '-' : { '(?P\S+)' : [ @@ -92,33 +90,32 @@ def __init__(self, **kwargs): class CustomPerformanceFailureCheck(BaseFrontendCheck): """Simulate a performance check that ignores completely logging""" + def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] self.strict_check = False - def check_performance(self): return False class KeyboardInterruptCheck(BaseFrontendCheck): """Simulate keyboard interrupt during test's execution.""" + def __init__(self, phase='wait', **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] self.phase = phase - def setup(self, system, environ, **job_opts): super().setup(system, environ, **job_opts) if self.phase == 'setup': raise KeyboardInterrupt - def wait(self): # We do our nasty stuff in wait() to make things more complicated if self.phase == 'wait': @@ -129,11 +126,12 @@ def wait(self): class SystemExitCheck(BaseFrontendCheck): """Simulate system exit from within a check.""" + def __init__(self, **kwargs): super().__init__(type(self).__name__, **kwargs) - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] def wait(self): # We do our nasty stuff in wait() to make things more complicated @@ -150,21 +148,22 @@ def __init__(self, sleep_time, **kwargs): '-c "from time import sleep; sleep(%s)"' % sleep_time ] self.sanity_patterns = None - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] def setup(self, system, environ, **job_opts): super().setup(system, environ, **job_opts) print_timestamp = "python3 -c \"from datetime import datetime; " \ "print(datetime.today().strftime('%s.%f'))\"" - self.job.pre_run = [ print_timestamp ] - self.job.post_run = [ print_timestamp ] + self.job.pre_run = [print_timestamp] + self.job.post_run = [print_timestamp] + def _get_checks(**kwargs): - return [ BadSetupCheck(**kwargs), - BadSetupCheckEarly(**kwargs), - NoSystemCheck(**kwargs), - NoPrgEnvCheck(**kwargs), - 
SanityFailureCheck(**kwargs), - PerformanceFailureCheck(**kwargs), - CustomPerformanceFailureCheck(**kwargs), ] + return [BadSetupCheck(**kwargs), + BadSetupCheckEarly(**kwargs), + NoSystemCheck(**kwargs), + NoPrgEnvCheck(**kwargs), + SanityFailureCheck(**kwargs), + PerformanceFailureCheck(**kwargs), + CustomPerformanceFailureCheck(**kwargs), ] diff --git a/unittests/resources/hellocheck.py b/unittests/resources/hellocheck.py index 9f66bc2e12..bd1344c6f9 100644 --- a/unittests/resources/hellocheck.py +++ b/unittests/resources/hellocheck.py @@ -3,21 +3,22 @@ from reframe.core.pipeline import RegressionTest from reframe.core.environments import * + class HelloTest(RegressionTest): def __init__(self, **kwargs): super().__init__('hellocheck', os.path.dirname(__file__), **kwargs) self.descr = 'C Hello World test' # All available systems are supported - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] self.sourcepath = 'hello.c' - self.tags = { 'foo', 'bar' } + self.tags = {'foo', 'bar'} self.sanity_patterns = { - '-' : { 'Hello, World\!' : [] } + '-' : {'Hello, World\!' : []} } - self.maintainers = [ 'VK' ] + self.maintainers = ['VK'] def _get_checks(**kwargs): - return [ HelloTest(**kwargs) ] + return [HelloTest(**kwargs)] diff --git a/unittests/resources/hellocheck_make.py b/unittests/resources/hellocheck_make.py index bc42049660..8f17d798e1 100644 --- a/unittests/resources/hellocheck_make.py +++ b/unittests/resources/hellocheck_make.py @@ -3,23 +3,24 @@ from reframe.core.pipeline import RegressionTest from reframe.core.environments import * + class HelloMakeTest(RegressionTest): def __init__(self, **kwargs): - super().__init__('hellocheck_make', os.path.dirname(__file__), **kwargs) + super().__init__('hellocheck_make', + os.path.dirname(__file__), **kwargs) self.descr = 'C++ Hello World test' # All available systems are supported - self.valid_systems = [ '*' ] - self.valid_prog_environs = [ '*' ] + self.valid_systems = ['*'] + self.valid_prog_environs = ['*'] self.sourcepath = '' self.executable = './hello_cpp' - self.keep_files = [ 'hello_cpp' ] - self.tags = { 'foo', 'bar' } + self.keep_files = ['hello_cpp'] + self.tags = {'foo', 'bar'} self.sanity_patterns = { - '-' : { 'Hello, World\!' : [] } + '-' : {'Hello, World\!' : []} } - self.maintainers = [ 'VK' ] - + self.maintainers = ['VK'] def compile(self): self.current_environ.cflags = '-O3' @@ -28,4 +29,4 @@ def compile(self): def _get_checks(**kwargs): - return [ HelloMakeTest(**kwargs) ] + return [HelloMakeTest(**kwargs)] diff --git a/unittests/resources/src/sleep_deeply.sh b/unittests/resources/src/sleep_deeply.sh new file mode 100755 index 0000000000..298133b1c6 --- /dev/null +++ b/unittests/resources/src/sleep_deeply.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +trap -- '' TERM +sleep 5 & +echo $! 
+wait diff --git a/unittests/test_argparser.py b/unittests/test_argparser.py index 209b8c0daa..07aa20d0d4 100644 --- a/unittests/test_argparser.py +++ b/unittests/test_argparser.py @@ -21,17 +21,15 @@ def setUp(self): action='append', default=[]) self.foo_options.add_argument('--barfoo', action='store_true') - def test_arguments(self): self.assertRaises(ValueError, self.foo_options.add_argument, action='store', default='FOO') self.foo_options.add_argument('--foo-bar', action='store_true') self.foo_options.add_argument('--alist', action='append', default=[]) - options = self.parser.parse_args([ '--foobar', '--foo-bar']) + options = self.parser.parse_args(['--foobar', '--foo-bar']) self.assertTrue(options.foobar) self.assertTrue(options.foo_bar) - def test_parsing(self): options = self.parser.parse_args( '--foo name --foolist gag --barfoo --unfoo'.split() diff --git a/unittests/test_cli.py b/unittests/test_cli.py index f3c3ce2b38..5592f48588 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -74,25 +74,22 @@ def setUp(self): } } - def _run_reframe(self): import reframe.frontend.cli as cli argv = self.cmdstr.format( - executable = self.executable, - checkopt = ('-c %s' % self.checkfile) if self.checkfile else '', - prefix = self.prefix, - prgenvopt = ('-p %s' % self.prgenv) if self.prgenv else '', - action = self.action, - local = '--force-local' if self.local else '', - options = ' '.join(self.options), - sysopt = ('--system %s' % self.sysopt) if self.sysopt else '' + executable=self.executable, + checkopt=('-c %s' % self.checkfile) if self.checkfile else '', + prefix=self.prefix, + prgenvopt=('-p %s' % self.prgenv) if self.prgenv else '', + action=self.action, + local='--force-local' if self.local else '', + options=' '.join(self.options), + sysopt=('--system %s' % self.sysopt) if self.sysopt else '' ).split() - print(argv) return run_command_inline(argv, cli.main) - def _stage_exists(self, check_name, partitions, prgenv_name): stagedir = os.path.join(self.prefix, 'stage') @@ -103,7 +100,6 @@ def _stage_exists(self, check_name, partitions, prgenv_name): return True - def _perflog_exists(self, check_name, partitions): logdir = os.path.join(self.prefix, 'logs') for p in partitions: @@ -113,23 +109,20 @@ def _perflog_exists(self, check_name, partitions): return True - def assert_log_file_is_saved(self): outputdir = os.path.join(self.prefix, 'output') self.assertTrue(os.path.exists(self.logfile)) self.assertTrue(os.path.exists( - os.path.join(outputdir,os.path.basename(self.logfile)))) - + os.path.join(outputdir, os.path.basename(self.logfile)))) def test_check_success(self): - self.options = [ '--save-log-files' ] + self.options = ['--save-log-files'] returncode, stdout, stderr = self._run_reframe() self.assertNotIn('FAILED', stdout) self.assertIn('PASSED', stdout) self.assertEqual(0, returncode) self.assert_log_file_is_saved() - @unittest.skipIf(not system_with_scheduler(None), 'job submission not supported') def test_check_submit_success(self): @@ -148,19 +141,17 @@ def test_check_submit_success(self): self.assertIn('PASSED', stdout) self.assertEqual(0, returncode) - def test_check_failure(self): self.checkfile = 'unittests/resources/frontend_checks.py' - self.options = [ '--tag BadSetupCheck' ] + self.options = ['--tag BadSetupCheck'] returncode, stdout, stderr = self._run_reframe() self.assertIn('FAILED', stdout) self.assertNotEqual(returncode, 0) - def test_check_sanity_failure(self): self.checkfile = 'unittests/resources/frontend_checks.py' - self.options = [ '--tag 
SanityFailureCheck' ] + self.options = ['--tag SanityFailureCheck'] returncode, stdout, stderr = self._run_reframe() self.assertIn('FAILED', stdout) @@ -169,12 +160,11 @@ def test_check_sanity_failure(self): self.assertNotIn('Traceback', stderr) self.assertNotEqual(returncode, 0) self.assertTrue(self._stage_exists('SanityFailureCheck', - [ 'login' ], self.prgenv)) - + ['login'], self.prgenv)) def test_performance_check_failure(self): self.checkfile = 'unittests/resources/frontend_checks.py' - self.options = [ '--tag PerformanceFailureCheck' ] + self.options = ['--tag PerformanceFailureCheck'] returncode, stdout, stderr = self._run_reframe() self.assertIn('FAILED', stdout) @@ -183,14 +173,13 @@ def test_performance_check_failure(self): self.assertNotIn('Traceback', stderr) self.assertNotEqual(0, returncode) self.assertTrue(self._stage_exists('PerformanceFailureCheck', - [ 'login' ], self.prgenv)) + ['login'], self.prgenv)) self.assertTrue(self._perflog_exists('PerformanceFailureCheck', - [ 'login' ])) - + ['login'])) def test_custom_performance_check_failure(self): self.checkfile = 'unittests/resources/frontend_checks.py' - self.options = [ '--tag CustomPerformanceFailureCheck' ] + self.options = ['--tag CustomPerformanceFailureCheck'] returncode, stdout, stderr = self._run_reframe() self.assertIn('FAILED', stdout) @@ -200,38 +189,34 @@ def test_custom_performance_check_failure(self): self.assertNotEqual(0, returncode) self.assertTrue(self._stage_exists('CustomPerformanceFailureCheck', - [ 'login' ], self.prgenv)) + ['login'], self.prgenv)) self.assertNotIn('Check log file:', stdout) - def test_skip_system_check_option(self): self.checkfile = 'unittests/resources/frontend_checks.py' - self.options = [ '--skip-system-check', '--tag NoSystemCheck' ] + self.options = ['--skip-system-check', '--tag NoSystemCheck'] returncode, stdout, stderr = self._run_reframe() self.assertIn('PASSED', stdout) - def test_skip_prgenv_check_option(self): self.checkfile = 'unittests/resources/frontend_checks.py' - self.options = [ '--skip-prgenv-check', '--tag NoPrgEnvCheck' ] + self.options = ['--skip-prgenv-check', '--tag NoPrgEnvCheck'] returncode, stdout, stderr = self._run_reframe() self.assertIn('PASSED', stdout) self.assertEqual(0, returncode) - def test_sanity_of_checks(self): # This test will effectively load all the tests in the checks path and # will force a syntactic and runtime check at least for the constructor # of the checks self.action = '-l' - self.options = [ '--save-log-files' ] + self.options = ['--save-log-files'] self.checkfile = None returncode, stdout, stderr = self._run_reframe() self.assertEqual(0, returncode) self.assert_log_file_is_saved() - def test_unknown_system(self): self.action = '-l' self.sysopt = 'foo' @@ -241,14 +226,12 @@ def test_unknown_system(self): self.assertNotIn('Traceback', stderr) self.assertEqual(1, returncode) - def test_sanity_of_optconfig(self): # Test the sanity of the command line options configuration self.action = '-h' self.checkfile = None returncode, stdout, stderr = self._run_reframe() - def test_checkpath_recursion(self): self.action = '-l' self.checkfile = None @@ -257,7 +240,7 @@ def test_checkpath_recursion(self): 'Found (\d+) check', stdout, re.MULTILINE).group(1) self.checkfile = 'checks/' - self.options = [ '-R' ] + self.options = ['-R'] returncode, stdout, stderr = self._run_reframe() num_checks_in_checkdir = re.search( 'Found (\d+) check', stdout, re.MULTILINE).group(1) @@ -269,7 +252,6 @@ def test_checkpath_recursion(self): 'Found (\d+) check', 
stdout, re.MULTILINE).group(1) self.assertEqual('0', num_checks_in_checkdir) - def test_same_output_stage_dir(self): output_dir = os.path.join(self.prefix, 'foo') self.options = ('-o %s -s %s' % (output_dir, output_dir)).split() diff --git a/unittests/test_core.py b/unittests/test_core.py index 496fff052b..5e9942096e 100644 --- a/unittests/test_core.py +++ b/unittests/test_core.py @@ -2,10 +2,11 @@ import tempfile import stat import unittest +import reframe.core.debug as debug import reframe.utility.os as os_ext from reframe.core.environments import Environment, EnvironmentSnapshot, \ - ProgEnvironment + ProgEnvironment from reframe.core.modules import * from reframe.core.exceptions import CompilationError from reframe.core.modules import * @@ -15,22 +16,19 @@ class TestEnvironment(unittest.TestCase): def assertEnvironmentVariable(self, name, value): - if not name in os.environ: + if name not in os.environ: self.fail('environment variable %s not set' % name) self.assertEqual(os.environ[name], value) - def assertModulesLoaded(self, modules): for m in modules: self.assertTrue(module_present(m)) - def assertModulesNotLoaded(self, modules): for m in modules: self.assertFalse(module_present(m)) - def setUp(self): module_path_add([TEST_MODULES]) @@ -54,12 +52,10 @@ def setUp(self): modules=['testmod_boo']) self.environ_other.set_variable(name='_fookey11', value='value11') - def tearDown(self): module_path_remove([TEST_MODULES]) self.environ_save.load() - def test_setup(self): self.assertEqual(len(self.environ.modules), 1) self.assertEqual(len(self.environ.variables.keys()), 4) @@ -67,7 +63,6 @@ def test_setup(self): self.assertEqual(self.environ.variables['_fookey2'], 'value2') self.assertIn('testmod_foo', self.environ.modules) - def test_environment_snapshot(self): self.assertRaises(RuntimeError, self.environ_save.add_module, 'testmod_foo') @@ -79,7 +74,6 @@ def test_environment_snapshot(self): self.environ_save.load() self.assertEqual(self.environ_save, EnvironmentSnapshot()) - def test_load_restore(self): self.environ.load() self.assertEnvironmentVariable(name='_fookey1', value='value3') @@ -94,26 +88,22 @@ def test_load_restore(self): self.assertFalse(module_present('testmod_foo')) self.assertEnvironmentVariable(name='_fookey1', value='origfoo') - def test_load_present(self): module_load('testmod_boo') self.environ.load() self.environ.unload() self.assertTrue(module_present('testmod_boo')) - def test_equal(self): - env1 = Environment('env1', modules=[ 'foo', 'bar' ]) - env2 = Environment('env1', modules=[ 'bar', 'foo' ]) + env1 = Environment('env1', modules=['foo', 'bar']) + env2 = Environment('env1', modules=['bar', 'foo']) self.assertEqual(env1, env2) - def test_not_equal(self): - env1 = Environment('env1', modules=[ 'foo', 'bar' ]) - env2 = Environment('env2', modules=[ 'foo', 'bar' ]) + env1 = Environment('env1', modules=['foo', 'bar']) + env2 = Environment('env2', modules=['foo', 'bar']) self.assertNotEqual(env1, env2) - def test_conflicting_environments(self): envfoo = Environment(name='envfoo', modules=['testmod_foo', 'testmod_boo']) @@ -126,7 +116,6 @@ def test_conflicting_environments(self): for m in envfoo.modules: self.assertFalse(module_present(m)) - def test_conflict_environ_after_module_load(self): module_load('testmod_foo') envfoo = Environment(name='envfoo', modules=['testmod_foo']) @@ -134,7 +123,6 @@ def test_conflict_environ_after_module_load(self): envfoo.unload() self.assertTrue(module_present('testmod_foo')) - def test_conflict_environ_after_module_force_load(self): 
module_load('testmod_foo') envbar = Environment(name='envbar', modules=['testmod_bar']) @@ -142,7 +130,6 @@ def test_conflict_environ_after_module_force_load(self): envbar.unload() self.assertTrue(module_present('testmod_foo')) - def test_swap(self): from reframe.core.environments import swap_environments @@ -157,14 +144,12 @@ def setUp(self): self.environ_save = EnvironmentSnapshot() self.executable = os.path.join(TEST_RESOURCES, 'hello') - def tearDown(self): # Remove generated executable ingoring file-not-found errors force_remove_file(self.executable) self.environ_save.load() - - def assertHelloMessage(self, executable = None): + def assertHelloMessage(self, executable=None): if not executable: executable = self.executable @@ -172,7 +157,6 @@ def assertHelloMessage(self, executable = None): pattern='Hello, World\!')) force_remove_file(executable) - def compile_with_env(self, env, skip_fortran=False): srcdir = os.path.join(TEST_RESOURCES, 'src') env.cxxflags = '-O2' @@ -192,13 +176,12 @@ def compile_with_env(self, env, skip_fortran=False): env.unload() - def compile_dir_with_env(self, env, skip_fortran=False): srcdir = os.path.join(TEST_RESOURCES, 'src') env.cxxflags = '-O3' env.load() - executables = [ 'hello_c', 'hello_cpp' ] + executables = ['hello_c', 'hello_cpp'] if skip_fortran: env.compile(srcdir, makefile='Makefile.nofort') else: @@ -211,10 +194,10 @@ def compile_dir_with_env(self, env, skip_fortran=False): env.compile(sourcepath=srcdir, options='clean') env.unload() - def test_compile(self): # Compile a 'Hello, World' with the builtin gcc/g++ - env = ProgEnvironment(name='builtin-gcc', cc='gcc', cxx='g++', ftn=None) + env = ProgEnvironment(name='builtin-gcc', + cc='gcc', cxx='g++', ftn=None) try: self.compile_with_env(env, skip_fortran=True) self.compile_dir_with_env(env, skip_fortran=True) @@ -228,15 +211,13 @@ def setUp(self): os.chmod(self.script_file.name, os.stat(self.script_file.name).st_mode | stat.S_IEXEC) - def tearDown(self): os.remove(self.script_file.name) - def test_bash_builder(self): builder = BashScriptBuilder() - builder.set_variable('var1', '13') - builder.set_variable('var2', '2') + builder.set_variable('var1', '13') + builder.set_variable('var2', '2') builder.set_variable('foo', '33', suppress=True) builder.verbatim('((var3 = var1 + var2)); echo hello $var3') self.script_file.write(builder.finalise()) @@ -258,14 +239,12 @@ def tearDown(self): module_unload('testmod_bar') module_path_remove([TEST_MODULES]) - def test_module_path(self): self.assertTrue(os_ext.inpath(TEST_MODULES, os.environ['MODULEPATH'])) module_path_remove([TEST_MODULES]) self.assertFalse(os_ext.inpath(TEST_MODULES, os.environ['MODULEPATH'])) - def test_module_equal(self): self.assertTrue(module_equal('foo', 'foo')) self.assertTrue(module_equal('foo/1.2', 'foo/1.2')) @@ -274,7 +253,6 @@ def test_module_equal(self): self.assertFalse(module_equal('foo', 'bar')) self.assertFalse(module_equal('foo', 'foobar')) - def test_module_load(self): self.assertRaises(ModuleError, module_load, 'foo') self.assertFalse(module_present('foo')) @@ -287,7 +265,6 @@ def test_module_load(self): self.assertFalse(module_present('testmod_foo')) self.assertNotIn('TESTMOD_FOO', os.environ) - def test_module_force_load(self): module_load('testmod_foo') @@ -301,8 +278,42 @@ def test_module_force_load(self): self.assertIn('testmod_foo', unloaded) self.assertIn('TESTMOD_BAR', os.environ) - def test_module_purge(self): module_load('testmod_base') module_purge() self.assertNotIn('LOADEDMODULES', os.environ) + + +class 
TestDebugRepr(unittest.TestCase): + def test_builtin_types(self): + # builtin types must use the default repr() + self.assertEqual(repr(1), debug.repr(1)) + self.assertEqual(repr(1.2), debug.repr(1.2)) + self.assertEqual(repr([1, 2, 3]), debug.repr([1, 2, 3])) + self.assertEqual(repr({1, 2, 3}), debug.repr({1, 2, 3})) + self.assertEqual(repr({1, 2, 3}), debug.repr({1, 2, 3})) + self.assertEqual(repr({'a': 1, 'b': {2, 3}}), + debug.repr({'a': 1, 'b': {2, 3}})) + + def test_obj_repr(self): + class C: + def __repr__(self): + return debug.repr(self) + + class D: + def __repr__(self): + return debug.repr(self) + + c = C() + c._a = -1 + c.a = 1 + c.b = {1, 2, 3} + c.d = D() + c.d.a = 2 + c.d.b = 3 + + rep = repr(c) + self.assertIn('unittests.test_core', rep) + self.assertIn('_a=%r' % c._a, rep) + self.assertIn('b=%r' % c.b, rep) + self.assertIn('D(...)', rep) diff --git a/unittests/test_fields.py b/unittests/test_fields.py index 23a755fcac..6cf3e44ec9 100644 --- a/unittests/test_fields.py +++ b/unittests/test_fields.py @@ -9,37 +9,33 @@ def test_copy_on_write_field(self): class FieldTester: cow = CopyOnWriteField('cow') - tester = FieldTester() - var = [ 1, 2, 3 ] + var = [1, 2, 3] # set copy-on-write field tester.cow = var # modify original variable var.append(4) - self.assertEqual(tester.cow, [ 1, 2, 3 ]) - + self.assertEqual(tester.cow, [1, 2, 3]) def test_readonly_field(self): class FieldTester: ro = ReadOnlyField('foo') - tester = FieldTester() self.assertEqual(tester.ro, 'foo') self.assertRaises(FieldError, exec, "tester.ro = 'bar'", globals(), locals()) - def test_alphanumeric_field(self): class FieldTester: field1 = AlphanumericField('field1', allow_none=True) field2 = AlphanumericField('field2') + def __init__(self, value): self.field1 = value - tester1 = FieldTester('foo') tester2 = FieldTester('bar') self.assertEqual('foo', tester1.field1) @@ -55,7 +51,6 @@ def __init__(self, value): # Setting field1 to None must be fine tester1.field1 = None - def test_typed_field(self): class ClassA: def __init__(self, val): @@ -65,15 +60,14 @@ class ClassB(ClassA): def __init__(self): super().__init__(10) - class FieldTester: field = TypedField('field', ClassA) field_maybe_none = TypedField('field_maybe_none', ClassA, allow_none=True) + def __init__(self, value): self.field = value - tester = FieldTester(ClassA(3)) self.assertEqual(3, tester.field.value) self.assertRaises(FieldError, FieldTester, 3) @@ -84,17 +78,17 @@ def __init__(self, value): globals(), locals()) tester.field_maybe_none = None - def test_aggregate_typed_field(self): class FieldTester: simple_int = AggregateTypeField('simple_int', int) int_list = AggregateTypeField('int_list', (list, int)) - tuple_list = AggregateTypeField('tuple_list', (list, (tuple, int))) + tuple_list = AggregateTypeField('tuple_list', + (list, (tuple, int))) mixed_tuple = AggregateTypeField('mixed_tuple', (tuple, ((int, float, int),))) float_tuple = AggregateTypeField('float_tuple', (tuple, float)) dict_list = AggregateTypeField('dict_list', - (list, (dict, (str, int)))) + (list, (dict, (str, int)))) multilevel_dict = AggregateTypeField( 'multilevel_dict', (dict, (str, (dict, (str, int)))) ) @@ -122,44 +116,42 @@ class FieldTester: (tuple, ((int, (float, None), (int, None)),)) ) - - int_list = [ 1, 2, 3 ] - int_list_none = [ 1, None, 3 ] - tuple_list = [ (1, 2, 3), (4, 5, 6) ] + int_list = [1, 2, 3] + int_list_none = [1, None, 3] + tuple_list = [(1, 2, 3), (4, 5, 6)] dict_list = [ - { 'a' : 1, 'b' : 2 }, - { 'a' : 3, 'b' : 4 } + {'a': 1, 'b': 2}, + {'a': 3, 
'b': 4} ] typed_tuple = (1, 2.2, 'foo') float_tuple = (2.3, 1.2, 5.6, 9.8) mixed_tuple = (1, 2.3, 3) multilevel_dict = { - 'foo' : { - 'a' : 1, - 'b' : 2, + 'foo': { + 'a': 1, + 'b': 2, }, - 'bar' : { - 'c' : 3, - 'd' : 4, + 'bar': { + 'c': 3, + 'd': 4, } } complex_dict = { - '-' : { - 'pattern' : [ + '-': { + 'pattern': [ ('foo', int, int), ('bar', None, float), ], - 'patt' : [ + 'patt': [ ('foobar', int, None), ] } } dict_list_none = [ - { 'a' : 1, 'b' : 2 }, + {'a': 1, 'b': 2}, None ] - # Test valid assignments tester = FieldTester() tester.simple_int = 1 @@ -188,18 +180,18 @@ class FieldTester: # Test empty containers tester.int_list = [] tester.tuple_list = [] - tester.dict_list = [ { 'a' : 1, 'b' : 2 }, {} ] + tester.dict_list = [{'a': 1, 'b': 2}, {}] tester.multilevel_dict = { - 'foo' : {}, - 'bar' : { - 'c' : 3, - 'd' : 4, + 'foo': {}, + 'bar': { + 'c': 3, + 'd': 4, } } # Test invalid assignments self.assertRaises(FieldError, exec, - "tester.int_list = [ 'a', 'b' ]", + "tester.int_list = ['a', 'b']", globals(), locals()) self.assertRaises(FieldError, exec, "tester.int_list = int_list_none", @@ -226,109 +218,100 @@ class FieldTester: "tester.complex_dict = multilevel_dict", globals(), locals()) - def test_string_field(self): class FieldTester: field = StringField('field') + def __init__(self, value): self.field = value - tester = FieldTester('foo') self.assertEqual('foo', tester.field) self.assertRaises(FieldError, exec, 'tester.field = 13', globals(), locals()) - def test_non_whitespace_field(self): class FieldTester: field = NonWhitespaceField('field') - tester = FieldTester() tester.field = 'foobar' self.assertEqual('foobar', tester.field) self.assertRaises(FieldError, exec, 'tester.field = "foo bar"', globals(), locals()) - def test_integer_field(self): class FieldTester: field = IntegerField('field') + def __init__(self, value): self.field = value - tester = FieldTester(5) self.assertEqual(5, tester.field) self.assertRaises(FieldError, FieldTester, 'foo') self.assertRaises(FieldError, exec, "tester.field = 'foo'", globals(), locals()) - def test_boolean_field(self): class FieldTester: field = BooleanField('field') + def __init__(self, value): self.field = value - tester = FieldTester(True) self.assertEqual(True, tester.field) self.assertRaises(FieldError, FieldTester, 'foo') self.assertRaises(FieldError, exec, 'tester.field = 3', globals(), locals()) - def test_typed_list_field(self): class FieldTester: field = TypedListField('field', int) + def __init__(self, value): self.field = value - tester = FieldTester([1, 2, 3]) self.assertEqual([1, 2, 3], tester.field) self.assertRaises(FieldError, FieldTester, [1, 'foo']) self.assertRaises(FieldError, exec, 'tester.field = 3', globals(), locals()) - def test_typed_set_field(self): class FieldTester: field = TypedSetField('field', int) + def __init__(self, value): self.field = value - tester = FieldTester({1, 2, 3}) self.assertEqual({1, 2, 3}, tester.field) self.assertRaises(FieldError, FieldTester, {1, 'foo'}) - self.assertRaises(FieldError, exec, 'tester.field = [ 1, 2 ]', + self.assertRaises(FieldError, exec, 'tester.field = [1, 2]', globals(), locals()) - def test_typed_dict_field(self): class FieldTester: field = TypedDictField('field', str, int) + def __init__(self, value): self.field = value - user_dict = { - 'foo' : 1, - 'bar' : 2, - 'foobar' : 3 + 'foo' : 1, + 'bar' : 2, + 'foobar': 3 } tester = FieldTester(user_dict) self.assertEqual(user_dict, tester.field) - self.assertRaises(FieldError, FieldTester, { 1 : 'foo' }) - 
self.assertRaises(FieldError, FieldTester, { 'foo' : 1.3 }) - self.assertRaises(FieldError, exec, 'tester.field = [ 1, 2 ]', + self.assertRaises(FieldError, FieldTester, {1: 'foo'}) + self.assertRaises(FieldError, FieldTester, {'foo': 1.3}) + self.assertRaises(FieldError, exec, 'tester.field = [1, 2]', globals(), locals()) - def test_sanity_field(self): class FieldTester: field = SanityPatternField('field') @@ -336,15 +319,15 @@ class FieldTester: allow_none=True) sanity = { - 'out' : { - '(?\S+) (?\S+)' : [ + 'out': { + '(?\S+) (?\S+)': [ ('foo', float, int), ('bar', float, int) ], - '\e' : int + '\e': int }, - 'bar' : { - 'foobar' : [] + 'bar': { + 'foobar': [] }, } @@ -357,22 +340,20 @@ class FieldTester: globals(), locals()) self.assertRaises(FieldError, exec, """tester.field = { - 'out' : { - '(?\S+) (?\S+)' : [ + 'out': { + '(?\S+) (?\S+)': [ ('foo', float, int), ('bar', float, int), ], - '\e' : 34 + '\e': 34 }, }""", globals(), locals()) - def test_timer_field(self): class FieldTester: field = TimerField('field') field_maybe_none = TimerField('field_maybe_none', allow_none=True) - tester = FieldTester() tester.field = (65, 22, 47) tester.field_maybe_none = None @@ -399,7 +380,6 @@ class FieldTester: self.assertRaises(FieldError, exec, 'tester.field = (100, 3, 65)', globals(), locals()) - def test_sandbox(self): from reframe.core.environments import Environment from reframe.core.systems import System @@ -417,7 +397,6 @@ def test_sandbox(self): self.assertEqual(environ.name, 'myenv') self.assertEqual(system.name, 'mysystem') - def test_proxy_field(self): class Target: def __init__(self): @@ -439,7 +418,6 @@ class Proxy: self.assertEqual(3, t.a) self.assertEqual(4, t.b) - def test_settings(self): from reframe.settings import settings @@ -451,60 +429,59 @@ class TestScopedDict(unittest.TestCase): def test_construction(self): namespace_dict = ScopedDict() namespace_dict = ScopedDict({ - 'a' : { 'k1' : 3, 'k2' : 4 }, - 'b' : { 'k3' : 5 } + 'a' : {'k1' : 3, 'k2' : 4}, + 'b' : {'k3' : 5} }) - self.assertRaises(TypeError, ScopedDict, 1); - self.assertRaises(TypeError, ScopedDict, { 'a' : 1, 'b' : 2 }) - self.assertRaises(TypeError, ScopedDict, [ ('a', 1), ('b', 2) ]) - self.assertRaises(TypeError, ScopedDict, { 'a' : { 1 : 'k1' }, - 'b' : { 2 : 'k2' } }) - + self.assertRaises(TypeError, ScopedDict, 1) + self.assertRaises(TypeError, ScopedDict, {'a' : 1, 'b' : 2}) + self.assertRaises(TypeError, ScopedDict, [('a', 1), ('b', 2)]) + self.assertRaises(TypeError, ScopedDict, {'a' : {1 : 'k1'}, + 'b' : {2 : 'k2'}}) def test_key_resolution(self): scoped_dict = ScopedDict({ - 'a' : { 'k1' : 1, 'k2' : 2 }, - 'a:b' : { 'k1' : 3, 'k3' : 4 }, - 'a:b:c' : { 'k2' : 5, 'k3' : 6 }, - '*' : { 'k1' : 7, 'k3' : 9, 'k4' : 10 } + 'a' : {'k1': 1, 'k2': 2}, + 'a:b' : {'k1': 3, 'k3': 4}, + 'a:b:c': {'k2': 5, 'k3': 6}, + '*' : {'k1': 7, 'k3': 9, 'k4': 10} }) - self.assertEqual( 1, scoped_dict['a:k1']) - self.assertEqual( 2, scoped_dict['a:k2']) - self.assertEqual( 9, scoped_dict['a:k3']) + self.assertEqual(1, scoped_dict['a:k1']) + self.assertEqual(2, scoped_dict['a:k2']) + self.assertEqual(9, scoped_dict['a:k3']) self.assertEqual(10, scoped_dict['a:k4']) - self.assertEqual( 3, scoped_dict['a:b:k1']) - self.assertEqual( 2, scoped_dict['a:b:k2']) - self.assertEqual( 4, scoped_dict['a:b:k3']) + self.assertEqual(3, scoped_dict['a:b:k1']) + self.assertEqual(2, scoped_dict['a:b:k2']) + self.assertEqual(4, scoped_dict['a:b:k3']) self.assertEqual(10, scoped_dict['a:b:k4']) - self.assertEqual( 3, scoped_dict['a:b:c:k1']) - 
self.assertEqual( 5, scoped_dict['a:b:c:k2']) - self.assertEqual( 6, scoped_dict['a:b:c:k3']) + self.assertEqual(3, scoped_dict['a:b:c:k1']) + self.assertEqual(5, scoped_dict['a:b:c:k2']) + self.assertEqual(6, scoped_dict['a:b:c:k3']) self.assertEqual(10, scoped_dict['a:b:c:k4']) # Test global scope - self.assertEqual( 7, scoped_dict['k1']) + self.assertEqual(7, scoped_dict['k1']) self.assertRaises( KeyError, exec, "scoped_dict['k2']", globals(), locals() ) - self.assertEqual( 9, scoped_dict['k3']) + self.assertEqual(9, scoped_dict['k3']) self.assertEqual(10, scoped_dict['k4']) - self.assertEqual( 7, scoped_dict[':k1']) + self.assertEqual(7, scoped_dict[':k1']) self.assertRaises( KeyError, exec, "scoped_dict[':k2']", globals(), locals() ) - self.assertEqual( 9, scoped_dict[':k3']) + self.assertEqual(9, scoped_dict[':k3']) self.assertEqual(10, scoped_dict[':k4']) - self.assertEqual( 7, scoped_dict['*:k1']) + self.assertEqual(7, scoped_dict['*:k1']) self.assertRaises( KeyError, exec, "scoped_dict['*:k2']", globals(), locals() ) - self.assertEqual( 9, scoped_dict['*:k3']) + self.assertEqual(9, scoped_dict['*:k3']) self.assertEqual(10, scoped_dict['*:k4']) # Try to fool it, by requesting keys with scope names @@ -527,13 +504,12 @@ def test_key_resolution(self): KeyError, exec, "scoped_dict['']", globals(), locals() ) - def test_setitem(self): scoped_dict = ScopedDict({ - 'a' : { 'k1' : 1, 'k2' : 2 }, - 'a:b' : { 'k1' : 3, 'k3' : 4 }, - 'a:b:c' : { 'k2' : 5, 'k3' : 6 }, - '*' : { 'k1' : 7, 'k3' : 9, 'k4' : 10 } + 'a' : {'k1': 1, 'k2': 2}, + 'a:b' : {'k1': 3, 'k3': 4}, + 'a:b:c': {'k2': 5, 'k3': 6}, + '*' : {'k1': 7, 'k3': 9, 'k4': 10} }) scoped_dict['a:k2'] = 20 @@ -547,13 +523,12 @@ def test_setitem(self): self.assertEqual(50, scoped_dict['k5']) self.assertEqual(60, scoped_dict['k6']) - def test_delitem(self): scoped_dict = ScopedDict({ - 'a' : { 'k1' : 1, 'k2' : 2 }, - 'a:b' : { 'k1' : 3, 'k3' : 4 }, - 'a:b:c' : { 'k2' : 5, 'k3' : 6 }, - '*' : { 'k1' : 7, 'k3' : 9, 'k4' : 10 } + 'a' : {'k1': 1, 'k2': 2}, + 'a:b' : {'k1': 3, 'k3': 4}, + 'a:b:c': {'k2': 5, 'k3': 6}, + '*' : {'k1': 7, 'k3': 9, 'k4': 10} }) # delete key @@ -574,26 +549,24 @@ def test_delitem(self): KeyError, exec, "del scoped_dict['a:k4']", globals(), locals() ) - def test_addscopes(self): scoped_dict = ScopedDict({ - 'a' : { 'k1' : 1, 'k2' : 2 }, - 'a:b' : { 'k1' : 3, 'k3' : 4 }, - 'a:b:c' : { 'k2' : 5, 'k3' : 6 }, - '*' : { 'k1' : 7, 'k3' : 9, 'k4' : 10 } + 'a' : {'k1': 1, 'k2': 2}, + 'a:b' : {'k1': 3, 'k3': 4}, + 'a:b:c': {'k2': 5, 'k3': 6}, + '*' : {'k1': 7, 'k3': 9, 'k4': 10} }) scoped_dict_alt = ScopedDict() scoped_dict_alt.add_scopes({ - 'a' : { 'k1' : 1, 'k2' : 2 }, - 'a:b' : { 'k1' : 3, 'k3' : 4 }, - 'a:b:c' : { 'k2' : 5, 'k3' : 6 }, - '*' : { 'k1' : 7, 'k3' : 9, 'k4' : 10 } + 'a' : {'k1': 1, 'k2': 2}, + 'a:b' : {'k1': 3, 'k3': 4}, + 'a:b:c': {'k2': 5, 'k3': 6}, + '*' : {'k1': 7, 'k3': 9, 'k4': 10} }) self.assertEqual(scoped_dict, scoped_dict_alt) self.assertRaises(KeyError, scoped_dict.add_scopes, - { 'a' : { 'k1' : 1 } }); - + {'a': {'k1': 1}}) def test_scoped_dict_field(self): class FieldTester: @@ -605,10 +578,10 @@ class FieldTester: # Test valid assignments tester.field = { - 'a' : { 'k1' : 1, 'k2' : 2 }, - 'a:b' : { 'k1' : 3, 'k3' : 4 }, - 'a:b:c' : { 'k2' : 5, 'k3' : 6 }, - '*' : { 'k1' : 7, 'k3' : 9, 'k4' : 10 } + 'a' : {'k1': 1, 'k2': 2}, + 'a:b' : {'k1': 3, 'k3': 4}, + 'a:b:c': {'k2': 5, 'k3': 6}, + '*' : {'k1': 7, 'k3': 9, 'k4': 10} } tester.field_maybe_none = None @@ -618,12 +591,12 @@ class FieldTester: # 
Test invalid assignments self.assertRaises(FieldError, exec, - 'tester.field = { 1 : "a", 2 : "b" }', + 'tester.field = {1: "a", 2: "b" }', globals(), locals()) self.assertRaises(FieldError, exec, - "tester.field = [ ('a', 1), ('b', 2) ]", + "tester.field = [('a', 1), ('b', 2)]", globals(), locals()) self.assertRaises(FieldError, exec, - """tester.field = { 'a' : { 1 : 'k1' }, - 'b' : { 2 : 'k2' } }""", + """tester.field = {'a': {1: 'k1'}, + 'b': {2: 'k2'}}""", globals(), locals()) diff --git a/unittests/test_launchers.py b/unittests/test_launchers.py index f6afb3469c..6179f0f509 100644 --- a/unittests/test_launchers.py +++ b/unittests/test_launchers.py @@ -14,12 +14,12 @@ def setUp(self): # Pattern to match: must include only horizontal spaces [ \t] # (\h in perl; in python \h might be introduced in future) self.expected_launcher_patt = None - self.launcher_options = [ '--foo' ] - self.target_executable = 'hostname' + self.launcher_options = ['--foo'] + self.target_executable = 'hostname' @property def launcher_command(self): - return ' '.join([ self.launcher.executable ] + + return ' '.join([self.launcher.executable] + self.launcher.fixed_options) @property @@ -64,7 +64,7 @@ class TestLauncherWrapperAlps(_TestLauncher): def setUp(self): super().setUp() self.launcher = LauncherWrapper(AlpsLauncher(None), - 'ddt', '-o foo.out'.split()) + 'ddt', '-o foo.out'.split()) self.expected_launcher_patt = '^[ \t]*ddt[ \t]+-o[ \t]+foo.out' \ '[ \t]+aprun[ \t]+-B[ \t]*$' @@ -73,7 +73,7 @@ class TestLauncherWrapperNativeSlurm(_TestLauncher): def setUp(self): super().setUp() self.launcher = LauncherWrapper(NativeSlurmLauncher(None), - 'ddt', '-o foo.out'.split()) + 'ddt', '-o foo.out'.split()) self.expected_launcher_patt = '^[ \t]*ddt[ \t]+-o[ \t]+foo.out' \ '[ \t]+srun[ \t]*$' @@ -119,7 +119,7 @@ def setUp(self): self.launcher = VisitLauncher(self.job) self.expected_launcher_patt = '^[ \t]*visit[ \t]+-np[ \t]+5[ \t]+' \ '-nn[ \t]+3[ \t]+-l[ \t]+srun[ \t]*$' - self.launcher_options = [ '-o data.nc' ] + self.launcher_options = ['-o data.nc'] self.target_executable = '' @property @@ -135,9 +135,9 @@ def setUp(self): job_script_builder=self.builder) self.launcher = VisitLauncher(self.job) self.expected_launcher_patt = '^[ \t]*visit[ \t]*$' - self.launcher_options = [ '-o data.nc' ] + self.launcher_options = ['-o data.nc'] self.target_executable = '' @property def expected_shell_script_patt(self): - return '^[ \t]*%s[ \t]+-o[ \t]+data.nc[ \t]*$' % self.launcher_command \ No newline at end of file + return '^[ \t]*%s[ \t]+-o[ \t]+data.nc[ \t]*$' % self.launcher_command diff --git a/unittests/test_loader.py b/unittests/test_loader.py index d2f54b4018..53ea9fd411 100644 --- a/unittests/test_loader.py +++ b/unittests/test_loader.py @@ -8,12 +8,12 @@ from reframe.frontend.resources import ResourcesManager from unittests.fixtures import TEST_SITE_CONFIG + class TestSiteConfigurationFromDict(unittest.TestCase): def setUp(self): self.config = SiteConfiguration() self.site_config = copy.deepcopy(TEST_SITE_CONFIG) - def test_load_success(self): self.config.load_from_dict(self.site_config) self.assertEqual(1, len(self.config.systems)) @@ -38,96 +38,83 @@ def test_load_success(self): # Check that the PrgEnv-gnu of the gpu partition is resolved to the # default one env_gpu = system.partition('gpu').environment('PrgEnv-gnu') - self.assertEqual('cc', env_gpu.cc) - self.assertEqual('CC', env_gpu.cxx) + self.assertEqual('cc', env_gpu.cc) + self.assertEqual('CC', env_gpu.cxx) self.assertEqual('ftn', env_gpu.ftn) # Check 
resource instantiation - self.assertEqual([ '--gres=gpu:16' ], + self.assertEqual(['--gres=gpu:16'], system.partition('gpu').get_resource( - 'num_gpus_per_node', '16' - )) - - + 'num_gpus_per_node', '16')) def test_load_failure_empty_dict(self): site_config = {} self.assertRaises(ConfigurationError, self.config.load_from_dict, site_config) - def test_load_failure_no_environments(self): - site_config = { 'systems' : {} } + site_config = {'systems': {}} self.assertRaises(ConfigurationError, self.config.load_from_dict, site_config) - def test_load_failure_no_systems(self): - site_config = { 'environments' : {} } + site_config = {'environments' : {}} self.assertRaises(ConfigurationError, self.config.load_from_dict, site_config) - def test_load_failure_environments_no_scoped_dict(self): self.site_config['environments'] = { - 'testsys' : 'PrgEnv-gnu' + 'testsys': 'PrgEnv-gnu' } self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_partitions_nodict(self): - self.site_config['systems']['testsys']['partitions'] = [ 'gpu' ] + self.site_config['systems']['testsys']['partitions'] = ['gpu'] self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_systems_nodict(self): - self.site_config['systems']['testsys'] = [ 'gpu' ] + self.site_config['systems']['testsys'] = ['gpu'] self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_partitions_nodict(self): self.site_config['systems']['testsys']['partitions']['login'] = 'foo' self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_partconfig_nodict(self): self.site_config['systems']['testsys']['partitions']['login'] = 'foo' self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_unresolved_environment(self): self.site_config['environments'] = { - '*' : { - 'PrgEnv-gnu' : { - 'type' : 'ProgEnvironment', - 'modules' : [ 'PrgEnv-gnu' ], + '*': { + 'PrgEnv-gnu': { + 'type': 'ProgEnvironment', + 'modules': ['PrgEnv-gnu'], } } } self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_envconfig_nodict(self): self.site_config['environments'] = { - '*' : { - 'PrgEnv-gnu' : 'foo' + '*': { + 'PrgEnv-gnu': 'foo' } } self.assertRaises(ConfigurationError, self.config.load_from_dict, self.site_config) - def test_load_failure_envconfig_notype(self): self.site_config['environments'] = { - '*' : { - 'PrgEnv-gnu' : { - 'modules' : [ 'PrgEnv-gnu' ], + '*': { + 'PrgEnv-gnu': { + 'modules': ['PrgEnv-gnu'], } } } @@ -139,15 +126,14 @@ class TestRegressionCheckLoader(unittest.TestCase): def setUp(self): self.loader = RegressionCheckLoader(['.']) self.loader_with_path = RegressionCheckLoader( - [ 'unittests/resources', 'unittests/foobar' ]) + ['unittests/resources', 'unittests/foobar']) self.loader_with_prefix = RegressionCheckLoader( - load_path = [ 'badchecks' ], - prefix = os.path.abspath('unittests/resources')) + load_path=['badchecks'], + prefix=os.path.abspath('unittests/resources')) self.system = System('foo') self.resources = ResourcesManager() - def test_load_file_relative(self): checks = self.loader.load_from_file( 'unittests/resources/emptycheck.py', @@ -156,7 +142,6 @@ def test_load_file_relative(self): self.assertEqual(1, len(checks)) self.assertEqual(checks[0].name, 'emptycheck') - def test_load_file_absolute(self): checks = self.loader.load_from_file( 
os.path.abspath('unittests/resources/emptycheck.py'), @@ -165,7 +150,6 @@ def test_load_file_absolute(self): self.assertEqual(1, len(checks)) self.assertEqual(checks[0].name, 'emptycheck') - def test_load_recursive(self): checks = self.loader.load_from_dir( 'unittests/resources', recurse=True, @@ -173,19 +157,16 @@ def test_load_recursive(self): ) self.assertEqual(11, len(checks)) - def test_load_all(self): checks = self.loader_with_path.load_all(system=self.system, resources=self.resources) self.assertEqual(10, len(checks)) - def test_load_all_with_prefix(self): checks = self.loader_with_prefix.load_all(system=self.system, resources=self.resources) self.assertEqual(1, len(checks)) - def test_load_error(self): self.assertRaises(ReframeError, self.loader.load_from_file, 'unittests/resources/foo.py') diff --git a/unittests/test_logging.py b/unittests/test_logging.py index a48c2a8901..88aedb5941 100644 --- a/unittests/test_logging.py +++ b/unittests/test_logging.py @@ -40,11 +40,9 @@ def setUp(self): ) ) - def tearDown(self): os.remove(self.logfile) - def found_in_logfile(self, string): found = False with open(self.logfile, 'rt') as f: @@ -52,12 +50,10 @@ def found_in_logfile(self, string): return found - def test_invalid_loglevel(self): self.assertRaises(ReframeError, self.logger.setLevel, 'level') self.assertRaises(ReframeError, Logger, 'logger', 'level') - def test_custom_loglevels(self): self.logger_without_check.info('foo') self.logger_without_check.verbose('bar') @@ -67,7 +63,6 @@ def test_custom_loglevels(self): self.assertTrue(self.found_in_logfile('verbose')) self.assertTrue(self.found_in_logfile('reframe')) - def test_check_logger(self): self.logger_with_check.info('foo') self.logger_with_check.verbose('bar') @@ -77,7 +72,6 @@ def test_check_logger(self): self.assertTrue(self.found_in_logfile('verbose')) self.assertTrue(self.found_in_logfile('random_check')) - def test_custom_handler_levels(self): self.handler.setLevel('verbose') self.handler.setLevel(VERBOSE) @@ -88,7 +82,6 @@ def test_custom_handler_levels(self): self.assertFalse(self.found_in_logfile('foo')) self.assertTrue(self.found_in_logfile('bar')) - def test_logger_levels(self): self.logger_with_check.setLevel('verbose') self.logger_with_check.setLevel(VERBOSE) @@ -116,16 +109,14 @@ def setUp(self): } } self.logger = None - self.check = RegressionTest( + self.check = RegressionTest( 'random_check', '.', System('foosys'), ResourcesManager() ) - def tearDown(self): if os.path.exists(self.logfile): os.remove(self.logfile) - def found_in_logfile(self, string): for handler in self.logger.handlers: handler.flush() @@ -137,37 +128,30 @@ def found_in_logfile(self, string): return found - def set_logger(self): from reframe.core.logging import load_from_dict self.logger = load_from_dict(self.logging_config) - def close_handlers(self): for h in self.logger.handlers: h.close() - def flush_handlers(self): for h in self.logger.handlers: h.flush() - def test_valid_level(self): self.set_logger() self.assertEqual(INFO, self.logger.getEffectiveLevel()) - def test_no_handlers(self): del self.logging_config['handlers'] self.assertRaises(ConfigurationError, self.set_logger) - def test_empty_handlers(self): self.logging_config['handlers'] = {} self.assertRaises(ConfigurationError, self.set_logger) - def test_handler_level(self): self.set_logger() self.logger.info('foo') @@ -176,7 +160,6 @@ def test_handler_level(self): self.assertFalse(self.found_in_logfile('foo')) self.assertTrue(self.found_in_logfile('bar')) - def test_handler_append(self): 
self.set_logger() self.logger.warning('foo') @@ -189,7 +172,6 @@ def test_handler_append(self): self.assertTrue(self.found_in_logfile('foo')) self.assertTrue(self.found_in_logfile('bar')) - def test_handler_noappend(self): self.logging_config = { 'level' : 'INFO', @@ -214,7 +196,6 @@ def test_handler_noappend(self): self.assertFalse(self.found_in_logfile('foo')) self.assertTrue(self.found_in_logfile('bar')) - # FIXME: this test is not robust def test_date_format(self): self.set_logger() @@ -222,7 +203,6 @@ def test_date_format(self): self.flush_handlers() self.assertTrue(self.found_in_logfile(datetime.now().strftime('%F'))) - def test_stream_handler_stdout(self): self.logging_config = { 'level' : 'INFO', @@ -238,7 +218,6 @@ def test_stream_handler_stdout(self): self.assertTrue(isinstance(handler, StreamHandler)) self.assertEqual(handler.stream, sys.stdout) - def test_stream_handler_stderr(self): self.logging_config = { 'level' : 'INFO', @@ -254,7 +233,6 @@ def test_stream_handler_stderr(self): self.assertTrue(isinstance(handler, StreamHandler)) self.assertEqual(handler.stream, sys.stderr) - def test_multiple_handlers(self): self.logging_config = { 'level' : 'INFO', @@ -266,7 +244,6 @@ def test_multiple_handlers(self): self.set_logger() self.assertEqual(len(self.logger.handlers), 2) - def test_global_noconfig(self): # This is to test the case when no configuration is set, but since the # order the unit tests are invoked is arbitrary, we emulate the @@ -278,7 +255,6 @@ def test_global_noconfig(self): self.assertEqual(None, frontend_logger.logger) self.assertEqual(None, check_logger.logger) - def test_global_config(self): configure_logging(self.logging_config) frontend_logger = getlogger('frontend') diff --git a/unittests/test_parsers.py b/unittests/test_parsers.py index 701cbbc48c..1522e46535 100644 --- a/unittests/test_parsers.py +++ b/unittests/test_parsers.py @@ -12,6 +12,7 @@ from reframe.utility.functions import standard_threshold from reframe.utility.parsers import * + class StatefulParserTest(unittest.TestCase): def setUp(self): self.system = System('daint') @@ -54,7 +55,6 @@ def setUp(self): } } - def tearDown(self): self.perf_file.close() self.output_file.close() @@ -62,7 +62,6 @@ def tearDown(self): os.remove(self.output_file.name) shutil.rmtree(self.resourcesdir) - def _add_parser_region(self): self.test.perf_patterns[self.perf_file.name].update({ '(?P== ENABLE ==)' : [ @@ -84,15 +83,12 @@ def _add_parser_region(self): self.test.reference['*:switch'] = None - def _write_marker_enable(self, file): file.write('== ENABLE ==\n') - def _write_marker_disable(self, file): file.write('== DISABLE ==\n') - def is_parser_clear(self, parser, **kwargs): return not parser.is_on @@ -101,7 +97,6 @@ class TestStatefulParserPerformance(StatefulParserTest): def setUp(self): super().setUp() - def _write_good_performance(self, file, with_region=False): if with_region: file.write('performance = 0.1\n') @@ -116,7 +111,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: file.write('performance = 1.9\n') @@ -131,41 +125,35 @@ def _write_bad_performance(self, file, with_region=False): file.close() - def test_performance_success(self): self.test.perf_parser.on() self._write_good_performance(file=self.perf_file) self.assertTrue(self.test.check_performance()) self.assertTrue(self.is_parser_clear(self.test.perf_parser)) - def test_performance_success_with_region(self): self._add_parser_region() 
self._write_good_performance(file=self.perf_file, with_region=True) self.assertTrue(self.test.check_performance()) self.assertTrue(self.is_parser_clear(self.test.perf_parser)) - def test_performance_failure(self): self.test.perf_parser.on() self._write_bad_performance(file=self.perf_file) self.assertFalse(self.test.check_performance()) self.assertTrue(self.is_parser_clear(self.test.perf_parser)) - def test_performance_failure_with_region(self): self._add_parser_region() self._write_bad_performance(file=self.perf_file, with_region=True) self.assertFalse(self.test.check_performance()) self.assertTrue(self.is_parser_clear(self.test.perf_parser)) - def test_default_status(self): self._write_good_performance(file=self.perf_file) self.assertFalse(self.test.check_performance()) self.assertTrue(self.is_parser_clear(self.test.perf_parser)) - def test_empty_file(self): self.perf_file.close() self.assertFalse(self.test.check_performance()) @@ -176,7 +164,6 @@ class TestStatefulParserSanity(StatefulParserTest): def setUp(self): super().setUp() - def _write_good_sanity(self, file, with_region=False): if with_region: file.write('result = failure\n') @@ -191,7 +178,6 @@ def _write_good_sanity(self, file, with_region=False): file.close() - def _write_bad_sanity(self, file, with_region=False): if with_region: file.write('result = success\n') @@ -206,41 +192,35 @@ def _write_bad_sanity(self, file, with_region=False): file.close() - def test_sanity_success(self): self.test.sanity_parser.on() self._write_good_sanity(file=self.output_file) self.assertTrue(self.test.check_sanity()) self.assertTrue(self.is_parser_clear(self.test.sanity_parser)) - def test_sanity_success_with_region(self): self._add_parser_region() self._write_good_sanity(file=self.output_file, with_region=True) self.assertTrue(self.test.check_sanity()) self.assertTrue(self.is_parser_clear(self.test.sanity_parser)) - def test_sanity_failure(self): self.test.sanity_parser.on() self._write_bad_sanity(file=self.output_file) self.assertFalse(self.test.check_sanity()) self.assertTrue(self.is_parser_clear(self.test.sanity_parser)) - def test_sanity_failure_with_region(self): self._add_parser_region() self._write_bad_sanity(file=self.output_file, with_region=True) self.assertFalse(self.test.check_sanity()) self.assertTrue(self.is_parser_clear(self.test.sanity_parser)) - def test_default_status(self): self._write_good_sanity(file=self.output_file) self.assertFalse(self.test.check_sanity()) self.assertTrue(self.is_parser_clear(self.test.sanity_parser)) - def test_empty_file(self): self.output_file.close() self.assertFalse(self.test.check_sanity()) @@ -277,7 +257,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: file.write('performance = 1.9\n') @@ -293,7 +272,6 @@ def _write_bad_performance(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): if parser.count != 0: return False @@ -314,7 +292,6 @@ def setUp(self): } } - def _write_good_sanity(self, file, with_region=False): if with_region: self._write_marker_enable(file) @@ -332,7 +309,6 @@ def _write_good_sanity(self, file, with_region=False): file.close() - def _write_bad_sanity(self, file, with_region=False): if with_region: self._write_marker_enable(file) @@ -349,9 +325,8 @@ def _write_bad_sanity(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): - if parser.count != 0 or parser.last_match != None: + if parser.count != 0 
or parser.last_match is not None: return False return super().is_parser_clear(parser) @@ -370,7 +345,6 @@ def setUp(self): } } - def _write_good_sanity(self, file, with_region=False): if with_region: file.write('nid123\n') @@ -387,7 +361,6 @@ def _write_good_sanity(self, file, with_region=False): file.close() - def _write_bad_sanity(self, file, with_region=False): if with_region: self._write_marker_enable(file) @@ -404,12 +377,11 @@ def _write_bad_sanity(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): if parser.count != 0: return False - if parser.last_match != None: + if parser.last_match is not None: return False return True @@ -429,7 +401,6 @@ def setUp(self): } } - def _write_good_performance(self, file, with_region=False): if with_region: self._write_marker_enable(file) @@ -445,7 +416,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: self._write_marker_enable(file) @@ -461,9 +431,8 @@ def _write_bad_performance(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): - if parser.count != 0 or parser.last_match != None: + if parser.count != 0 or parser.last_match is not None: return False return super().is_parser_clear(parser) @@ -482,7 +451,6 @@ def setUp(self): } } - def _write_good_sanity(self, file, with_region=False): if with_region: file.write('nid009\n') @@ -500,7 +468,6 @@ def _write_good_sanity(self, file, with_region=False): file.close() - def _write_bad_sanity(self, file, with_region=False): if with_region: file.write('nid001\n') @@ -519,7 +486,6 @@ def _write_bad_sanity(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): return not parser.matched @@ -537,7 +503,6 @@ def setUp(self): } } - def _write_good_performance(self, file, with_region=False): if with_region: file.write('performance = 0.2\n') @@ -554,7 +519,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: file.write('performance = 2.1\n') @@ -571,12 +535,11 @@ def _write_bad_performance(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): - if parser.value != None: + if parser.value is not None: return False - if parser.reference != None: + if parser.reference is not None: return False return True @@ -595,7 +558,6 @@ def setUp(self): } } - def _write_good_performance(self, file, with_region=False): if with_region: file.write('performance = 10.1\n') @@ -612,7 +574,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: file.write('performance = 2.1\n') @@ -629,12 +590,11 @@ def _write_bad_performance(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): - if parser.value != None: + if parser.value is not None: return False - if parser.reference != None: + if parser.reference is not None: return False return True @@ -655,7 +615,6 @@ def setUp(self): } } - def _write_good_performance(self, file, with_region=False): if with_region: file.write('val = 1\n') @@ -672,7 +631,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: file.write('val = 4\n') @@ -688,12 +646,11 @@ def _write_bad_performance(self, file, with_region=False): 
file.close() - def is_parser_clear(self, parser, **kwargs): - if parser.value != None: + if parser.value is not None: return False - if parser.reference != None: + if parser.reference is not None: return False return True @@ -712,7 +669,6 @@ def setUp(self): } } - def _write_good_performance(self, file, with_region=False): if with_region: file.write('val = 100\n') @@ -729,7 +685,6 @@ def _write_good_performance(self, file, with_region=False): file.close() - def _write_bad_performance(self, file, with_region=False): if with_region: file.write('val = -100\n') @@ -745,12 +700,11 @@ def _write_bad_performance(self, file, with_region=False): file.close() - def is_parser_clear(self, parser, **kwargs): - if parser.value != None: + if parser.value is not None: return False - if parser.reference != None: + if parser.reference is not None: return False if parser.count != 0: diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py index 3d3c37a271..40308969dc 100644 --- a/unittests/test_pipeline.py +++ b/unittests/test_pipeline.py @@ -32,11 +32,9 @@ def setUp(self): self.loader = RegressionCheckLoader(['unittests/resources']) self.resources = ResourcesManager(prefix=self.resourcesdir) - def tearDown(self): shutil.rmtree(self.resourcesdir, ignore_errors=True) - def setup_from_site(self): self.partition = system_with_scheduler(None) @@ -44,25 +42,22 @@ def setup_from_site(self): if self.partition.environs: self.progenv = self.partition.environs[0] - def replace_prefix(self, filename, new_prefix): basename = os.path.basename(filename) return os.path.join(new_prefix, basename) - - def keep_files_list(self, test, compile_only = False): - ret = [ self.replace_prefix(test.stdout, test.outputdir), - self.replace_prefix(test.stderr, test.outputdir) ] + def keep_files_list(self, test, compile_only=False): + ret = [self.replace_prefix(test.stdout, test.outputdir), + self.replace_prefix(test.stderr, test.outputdir)] if not compile_only: ret.append(self.replace_prefix(test.job.script_filename, test.outputdir)) - ret.extend([ self.replace_prefix(f, test.outputdir) - for f in test.keep_files ]) + ret.extend([self.replace_prefix(f, test.outputdir) + for f in test.keep_files]) return ret - def test_environ_setup(self): test = self.loader.load_from_file( 'unittests/resources/hellocheck.py', @@ -70,9 +65,9 @@ def test_environ_setup(self): )[0] # Use test environment for the regression check - test.valid_prog_environs = [ self.progenv.name ] - test.modules = [ 'testmod_foo' ] - test.variables = { '_FOO_' : '1', '_BAR_' : '2' } + test.valid_prog_environs = [self.progenv.name] + test.modules = ['testmod_foo'] + test.variables = {'_FOO_': '1', '_BAR_': '2'} test.local = True test.setup(self.partition, self.progenv) @@ -85,7 +80,6 @@ def test_environ_setup(self): # Manually unload the environment self.progenv.unload() - def _run_test(self, test, compile_only=False, performance_result=True): test.setup(self.partition, self.progenv) test.compile() @@ -98,7 +92,6 @@ def _run_test(self, test, compile_only=False, performance_result=True): for f in self.keep_files_list(test, compile_only): self.assertTrue(os.path.exists(f)) - @unittest.skipIf(not system_with_scheduler(None), 'job submission not supported') def test_hellocheck(self): @@ -109,10 +102,9 @@ def test_hellocheck(self): )[0] # Use test environment for the regression check - test.valid_prog_environs = [ self.progenv.name ] + test.valid_prog_environs = [self.progenv.name] self._run_test(test) - @unittest.skipIf(not system_with_scheduler(None), 'job submission 
not supported') def test_hellocheck_make(self): @@ -123,10 +115,9 @@ def test_hellocheck_make(self): )[0] # Use test environment for the regression check - test.valid_prog_environs = [ self.progenv.name ] + test.valid_prog_environs = [self.progenv.name] self._run_test(test) - def test_hellocheck_local(self): test = self.loader.load_from_file( 'unittests/resources/hellocheck.py', @@ -134,18 +125,17 @@ def test_hellocheck_local(self): )[0] # Use test environment for the regression check - test.valid_prog_environs = [ self.progenv.name ] + test.valid_prog_environs = [self.progenv.name] # Test also the prebuild/postbuild functionality - test.prebuild_cmd = [ 'touch prebuild' ] - test.postbuild_cmd = [ 'touch postbuild' ] - test.keepfiles = [ 'prebuild', 'postbuild' ] + test.prebuild_cmd = ['touch prebuild'] + test.postbuild_cmd = ['touch postbuild'] + test.keepfiles = ['prebuild', 'postbuild'] # Force local execution of the test test.local = True self._run_test(test) - def test_hellocheck_local_slashes(self): # Try to fool path creation by adding slashes to environment partitions # names @@ -154,7 +144,6 @@ def test_hellocheck_local_slashes(self): self.partition.name += os.sep + 'bad' self.test_hellocheck_local() - def test_run_only(self): test = RunOnlyRegressionTest('runonlycheck', 'unittests/resources', @@ -163,26 +152,24 @@ def test_run_only(self): test.executable = './hello.sh' test.executable_opts = ['Hello, World!'] test.local = True - test.valid_prog_environs = [ '*' ] - test.valid_systems = [ '*' ] + test.valid_prog_environs = ['*'] + test.valid_systems = ['*'] test.sanity_patterns = { - '-' : { 'Hello, World\!' : [] } + '-' : {'Hello, World\!': []} } self._run_test(test) - def test_compile_only_failure(self): test = CompileOnlyRegressionTest('compileonlycheck', 'unittests/resources', resources=self.resources, system=self.system) test.sourcepath = 'compiler_failure.c' - test.valid_prog_environs = [ self.progenv.name ] - test.valid_systems = [ self.system.name ] + test.valid_prog_environs = [self.progenv.name] + test.valid_systems = [self.system.name] test.setup(self.partition, self.progenv) self.assertRaises(CompilationError, test.compile) - def test_compile_only_warning(self): test = CompileOnlyRegressionTest('compileonlycheckwarning', 'unittests/resources', @@ -190,14 +177,13 @@ def test_compile_only_warning(self): system=self.system) test.sourcepath = 'compiler_warning.c' self.progenv.cflags = '-Wall' - test.valid_prog_environs = [ self.progenv.name ] - test.valid_systems = [ self.system.name ] + test.valid_prog_environs = [self.progenv.name] + test.valid_systems = [self.system.name] test.sanity_patterns = { '&2': {'warning': []} } self._run_test(test, compile_only=True) - def test_supports_system(self): test = self.loader.load_from_file( 'unittests/resources/hellocheck.py', @@ -205,59 +191,56 @@ def test_supports_system(self): )[0] test.current_system = System('testsys') - test.valid_systems = [ '*' ] + test.valid_systems = ['*'] self.assertTrue(test.supports_system('gpu')) self.assertTrue(test.supports_system('login')) self.assertTrue(test.supports_system('testsys:gpu')) self.assertTrue(test.supports_system('testsys:login')) - test.valid_systems = [ 'testsys' ] + test.valid_systems = ['testsys'] self.assertTrue(test.supports_system('gpu')) self.assertTrue(test.supports_system('login')) self.assertTrue(test.supports_system('testsys:gpu')) self.assertTrue(test.supports_system('testsys:login')) - test.valid_systems = [ 'testsys:gpu' ] + test.valid_systems = ['testsys:gpu'] 
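The valid_systems assertions in this test rely on a small set of matching rules: '*' matches any partition, a bare system name matches every partition of that system, and a 'system:partition' entry matches only that partition, while a bare partition argument such as 'gpu' is first qualified with the current system's name. The following is a minimal sketch of that assumed matching logic, illustrative only and not the framework's actual supports_system() implementation:

def supports_system(valid_systems, current_system, spec):
    # Qualify a bare partition name ('gpu') with the current system name
    if ':' not in spec:
        spec = '%s:%s' % (current_system, spec)

    system_name, _ = spec.split(':', maxsplit=1)
    return any(entry in ('*', system_name, spec) for entry in valid_systems)


# With valid_systems == ['testsys:gpu'], only 'gpu' and 'testsys:gpu' match,
# which is what the assertions that follow expect:
assert supports_system(['testsys:gpu'], 'testsys', 'gpu')
assert not supports_system(['testsys:gpu'], 'testsys', 'testsys:login')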
self.assertTrue(test.supports_system('gpu')) self.assertFalse(test.supports_system('login')) self.assertTrue(test.supports_system('testsys:gpu')) self.assertFalse(test.supports_system('testsys:login')) - test.valid_systems = [ 'testsys:login' ] + test.valid_systems = ['testsys:login'] self.assertFalse(test.supports_system('gpu')) self.assertTrue(test.supports_system('login')) self.assertFalse(test.supports_system('testsys:gpu')) self.assertTrue(test.supports_system('testsys:login')) - test.valid_systems = [ 'foo' ] + test.valid_systems = ['foo'] self.assertFalse(test.supports_system('gpu')) self.assertFalse(test.supports_system('login')) self.assertFalse(test.supports_system('testsys:gpu')) self.assertFalse(test.supports_system('testsys:login')) - def test_sourcesdir_none(self): test = RegressionTest('hellocheck', 'unittests/resources', resources=self.resources, system=self.system) test.sourcesdir = None - test.valid_prog_environs = [ '*' ] - test.valid_systems = [ '*' ] + test.valid_prog_environs = ['*'] + test.valid_systems = ['*'] self.assertRaises(ReframeError, self._run_test, test) - def test_sourcesdir_none_compile_only(self): test = CompileOnlyRegressionTest('hellocheck', 'unittests/resources', resources=self.resources, system=self.system) test.sourcesdir = None - test.valid_prog_environs = [ '*' ] - test.valid_systems = [ '*' ] + test.valid_prog_environs = ['*'] + test.valid_systems = ['*'] self.assertRaises(ReframeError, self._run_test, test) - def test_sourcesdir_none_run_only(self): test = RunOnlyRegressionTest('hellocheck', 'unittests/resources', @@ -265,12 +248,12 @@ def test_sourcesdir_none_run_only(self): system=self.system) test.sourcesdir = None test.executable = 'echo' - test.executable_opts = [ "Hello, World!" ] + test.executable_opts = ["Hello, World!"] test.local = True - test.valid_prog_environs = [ '*' ] - test.valid_systems = [ '*' ] + test.valid_prog_environs = ['*'] + test.valid_systems = ['*'] test.sanity_patterns = { - '-' : { 'Hello, World\!' 
: [] } + '-' : {'Hello, World\!': []} } self._run_test(test) @@ -290,39 +273,38 @@ def setUp(self): self.test.current_system = self.system self.test.current_partition = self.system.partition('gpu') self.test.reference = { - 'testsys' : { - 'value1' : (1.4, -0.1, 0.1), - 'value2' : (1.7, -0.1, 0.1), + 'testsys': { + 'value1': (1.4, -0.1, 0.1), + 'value2': (1.7, -0.1, 0.1), }, - 'testsys:gpu' : { - 'value3' : (3.1, -0.1, 0.1), + 'testsys:gpu': { + 'value3': (3.1, -0.1, 0.1), } } self.perf_file = tempfile.NamedTemporaryFile(mode='wt', delete=False) self.output_file = tempfile.NamedTemporaryFile(mode='wt', delete=False) self.test.perf_patterns = { - self.perf_file.name : { - 'performance1 = (?P\S+)' : [ + self.perf_file.name: { + 'performance1 = (?P\S+)': [ ('value1', float, standard_threshold) ], - 'performance2 = (?P\S+)' : [ + 'performance2 = (?P\S+)': [ ('value2', float, standard_threshold) ], - 'performance3 = (?P\S+)' : [ + 'performance3 = (?P\S+)': [ ('value3', float, standard_threshold) ] } } self.test.sanity_patterns = { - self.output_file.name : { - 'result = success' : [] + self.output_file.name: { + 'result = success': [] } } self.test.stagedir = self.test.prefix - def tearDown(self): self.perf_file.close() self.output_file.close() @@ -330,8 +312,7 @@ def tearDown(self): os.remove(self.output_file.name) shutil.rmtree(self.resourcesdir) - - def write_performance_output(self, file = None, **kwargs): + def write_performance_output(self, file=None, **kwargs): if not file: file = self.perf_file @@ -340,15 +321,20 @@ def write_performance_output(self, file = None, **kwargs): file.close() - def custom_sanity(self, value, reference, **kwargs): return value == 'success' - # custom threshold function def custom_threshold(self, value, reference, **kwargs): - return value >= reference*0.9 and value <= reference*1.1 + return value >= reference * 0.9 and value <= reference * 1.1 + def assertReportGeneration(self): + # Assert that the different reports are generated without unexpected + # exceptions; no check is made as of their contents + self.test.sanity_info.scan_report() + self.test.perf_info.scan_report() + self.test.sanity_info.failure_report() + self.test.perf_info.failure_report() def test_success(self): self.write_performance_output(performance1=1.3, @@ -359,20 +345,48 @@ def test_success(self): self.assertTrue(self.test.check_sanity()) self.assertTrue(self.test.check_performance()) + # Verify that the sanity/perf. 
check info is collected correctly + self.assertIsNotNone(self.test.sanity_info.matched_pattern( + self.output_file.name, 'result = success')) + + expected_perf_info = { + self.perf_file.name: { + 'performance1 = (?P\S+)': [ + ('value1', 1.3, (1.4, -0.1, 0.1), True) + ], + 'performance2 = (?P\S+)': [ + ('value2', 1.8, (1.7, -0.1, 0.1), True) + ], + 'performance3 = (?P\S+)': [ + ('value3', 3.3, (3.1, -0.1, 0.1), True) + ] + } + } + for path, patterns in expected_perf_info.items(): + for patt, taglist in patterns.items(): + self.assertIsNotNone( + self.test.perf_info.matched_pattern(path, patt)) + + for t in taglist: + tinfo = self.test.perf_info.matched_tag(path, patt, t[0]) + self.assertIsNotNone(tinfo) + self.assertEquals(t, tinfo) def test_empty_file(self): self.output_file.close() self.test.sanity_patterns = { - self.output_file.name : { '.*' : [] } + self.output_file.name : {'.*': []} } self.assertFalse(self.test.check_sanity()) - + self.assertIsNone(self.test.sanity_info.matched_pattern( + self.output_file.name, '.*')) def test_sanity_failure(self): self.output_file.write('result = failure\n') self.output_file.close() self.assertFalse(self.test.check_sanity()) - + self.assertIsNone(self.test.sanity_info.matched_pattern( + self.output_file.name, 'result = success')) def test_sanity_multiple_patterns(self): self.output_file.write('result1 = success\n') @@ -382,30 +396,31 @@ def test_sanity_multiple_patterns(self): # Simulate a pure sanity test; invalidate the reference values self.test.reference = {} self.test.sanity_patterns = { - self.output_file.name : { - 'result1 = success' : [], - 'result2 = success' : [] + self.output_file.name: { + 'result1 = success': [], + 'result2 = success': [] } } self.assertTrue(self.test.check_sanity()) # Require more patterns to be present self.test.sanity_patterns = { - self.output_file.name : { - 'result1 = success' : [], - 'result2 = success' : [], - 'result3 = success' : [] + self.output_file.name: { + 'result1 = success': [], + 'result2 = success': [], + 'result3 = success': [] } } self.assertFalse(self.test.check_sanity()) - + self.assertIsNone(self.test.sanity_info.matched_pattern( + self.output_file.name, 'result3 = success')) def test_multiple_files(self): # Create multiple files following the same pattern - files = [ tempfile.NamedTemporaryFile(mode='wt', prefix='regtmp', - dir=self.test.prefix, - delete=False) - for i in range(0, 2) ] + files = [tempfile.NamedTemporaryFile(mode='wt', prefix='regtmp', + dir=self.test.prefix, + delete=False) + for i in range(0, 2)] # Write the performance files for f in files: @@ -416,14 +431,14 @@ def test_multiple_files(self): # Reset the performance patterns; also put relative paths self.test.perf_patterns = { - 'regtmp*' : { - 'performance1 = (?P\S+)' : [ + 'regtmp*': { + 'performance1 = (?P\S+)': [ ('value1', float, standard_threshold) ], - 'performance2 = (?P\S+)' : [ + 'performance2 = (?P\S+)': [ ('value2', float, standard_threshold) ], - 'performance3 = (?P\S+)' : [ + 'performance3 = (?P\S+)': [ ('value3', float, standard_threshold) ], } @@ -436,30 +451,30 @@ def test_multiple_files(self): for f in files: os.remove(f.name) - def test_invalid_conversion(self): self.write_performance_output(performance1='nodata', performance2=1.8, performance3=3.3) self.assertRaises(ReframeError, self.test.check_performance) - def test_reference_file_not_found(self): self.output_file.write('result = success\n') self.output_file.close() # Remove read permissions os.chmod(self.output_file.name, stat.S_IWUSR) - 
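The sanity_info and perf_info objects queried in these tests are assumed to record, per file and pattern, which patterns matched and, for each tag, the (tag, value, reference, result) tuple that matched_tag() returns. A minimal recorder exposing that assumed query interface could look as follows; the framework's actual classes additionally provide scan_report() and failure_report(), which are exercised later through assertReportGeneration():

class MatchInfo:
    """Sketch of the assumed match-bookkeeping interface."""

    def __init__(self):
        self._patterns = {}   # (path, pattern) -> matched line
        self._tags = {}       # (path, pattern, tag) -> (tag, value, ref, ok)

    def add_pattern_match(self, path, pattern, line):
        self._patterns[path, pattern] = line

    def add_tag_match(self, path, pattern, tag, value, reference, result):
        self._tags[path, pattern, tag] = (tag, value, reference, result)

    def matched_pattern(self, path, pattern):
        # Returns None if the pattern never matched in that file
        return self._patterns.get((path, pattern))

    def matched_tag(self, path, pattern, tag):
        # Returns the (tag, value, reference, result) tuple or None
        return self._tags.get((path, pattern, tag))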
self.assertRaises(ReframeError, self.test.check_sanity) - def test_below_threshold(self): self.write_performance_output(performance1=1.0, performance2=1.7, performance3=3.1) self.assertFalse(self.test.check_performance()) + # Verify collected match info + tag, val, ref, res = self.test.perf_info.matched_tag( + self.perf_file.name, 'performance1 = (?P\S+)', 'value1') + self.assertFalse(res) def test_above_threshold(self): self.write_performance_output(performance1=1.4, @@ -467,13 +482,10 @@ def test_above_threshold(self): performance3=3.2) self.assertFalse(self.test.check_performance()) - - def test_strict_performance_check(self): - self.write_performance_output(performance1=1.4, - performance2=2.7, - performance3=3.2) - self.assertFalse(self.test.check_performance()) - + # Verify collected match info + tag, val, ref, res = self.test.perf_info.matched_tag( + self.perf_file.name, 'performance2 = (?P\S+)', 'value2') + self.assertFalse(res) def test_invalid_threshold(self): self.write_performance_output(performance1=1.3, @@ -493,13 +505,12 @@ def test_invalid_threshold(self): self.test.reference['testsys:value1'] = (1.4, -0.1, -0.1) self.assertRaises(ReframeError, self.test.check_performance) - def test_zero_reference(self): self.test.reference = { - 'testsys' : { - 'value1' : (0.0, -0.1, 0.1), - 'value2' : (0.0, -0.1, 0.1), - 'value3' : (0.0, -0.1, 0.1), + 'testsys': { + 'value1': (0.0, -0.1, 0.1), + 'value2': (0.0, -0.1, 0.1), + 'value3': (0.0, -0.1, 0.1), } } @@ -508,13 +519,12 @@ def test_zero_reference(self): performance3=0.0) self.assertTrue(self.test.check_performance()) - def test_zero_thresholds(self): self.test.reference = { - 'testsys' : { - 'value1' : (1.4, 0.0, 0.0), - 'value2' : (1.7, 0.0, 0.0), - 'value3' : (3.1, 0.0, 0.0), + 'testsys': { + 'value1': (1.4, 0.0, 0.0), + 'value2': (1.7, 0.0, 0.0), + 'value3': (3.1, 0.0, 0.0), } } @@ -523,13 +533,12 @@ def test_zero_thresholds(self): performance3=3.11) self.assertFalse(self.test.check_performance()) - def test_unbounded(self): self.test.reference = { - 'testsys' : { - 'value1' : (1.4, None, None), - 'value2' : (1.7, None, 0.1), - 'value3' : (3.1, -0.1, None), + 'testsys': { + 'value1': (1.4, None, None), + 'value2': (1.7, None, 0.1), + 'value3': (3.1, -0.1, None), } } @@ -538,13 +547,12 @@ def test_unbounded(self): performance3=3.3) self.assertTrue(self.test.check_performance()) - def test_no_threshold(self): self.test.reference = { - 'testsys' : { - 'value1' : (None, None, None), - 'value2' : (1.7, None, 0.1), - 'value3' : (3.1, -0.1, None), + 'testsys': { + 'value1': (None, None, None), + 'value2': (1.7, None, 0.1), + 'value3': (3.1, -0.1, None), } } @@ -553,32 +561,30 @@ def test_no_threshold(self): performance3=3.3) self.assertRaises(ReframeError, self.test.check_performance) - def test_pattern_not_found(self): self.write_performance_output(performance1=1.3, performance2=1.8, foo=3.3) self.assertFalse(self.test.check_performance()) - def test_custom_threshold(self): self.test.reference = { - 'testsys' : { - 'value1' : 1.4, - 'value2' : 1.7, - 'value3' : 3.1, + 'testsys': { + 'value1': 1.4, + 'value2': 1.7, + 'value3': 3.1, } } self.test.perf_patterns = { - self.perf_file.name : { - 'performance1 = (?P\S+)' : [ + self.perf_file.name: { + 'performance1 = (?P\S+)': [ ('value1', float, self.custom_threshold) ], - 'performance2 = (?P\S+)' : [ + 'performance2 = (?P\S+)': [ ('value2', float, self.custom_threshold) ], - 'performance3 = (?P\S+)' : [ + 'performance3 = (?P\S+)': [ ('value3', float, lambda value, **kwargs: value >= 3.1) ], 
@@ -590,12 +596,11 @@ def test_custom_threshold(self): performance3=3.3) self.assertTrue(self.test.check_performance()) - def test_sanity_tags(self): self.test.reference = {} self.test.sanity_patterns = { - self.output_file.name : { - 'result = (?P\S+)' : [ + self.output_file.name: { + 'result = (?P\S+)': [ ('result', str, self.custom_sanity) ] } @@ -610,13 +615,12 @@ def test_sanity_tags(self): self.output_file.close() self.assertFalse(self.test.check_sanity()) - def test_unknown_tag(self): self.test.reference = { - 'testsys' : { - 'value1' : (1.4, -0.1, 0.1), - 'value2' : (1.7, -0.1, 0.1), - 'foo' : (3.1, -0.1, 0.1), + 'testsys': { + 'value1': (1.4, -0.1, 0.1), + 'value2': (1.7, -0.1, 0.1), + 'foo': (3.1, -0.1, 0.1), } } @@ -625,69 +629,64 @@ def test_unknown_tag(self): performance3=3.3) self.assertRaises(ReframeError, self.test.check_performance) - def test_unknown_system(self): self.write_performance_output(performance1=1.3, performance2=1.8, performance3=3.3) self.test.reference = { - 'testsys:login' : { - 'value1' : (1.4, -0.1, 0.1), - 'value2' : (1.7, -0.1, 0.1), - 'value3' : (3.1, -0.1, 0.1), + 'testsys:login': { + 'value1': (1.4, -0.1, 0.1), + 'value2': (1.7, -0.1, 0.1), + 'value3': (3.1, -0.1, 0.1), } } self.assertRaises(ReframeError, self.test.check_performance) - def test_default_reference(self): self.write_performance_output(performance1=1.3, performance2=1.8, performance3=3.3) self.test.reference = { - '*' : { - 'value1' : (1.4, -0.1, 0.1), - 'value2' : (1.7, -0.1, 0.1), - 'value3' : (3.1, -0.1, 0.1), + '*': { + 'value1': (1.4, -0.1, 0.1), + 'value2': (1.7, -0.1, 0.1), + 'value3': (3.1, -0.1, 0.1), } } self.assertTrue(self.test.check_performance()) - def test_tag_resolution(self): self.write_performance_output(performance1=1.3, performance2=1.8, performance3=3.3) self.test.reference = { - 'testsys' : { - 'value1' : (1.4, -0.1, 0.1), - 'value2' : (1.7, -0.1, 0.1), + 'testsys': { + 'value1': (1.4, -0.1, 0.1), + 'value2': (1.7, -0.1, 0.1), }, '*' : { - 'value3' : (3.1, -0.1, 0.1), + 'value3': (3.1, -0.1, 0.1), } } self.assertTrue(self.test.check_performance()) - def test_negative_threshold_success(self): self.write_performance_output(performance1=-1.3, performance2=-1.8, performance3=-3.3) self.test.reference = { - '*' : { - 'value1' : (-1.4, -0.1, 0.1), - 'value2' : (-1.7, -0.1, 0.1), - 'value3' : (-3.1, -0.1, 0.1), + '*': { + 'value1': (-1.4, -0.1, 0.1), + 'value2': (-1.7, -0.1, 0.1), + 'value3': (-3.1, -0.1, 0.1), } } self.assertTrue(self.test.check_performance()) - def test_negative_threshold_failure(self): self.write_performance_output(performance1=1.3, performance2=1.8, @@ -701,7 +700,6 @@ def test_negative_threshold_failure(self): } self.assertFalse(self.test.check_performance()) - def test_negative_threshold_positive_ref(self): self.write_performance_output(performance1=-1.3, performance2=-1.8, @@ -715,7 +713,6 @@ def test_negative_threshold_positive_ref(self): } self.assertFalse(self.test.check_performance()) - def test_eof_handler(self): self.output_file.write('result = success\n') self.output_file.write('result = success\n') @@ -734,24 +731,23 @@ def match_line(self, value, reference, **kwargs): def match_eof(self, **kwargs): return self.count == 3 - p = Parser() self.test.sanity_patterns = { - self.output_file.name : { - '(?Presult = success)' : [ + self.output_file.name: { + '(?Presult = success)': [ ('success_string', str, p.match_line) ], - r'\\e = success' : [], + r'\\e = success': [], - '\e' : p.match_eof + '\e': p.match_eof }, } 
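The reference-resolution tests above (test_unknown_system, test_default_reference, test_tag_resolution) assume that references are scoped per 'system:partition', then per 'system', with '*' as a last-resort fallback, and that an unmatched pattern or an unresolvable tag makes the performance check fail or raise. The sketch below spells out those assumed semantics for a perf_patterns dictionary; it is not the framework's implementation, only the behaviour these tests rely on:

import re

from reframe.core.exceptions import ReframeError


def resolve_reference(reference, tag, system, partition):
    # Most specific scope wins: 'sys:part', then 'sys', then the '*' fallback
    for scope in ('%s:%s' % (system, partition), system, '*'):
        if scope in reference and tag in reference[scope]:
            return reference[scope][tag]

    raise ReframeError('could not resolve reference for tag: %s' % tag)


def evaluate_patterns(patterns_by_file, reference, system, partition):
    for path, patterns in patterns_by_file.items():
        with open(path) as fp:
            lines = fp.readlines()

        for patt, taglist in patterns.items():
            matched = False
            for line in lines:
                match = re.search(patt, line)
                if match is None:
                    continue

                matched = True
                for tag, conv, check in taglist:
                    # The regex is assumed to define a named group with the
                    # same name as the tag
                    value = conv(match.group(tag))
                    ref = resolve_reference(reference, tag,
                                            system, partition)
                    if not check(value, reference=ref):
                        return False

            if not matched:
                # A required pattern that never matched fails the check
                return False

    return True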
self.assertTrue(self.test.check_sanity()) self.assertIn('\e', self.test.sanity_patterns[self.output_file.name].keys()) - + self.assertReportGeneration() def test_eof_handler_restore_on_failure(self): self.output_file.write('result = success\n') @@ -769,21 +765,20 @@ def match_line(self, value, reference, **kwargs): def match_eof(self, **kwargs): return self.count == 3 - p = Parser() self.test.sanity_patterns = { - self.output_file.name : { - '(?Presult = success)' : [ + self.output_file.name: { + '(?Presult = success)': [ ('success_string', str, p.match_line) ], - '\e' : p.match_eof + '\e': p.match_eof }, } self.assertFalse(self.test.check_sanity()) self.assertIn('\e', self.test.sanity_patterns[self.output_file.name].keys()) - + self.assertReportGeneration() def test_patterns_empty(self): self.test.perf_patterns = {} @@ -796,18 +791,17 @@ def test_patterns_empty(self): self.assertTrue(self.test.check_sanity()) self.assertTrue(self.test.check_performance()) - def test_file_not_found(self): self.test.stagedir = self.test.prefix self.test.perf_patterns = { - 'foobar' : { - 'performance1 = (?P\S+)' : [ + 'foobar': { + 'performance1 = (?P\S+)': [ ('value1', float, standard_threshold) ], - 'performance2 = (?P\S+)' : [ + 'performance2 = (?P\S+)': [ ('value2', float, standard_threshold) ], - 'performance3 = (?P\S+)' : [ + 'performance3 = (?P\S+)': [ ('value3', float, standard_threshold) ], } diff --git a/unittests/test_policies.py b/unittests/test_policies.py index 3532ce5d83..b4b0b8b917 100644 --- a/unittests/test_policies.py +++ b/unittests/test_policies.py @@ -9,8 +9,23 @@ from reframe.frontend.resources import ResourcesManager from reframe.settings import settings +from unittests.resources.frontend_checks import (KeyboardInterruptCheck, + SleepCheck, + SystemExitCheck) from unittests.fixtures import TEST_SITE_CONFIG + +class DebugAsynchronousExecutionPolicy(AsynchronousExecutionPolicy): + def __init__(self): + super().__init__() + self.keep_stage_files = True + self.checks = [] + + def exit_environ(self, c, p, e): + super().exit_environ(c, p, e) + self.checks.append(c) + + class TestSerialExecutionPolicy(unittest.TestCase): def setUp(self): # Load a system configuration @@ -29,7 +44,6 @@ def setUp(self): def tearDown(self): shutil.rmtree(self.resourcesdir, ignore_errors=True) - def test_runall(self): self.runner.runall(self.checks, self.system) @@ -40,7 +54,6 @@ def test_runall(self): self.assertEqual(1, stats.num_failures_stage('sanity')) self.assertEqual(2, stats.num_failures_stage('performance')) - def test_runall_skip_system_check(self): self.runner.policy.skip_system_check = True self.runner.runall(self.checks, self.system) @@ -52,7 +65,6 @@ def test_runall_skip_system_check(self): self.assertEqual(1, stats.num_failures_stage('sanity')) self.assertEqual(2, stats.num_failures_stage('performance')) - def test_runall_skip_prgenv_check(self): self.runner.policy.skip_environ_check = True self.runner.runall(self.checks, self.system) @@ -64,7 +76,6 @@ def test_runall_skip_prgenv_check(self): self.assertEqual(1, stats.num_failures_stage('sanity')) self.assertEqual(2, stats.num_failures_stage('performance')) - def test_runall_skip_sanity_check(self): self.runner.policy.skip_sanity_check = True self.runner.runall(self.checks, self.system) @@ -76,7 +87,6 @@ def test_runall_skip_sanity_check(self): self.assertEqual(0, stats.num_failures_stage('sanity')) self.assertEqual(2, stats.num_failures_stage('performance')) - def test_runall_skip_performance_check(self): 
self.runner.policy.skip_performance_check = True self.runner.runall(self.checks, self.system) @@ -88,7 +98,6 @@ def test_runall_skip_performance_check(self): self.assertEqual(1, stats.num_failures_stage('sanity')) self.assertEqual(0, stats.num_failures_stage('performance')) - def test_run_relaxed_performance_check(self): self.runner.policy.relax_performance_check = True self.runner.runall(self.checks, self.system) @@ -100,25 +109,19 @@ def test_run_relaxed_performance_check(self): self.assertEqual(1, stats.num_failures_stage('sanity')) self.assertEqual(0, stats.num_failures_stage('performance')) - def test_kbd_interrupt_within_test(self): - from unittests.resources.frontend_checks import KeyboardInterruptCheck - check = KeyboardInterruptCheck(system=self.system, resources=self.resources) self.assertRaises(KeyboardInterrupt, self.runner.runall, - [ check ], self.system) + [check], self.system) stats = self.runner.stats self.assertEqual(1, stats.num_failures()) - def test_system_exit_within_test(self): - from unittests.resources.frontend_checks import SystemExitCheck - check = SystemExitCheck(system=self.system, resources=self.resources) # This should not raise and should not exit - self.runner.runall([ check ], self.system) + self.runner.runall([check], self.system) stats = self.runner.stats self.assertEqual(1, stats.num_failures()) @@ -129,12 +132,10 @@ def setUp(self): self.debug_policy = DebugAsynchronousExecutionPolicy() self.runner = Runner(self.debug_policy) - def set_max_jobs(self, value): for p in self.system.partitions: p.max_jobs = value - def read_timestamps_sorted(self): self.begin_stamps = [] self.end_stamps = [] @@ -146,10 +147,7 @@ def read_timestamps_sorted(self): self.begin_stamps.sort() self.end_stamps.sort() - def test_concurrency_unlimited(self): - from unittests.resources.frontend_checks import SleepCheck - checks = [ SleepCheck(0.5, system=self.system, resources=self.resources), SleepCheck(0.5, system=self.system, resources=self.resources), @@ -169,17 +167,14 @@ def test_concurrency_unlimited(self): # Assure that all tests were run in parallel self.assertTrue(self.begin_stamps[-1] < self.end_stamps[0]) - def test_concurrency_limited(self): - from unittests.resources.frontend_checks import SleepCheck - # The number of checks must be <= 2*max_jobs t = 0.5 - checks = [ SleepCheck(t, system=self.system, resources=self.resources), - SleepCheck(t, system=self.system, resources=self.resources), - SleepCheck(t, system=self.system, resources=self.resources), - SleepCheck(t, system=self.system, resources=self.resources), - SleepCheck(t, system=self.system, resources=self.resources) ] + checks = [SleepCheck(t, system=self.system, resources=self.resources), + SleepCheck(t, system=self.system, resources=self.resources), + SleepCheck(t, system=self.system, resources=self.resources), + SleepCheck(t, system=self.system, resources=self.resources), + SleepCheck(t, system=self.system, resources=self.resources)] num_checks = len(checks) max_jobs = num_checks - 2 self.set_max_jobs(max_jobs) @@ -193,7 +188,7 @@ def test_concurrency_limited(self): self.read_timestamps_sorted() # Assure that the first #max_jobs jobs were run in parallel - self.assertTrue(self.begin_stamps[max_jobs-1] < self.end_stamps[0]) + self.assertTrue(self.begin_stamps[max_jobs - 1] < self.end_stamps[0]) # Assure that the remaining jobs were each run after one of the # previous #max_jobs jobs had finished (e.g. 
begin[max_jobs] > end[0]) @@ -209,14 +204,11 @@ def test_concurrency_limited(self): # important prolongation of the unit test execution time. # self.assertTrue(self.begin_stamps[-1] < self.end_stamps[max_jobs]) - def test_concurrency_none(self): - from unittests.resources.frontend_checks import SleepCheck - t = 0.5 - checks = [ SleepCheck(t, system=self.system, resources=self.resources), - SleepCheck(t, system=self.system, resources=self.resources), - SleepCheck(t, system=self.system, resources=self.resources) ] + checks = [SleepCheck(t, system=self.system, resources=self.resources), + SleepCheck(t, system=self.system, resources=self.resources), + SleepCheck(t, system=self.system, resources=self.resources)] num_checks = len(checks) self.set_max_jobs(1) self.runner.runall(checks, self.system) @@ -230,11 +222,10 @@ def test_concurrency_none(self): # Assure that the jobs were run after the previous job had finished # (e.g. begin[1] > end[0]) - begin_after_end = [ b > e for b, e in zip(self.begin_stamps[1:], - self.end_stamps[:-1]) ] + begin_after_end = [b > e for b, e in zip(self.begin_stamps[1:], + self.end_stamps[:-1])] self.assertTrue(all(begin_after_end)) - def _run_checks(self, checks, max_jobs): self.set_max_jobs(max_jobs) self.assertRaises(KeyboardInterrupt, self.runner.runall, @@ -243,11 +234,7 @@ def _run_checks(self, checks, max_jobs): self.assertEqual(4, self.runner.stats.num_cases()) self.assertEqual(4, self.runner.stats.num_failures()) - def test_kbd_interrupt_in_wait_with_concurrency(self): - from unittests.resources.frontend_checks import SleepCheck, \ - KeyboardInterruptCheck - checks = [ KeyboardInterruptCheck(system=self.system, resources=self.resources), @@ -257,11 +244,7 @@ def test_kbd_interrupt_in_wait_with_concurrency(self): ] self._run_checks(checks, 4) - def test_kbd_interrupt_in_wait_with_limited_concurrency(self): - from unittests.resources.frontend_checks import SleepCheck, \ - KeyboardInterruptCheck - checks = [ KeyboardInterruptCheck(system=self.system, resources=self.resources), @@ -271,11 +254,7 @@ def test_kbd_interrupt_in_wait_with_limited_concurrency(self): ] self._run_checks(checks, 2) - def test_kbd_interrupt_in_setup_with_concurrency(self): - from unittests.resources.frontend_checks import SleepCheck, \ - KeyboardInterruptCheck - checks = [ SleepCheck(1, system=self.system, resources=self.resources), SleepCheck(1, system=self.system, resources=self.resources), @@ -286,11 +265,7 @@ def test_kbd_interrupt_in_setup_with_concurrency(self): ] self._run_checks(checks, 4) - def test_kbd_interrupt_in_setup_with_limited_concurrency(self): - from unittests.resources.frontend_checks import SleepCheck, \ - KeyboardInterruptCheck - checks = [ SleepCheck(1, system=self.system, resources=self.resources), SleepCheck(1, system=self.system, resources=self.resources), diff --git a/unittests/test_schedulers.py b/unittests/test_schedulers.py index 54cd64f307..449dac5b5e 100644 --- a/unittests/test_schedulers.py +++ b/unittests/test_schedulers.py @@ -13,26 +13,46 @@ from reframe.frontend.loader import autodetect_system, SiteConfiguration from reframe.settings import settings -from unittests.fixtures import TEST_MODULES, system_with_scheduler +from unittests.fixtures import ( + force_remove_file, system_with_scheduler, TEST_MODULES, TEST_RESOURCES +) -class TestJobSubmission(unittest.TestCase): +class _TestJob(unittest.TestCase): def setUp(self): module_path_add([TEST_MODULES]) self.site_config = SiteConfiguration() self.site_config.load_from_dict(settings.site_configuration) - 
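The tearDown() that follows delegates file cleanup to the force_remove_file() helper imported from unittests.fixtures. Its assumed behaviour is simply "remove the file if it exists, ignore it otherwise", roughly:

import os

def force_remove_file(filename):
    # Assumed behaviour of the fixtures helper used in tearDown() below
    try:
        os.remove(filename)
    except FileNotFoundError:
        pass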
self.stdout_f = NamedTemporaryFile(dir='.', suffix='.out', delete=False) - self.stderr_f = NamedTemporaryFile(dir='.', suffix='.err', delete=False) + self.stdout_f = NamedTemporaryFile( + dir='.', suffix='.out', delete=False) + self.stderr_f = NamedTemporaryFile( + dir='.', suffix='.err', delete=False) self.script_f = NamedTemporaryFile(dir='.', suffix='.sh', delete=False) - # Close all files and let whoever interested to open them. Otherwise the - # test_loca_job may fail with a 'Text file busy' error + # Close all files and let whoever interested to open them. Otherwise a + # local job may fail with a 'Text file busy' error self.stdout_f.close() self.stderr_f.close() self.script_f.close() - # Setup a slurm job + def tearDown(self): + force_remove_file(self.stdout_f.name) + force_remove_file(self.stderr_f.name) + force_remove_file(self.script_f.name) + + def assertProcessDied(self, pid): + try: + os.kill(pid, 0) + self.fail('process %s is still alive' % pid) + except (ProcessLookupError, PermissionError): + pass + + +class TestSlurmJob(_TestJob): + def setUp(self): + super().setUp() + self.num_tasks = 4 self.num_tasks_per_node = 2 self.testjob = SlurmJob( @@ -48,27 +68,14 @@ def setUp(self): stderr=self.stderr_f.name, launcher=NativeSlurmLauncher ) - self.testjob.pre_run = [ 'echo prerun', 'echo prerun' ] - self.testjob.post_run = [ 'echo postrun' ] - + self.testjob.pre_run = ['echo prerun', 'echo prerun'] + self.testjob.post_run = ['echo postrun'] def setup_job(self, scheduler): partition = system_with_scheduler(scheduler) self.testjob.options += partition.access - - def tearDown(self): - if os.path.exists(self.stdout_f.name): - os.remove(self.stdout_f.name) - - if os.path.exists(self.stderr_f.name): - os.remove(self.stderr_f.name) - - if os.path.exists(self.script_f.name): - os.remove(self.script_f.name) - - - def _test_job_submission(self, ignore_lines = None): + def _test_submission(self, ignore_lines=None): self.testjob.submit('hostname') self.testjob.wait() self.assertEqual(self.testjob.state, SLURM_JOB_COMPLETED) @@ -100,8 +107,7 @@ def _test_job_submission(self, ignore_lines = None): self.assertEqual(num_tasks, self.num_tasks) self.assertEqual(len(nodes), self.num_tasks / self.num_tasks_per_node) - - def _test_jobstate_poll(self): + def _test_state_poll(self): t_sleep = datetime.now() self.testjob.submit('sleep 3') self.testjob.wait() @@ -109,75 +115,184 @@ def _test_jobstate_poll(self): self.assertEqual(self.testjob.state, SLURM_JOB_COMPLETED) self.assertEqual(self.testjob.exitcode, 0) - self.assertGreaterEqual(t_sleep.seconds, 3) + self.assertGreaterEqual(t_sleep.total_seconds(), 3) + + @unittest.skipIf(not system_with_scheduler(None), + 'job submission not supported') + def test_cancel(self): + self.setup_job(None) + self.testjob.submit('sleep 5') + self.testjob.cancel() + # Cancel waits for job to finish + self.assertTrue(self.testjob.finished()) + self.assertEqual(self.testjob.state, SLURM_JOB_CANCELLED) + + def test_cancel_before_submit(self): + self.testjob.cancel() @unittest.skipIf(not system_with_scheduler('nativeslurm'), 'native SLURM not supported') - def test_job_submission_slurm(self): + def test_submit_slurm(self): self.setup_job('nativeslurm') - self._test_job_submission() - + self._test_submission() @unittest.skipIf(not system_with_scheduler('nativeslurm'), 'native SLURM not supported') - def test_jobstate_poll_slurm(self): + def test_state_poll_slurm(self): self.setup_job('nativeslurm') - self._test_jobstate_poll() - + self._test_state_poll() @unittest.skipIf(not 
system_with_scheduler('slurm+alps'), 'SLURM+ALPS not supported') - def test_job_submission_alps(self): + def test_submit_alps(self): from reframe.launchers import AlpsLauncher self.setup_job('slurm+alps') self.testjob.launcher = AlpsLauncher(self.testjob) - self._test_job_submission(ignore_lines='^Application (\d+) resources\:') - + self._test_submission(ignore_lines='^Application (\d+) resources\:') @unittest.skipIf(not system_with_scheduler('slurm+alps'), 'SLURM+ALPS not supported') - def test_jobstate_poll_alps(self): + def test_state_poll_alps(self): from reframe.launchers import AlpsLauncher self.setup_job('slurm+alps') self.testjob.launcher = AlpsLauncher(self.testjob) - self._test_jobstate_poll() - + self._test_state_poll() - def test_local_job(self): +class TestLocalJob(_TestJob): + def setUp(self): + super().setUp() self.testjob = LocalJob(job_name='localjob', job_environ_list=[], job_script_builder=BashScriptBuilder(), stdout=self.stdout_f.name, stderr=self.stderr_f.name, - script_filename=self.script_f.name, - time_limit=(0, 0, 3)) - self.testjob.submit('sleep 2 && echo hello') + script_filename=self.script_f.name) + + def test_submission(self): + self.testjob.submit('sleep 1 && echo hello') + t_wait = datetime.now() self.testjob.wait() + t_wait = datetime.now() - t_wait + + self.assertGreaterEqual(t_wait.total_seconds(), 1) self.assertEqual(self.testjob.state, LOCAL_JOB_SUCCESS) self.assertEqual(self.testjob.exitcode, 0) with open(self.testjob.stdout) as f: self.assertEqual(f.read(), 'hello\n') - def test_local_job_timelimit(self): - self.testjob = LocalJob(job_name='localjob', - job_environ_list=[], - job_script_builder=BashScriptBuilder(), - stdout=self.stdout_f.name, - stderr=self.stderr_f.name, - script_filename=self.script_f.name, - time_limit=(0, 0, 2)) + # Double wait; job state must not change + self.testjob.wait() + self.assertEqual(self.testjob.state, LOCAL_JOB_SUCCESS) + + def test_submission_timelimit(self): + self.testjob.time_limit = (0, 0, 2) + t_job = datetime.now() self.testjob.submit('echo before && sleep 10 && echo after') self.testjob.wait() t_job = datetime.now() - t_job + self.assertEqual(self.testjob.state, LOCAL_JOB_TIMEOUT) self.assertNotEqual(self.testjob.exitcode, 0) with open(self.testjob.stdout) as f: self.assertEqual(f.read(), 'before\n') - self.assertGreaterEqual(t_job.seconds, 2) - self.assertLess(t_job.seconds, 10) + self.assertGreaterEqual(t_job.total_seconds(), 2) + self.assertLess(t_job.total_seconds(), 10) + + # Double wait; job state must not change + self.testjob.wait() + self.assertEqual(self.testjob.state, LOCAL_JOB_TIMEOUT) + + def test_cancel(self): + t_job = datetime.now() + self.testjob.submit('sleep 5') + self.testjob.cancel() + t_job = datetime.now() - t_job + + # Cancel waits for the job to finish + self.assertTrue(self.testjob.finished()) + self.assertLess(t_job.total_seconds(), 5) + self.assertEqual(self.testjob.state, LOCAL_JOB_FAILURE) + + def test_cancel_before_submit(self): + self.testjob.cancel() + + def test_cancel_with_grace(self): + # This test emulates a spawned process that ignores the SIGTERM signal + # and also spawns another process: + # + # reframe --- local job script --- sleep 10 + # (TERM IGN) + # + # We expect the job not to be cancelled immediately, since it ignores + # the gracious signal we are sending it. However, we expect it to be + # killed immediately after the grace period of 2 seconds expires. + # + # We also check that the additional spawned process is also killed. 
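The body of this test, and of test_cancel_term_ignore further below, exercises a two-step cancellation protocol: send SIGTERM to the job's process group, wait for a grace period, then SIGKILL the whole group if it is still alive. A minimal sketch of that protocol follows (assumed semantics, not the scheduler's actual code); it presumes the job script was spawned with subprocess.Popen(..., start_new_session=True), so that the signals reach any processes it has spawned in turn:

import os
import signal
import subprocess

def cancel_with_grace(proc, grace_period=2):
    # `proc` runs in its own session, so its pid doubles as the id of its
    # process group and the signals reach its children as well.
    os.killpg(proc.pid, signal.SIGTERM)
    try:
        proc.wait(timeout=grace_period)
    except subprocess.TimeoutExpired:
        # The job ignored SIGTERM (or never delivered it to its children)
        # within the grace period; kill the whole group forcefully.
        os.killpg(proc.pid, signal.SIGKILL)
        proc.wait()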
+ + self.testjob.time_limit = (0, 1, 0) + self.testjob.cancel_grace_period = 2 + self.testjob.pre_run = ['trap -- "" TERM'] + self.testjob.post_run = ['echo $!', 'wait'] + self.testjob.submit('sleep 5 &') + + # Stall a bit here to let the the spawned process start and install its + # signal handler for SIGTERM + time.sleep(1) + + t_grace = datetime.now() + self.testjob.cancel() + t_grace = datetime.now() - t_grace + + self.testjob.wait() + # Read pid of spawned sleep + with open(self.testjob.stdout) as f: + sleep_pid = int(f.read()) + + self.assertGreaterEqual(t_grace.total_seconds(), 2) + self.assertLess(t_grace.total_seconds(), 5) + self.assertEqual(LOCAL_JOB_TIMEOUT, self.testjob.state) + + # Verify that the spawned sleep is killed, too + self.assertProcessDied(sleep_pid) + + def test_cancel_term_ignore(self): + # This test emulates a descendant process of the spawned job that + # ignores the SIGTERM signal: + # + # reframe --- local job script --- sleep_deeply.sh --- sleep + # (TERM IGN) + # + # Since the "local job script" does not ignore SIGTERM, it will be + # terminated immediately after we cancel the job. However, the deeply + # spawned sleep will ignore it. We need to make sure that our + # implementation grants the sleep process a grace period and then + # kills it. + + prog = os.path.join(TEST_RESOURCES, 'src', 'sleep_deeply.sh') + self.testjob.cancel_grace_period = 2 + self.testjob.submit(prog) + + # Stall a bit here to let the the spawned process start and install its + # signal handler for SIGTERM + time.sleep(1) + + t_grace = datetime.now() + self.testjob.cancel() + t_grace = datetime.now() - t_grace + self.testjob.wait() + + # Read pid of spawned sleep + with open(self.testjob.stdout) as f: + sleep_pid = int(f.read()) + + self.assertGreaterEqual(t_grace.total_seconds(), 2) + self.assertEqual(LOCAL_JOB_TIMEOUT, self.testjob.state) + + # Verify that the spawned sleep is killed, too + self.assertProcessDied(sleep_pid) diff --git a/unittests/test_utility.py b/unittests/test_utility.py index bbef165e53..ed3b1e2bf6 100644 --- a/unittests/test_utility.py +++ b/unittests/test_utility.py @@ -13,18 +13,17 @@ from unittests.fixtures import TEST_MODULES + class TestOSTools(unittest.TestCase): def test_command_success(self): completed = os_ext.run_command('echo foobar') self.assertEqual(completed.returncode, 0) self.assertEqual(completed.stdout, 'foobar\n') - def test_command_error(self): self.assertRaises(CommandError, os_ext.run_command, 'false', 'check=True') - def test_command_timeout(self): try: os_ext.run_command('sleep 3', timeout=2) @@ -32,7 +31,6 @@ def test_command_timeout(self): except CommandError as e: self.assertEqual(e.timeout, 2) - def test_command_async(self): from datetime import datetime @@ -48,12 +46,11 @@ def test_command_async(self): self.assertLess(t_launch.seconds, 1) self.assertGreaterEqual(t_sleep.seconds, 1) - def test_grep(self): self.assertTrue(os_ext.grep_command_output(cmd='echo hello', - pattern='hello')) + pattern='hello')) self.assertFalse(os_ext.grep_command_output(cmd='echo hello', - pattern='foo')) + pattern='foo')) def test_copytree(self): dir_src = tempfile.mkdtemp() @@ -68,12 +65,10 @@ def test_copytree(self): shutil.rmtree(dir_src) shutil.rmtree(dir_dst) - def test_inpath(self): self.assertTrue(os_ext.inpath('/foo/bin', '/bin:/foo/bin:/usr/bin')) self.assertFalse(os_ext.inpath('/foo/bin', '/bin:/usr/local/bin')) - def _make_testdirs(self, prefix): # Create a temporary directory structure # foo/ @@ -87,7 +82,6 @@ def _make_testdirs(self, prefix): 
os.makedirs(os.path.join(prefix, 'foo', 'goo'), exist_ok=True) os.makedirs(os.path.join(prefix, 'loo', 'bar'), exist_ok=True) - def test_subdirs(self): prefix = tempfile.mkdtemp() self._make_testdirs(prefix) @@ -96,13 +90,13 @@ def test_subdirs(self): open(os.path.join(prefix, 'foo', 'bar', 'file.txt'), 'w').close() open(os.path.join(prefix, 'loo', 'file.txt'), 'w').close() - expected_subdirs = { prefix, - os.path.join(prefix, 'foo'), - os.path.join(prefix, 'foo', 'bar'), - os.path.join(prefix, 'foo', 'bar', 'boo'), - os.path.join(prefix, 'foo', 'goo'), - os.path.join(prefix, 'loo'), - os.path.join(prefix, 'loo', 'bar') } + expected_subdirs = {prefix, + os.path.join(prefix, 'foo'), + os.path.join(prefix, 'foo', 'bar'), + os.path.join(prefix, 'foo', 'bar', 'boo'), + os.path.join(prefix, 'foo', 'goo'), + os.path.join(prefix, 'loo'), + os.path.join(prefix, 'loo', 'bar')} returned_subdirs = os_ext.subdirs(prefix) self.assertEqual([prefix], returned_subdirs) @@ -111,7 +105,6 @@ def test_subdirs(self): self.assertEqual(expected_subdirs, set(returned_subdirs)) shutil.rmtree(prefix) - def test_samefile(self): # Create a temporary directory structure prefix = tempfile.mkdtemp() @@ -136,7 +129,8 @@ def test_samefile(self): self.assertTrue(os_ext.samefile(os.path.join(prefix, 'foo'), os.path.join(prefix, 'foolnk1'))) self.assertFalse(os_ext.samefile('/foo', '/bar')) - self.assertTrue(os_ext.samefile('/foo', os.path.join(prefix, 'broken'))) + self.assertTrue(os_ext.samefile( + '/foo', os.path.join(prefix, 'broken'))) self.assertTrue(os_ext.samefile(os.path.join(prefix, 'broken'), os.path.join(prefix, 'broken1'))) @@ -166,8 +160,7 @@ def setUp(self): open(os.path.join(self.prefix, 'bar.txt'), 'w').close() open(os.path.join(self.prefix, 'foo.txt'), 'w').close() - - def verify_target_directory(self, file_links = []): + def verify_target_directory(self, file_links=[]): """Verify the directory structure""" self.assertTrue( os.path.exists(os.path.join(self.target, 'bar', 'bar.txt'))) @@ -187,47 +180,40 @@ def verify_target_directory(self, file_links = []): self.assertTrue(os.path.islink(link_name)) self.assertEqual(target_name, os.readlink(link_name)) - def test_virtual_copy_nolinks(self): os_ext.copytree_virtual(self.prefix, self.target) self.verify_target_directory() - def test_virtual_copy_valid_links(self): - file_links = [ 'bar/', 'foo/bar.txt', 'foo.txt' ] + file_links = ['bar/', 'foo/bar.txt', 'foo.txt'] os_ext.copytree_virtual(self.prefix, self.target, file_links) self.verify_target_directory(file_links) - def test_virtual_copy_inexistent_links(self): - file_links = [ 'foobar/', 'foo/bar.txt', 'foo.txt' ] + file_links = ['foobar/', 'foo/bar.txt', 'foo.txt'] self.assertRaises(ReframeError, os_ext.copytree_virtual, self.prefix, self.target, file_links) - def test_virtual_copy_absolute_paths(self): - file_links = [ os.path.join(self.prefix, 'bar'), - 'foo/bar.txt', 'foo.txt' ] + file_links = [os.path.join(self.prefix, 'bar'), + 'foo/bar.txt', 'foo.txt'] self.assertRaises(ReframeError, os_ext.copytree_virtual, self.prefix, self.target, file_links) - def test_virtual_copy_irrelevenant_paths(self): - file_links = [ '/bin', 'foo/bar.txt', 'foo.txt' ] + file_links = ['/bin', 'foo/bar.txt', 'foo.txt'] self.assertRaises(ReframeError, os_ext.copytree_virtual, self.prefix, self.target, file_links) - file_links = [ os.path.dirname(self.prefix), 'foo/bar.txt', 'foo.txt' ] + file_links = [os.path.dirname(self.prefix), 'foo/bar.txt', 'foo.txt'] self.assertRaises(ReframeError, os_ext.copytree_virtual, self.prefix, 
self.target, file_links) - def test_virtual_copy_linkself(self): - file_links = [ '.' ] + file_links = ['.'] self.assertRaises(OSError, os_ext.copytree_virtual, self.prefix, self.target, file_links) - def tearDown(self): shutil.rmtree(self.prefix) shutil.rmtree(self.target) @@ -249,7 +235,6 @@ def test_standard_threshold(self): self.assertRaises(ReframeError, standard_threshold, 0.9, (1.0,)) self.assertRaises(ReframeError, standard_threshold, 0.9, (1.0, None)) - def test_always_true(self): self.assertTrue(always_true(0, None)) self.assertTrue(always_true(230, 321.))
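The standard_threshold calls checked just above take a measured value and a (ref, low, high) tuple, where low <= 0 <= high are fractional tolerances relative to the reference and None disables a bound; malformed tuples raise ReframeError. The following sketch is consistent with the behaviour asserted throughout these tests, but it is not the actual implementation in reframe.utility.functions:

from reframe.core.exceptions import ReframeError


def standard_threshold(value, reference, **kwargs):
    try:
        ref, low, high = reference
    except (TypeError, ValueError):
        raise ReframeError('invalid reference: %s' % (reference,))

    if ref is None:
        raise ReframeError('no reference value given')

    if low is None and high is None:
        return True

    if low is not None and low > 0 or high is not None and high < 0:
        raise ReframeError('invalid thresholds: low must be <= 0 <= high')

    # Bounds are relative to the magnitude of the reference, so negative
    # references (see test_negative_threshold_success) behave the same way.
    lower = ref + abs(ref) * low if low is not None else float('-inf')
    upper = ref + abs(ref) * high if high is not None else float('inf')
    return lower <= value <= upper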