From 9e220e0f001c090bd205afd46c9abcf6bca0fb17 Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Wed, 1 Nov 2023 16:47:16 -0400
Subject: [PATCH 01/28] refactor: Create partition and partition config classes

---
 flow/environment.py         | 90 +++++++++++++++++++++++--------------
 flow/environments/incite.py | 21 +++++----
 flow/environments/purdue.py | 10 +++--
 flow/environments/umich.py  | 11 ++---
 flow/environments/xsede.py  | 39 +++++++++------
 5 files changed, 105 insertions(+), 66 deletions(-)

diff --git a/flow/environment.py b/flow/environment.py
index 605bed25c..cb670f2fa 100644
--- a/flow/environment.py
+++ b/flow/environment.py
@@ -90,6 +90,56 @@ def template_filter(func):
     return classmethod(func)
 
 
+class _PartitionConfig:
+    def __init__(self, cpus_per_node=None, gpus_per_node=None, shared_partitions=None):
+        self.cpus_per_node = (
+            {"default": None} if cpus_per_node is None else cpus_per_node
+        )
+        self.gpus_per_node = (
+            {"default": None} if gpus_per_node is None else gpus_per_node
+        )
+        self.shared_partitions = (
+            set() if shared_partitions is None else shared_partitions
+        )
+
+    def __getitem__(self, partition):
+        return _Partition(
+            partition,
+            self._get_cpus(partition),
+            self._get_gpus(partition),
+            partition in self.shared_partitions,
+        )
+
+    def _get_cpus(self, partition):
+        return self.cpus_per_node.get(partition, self.cpus_per_node["default"])
+
+    def _get_gpus(self, partition):
+        return self.gpus_per_node.get(partition, self.gpus_per_node["default"])
+
+
+class _Partition:
+    def __init__(self, name, gpus, cpus, shared):
+        self.name = name
+        self.gpus = gpus
+        self.cpus = cpus
+        self.shared = shared
+
+    def calculate_num_nodes(self, cpu_tasks, gpu_tasks, threshold):
+        if gpu_tasks > 0:
+            num_nodes_gpu = self._nodes_for_task(gpu_tasks, self.gpus, threshold)
+            num_nodes_cpu = self._nodes_for_task(cpu_tasks, self.cpus, 0)
+        else:
+            num_nodes_gpu = 0
+            num_nodes_cpu = self._nodes_for_task(cpu_tasks, self.cpus, threshold)
+        return max(num_nodes_cpu, num_nodes_gpu, 1)
+
+    def _nodes_for_task(self, tasks, processors, threshold):
+        """Call calc_num_nodes but handle the None sentinel value."""
+        if processors is None:
+            return 1
+        return calc_num_nodes(tasks, processors, threshold)
+
+
 class ComputeEnvironment(metaclass=_ComputeEnvironmentType):
     """Define computational environments.
 
@@ -109,9 +159,7 @@ class ComputeEnvironment(metaclass=_ComputeEnvironmentType):
     template = "base_script.sh"
     mpi_cmd = "mpiexec"
 
-    _cpus_per_node = {"default": -1}
-    _gpus_per_node = {"default": -1}
-    _shared_partitions = set()
+    _partition_config = _PartitionConfig()
 
     @classmethod
     def is_present(cls):
@@ -296,9 +344,9 @@ def _get_scheduler_values(cls, context):
         -------
         Must be called after the rest of the template context has been gathered.
""" - partition = context.get("partition", None) + partition = cls._partition_config[context.get("partition", None)] force = context.get("force", False) - if force or partition in cls._shared_partitions: + if force or partition.shared: threshold = 0.0 else: threshold = 0.9 @@ -315,40 +363,16 @@ def _get_scheduler_values(cls, context): context.get("force", False), ) - if gpu_tasks_total > 0: - num_nodes_gpu = cls._calc_num_nodes( - gpu_tasks_total, cls._get_gpus_per_node(partition), threshold - ) - num_nodes_cpu = cls._calc_num_nodes( - cpu_tasks_total, cls._get_cpus_per_node(partition), 0 - ) - else: - num_nodes_gpu = 0 - num_nodes_cpu = cls._calc_num_nodes( - cpu_tasks_total, cls._get_cpus_per_node(partition), threshold - ) - num_nodes = max(num_nodes_cpu, num_nodes_gpu, 1) + num_nodes = partition.calculate_num_nodes( + cpu_tasks_total, gpu_tasks_total, threshold + ) + return { "ncpu_tasks": cpu_tasks_total, "ngpu_tasks": gpu_tasks_total, "num_nodes": num_nodes, } - @classmethod - def _get_cpus_per_node(cls, partition): - return cls._cpus_per_node.get(partition, cls._cpus_per_node["default"]) - - @classmethod - def _get_gpus_per_node(cls, partition): - return cls._gpus_per_node.get(partition, cls._gpus_per_node["default"]) - - @classmethod - def _calc_num_nodes(cls, tasks, processors, threshold): - """Call calc_num_nodes but handles the -1 sentinal value.""" - if processors == -1: - return 1 - return calc_num_nodes(tasks, processors, threshold) - class StandardEnvironment(ComputeEnvironment): """Default environment which is always present.""" diff --git a/flow/environments/incite.py b/flow/environments/incite.py index 5b47e1482..5d69a8397 100644 --- a/flow/environments/incite.py +++ b/flow/environments/incite.py @@ -10,6 +10,7 @@ from ..environment import ( DefaultLSFEnvironment, DefaultSlurmEnvironment, + _PartitionConfig, template_filter, ) from ..util.template_filters import check_utilization @@ -36,8 +37,9 @@ def my_operation(job): hostname_pattern = r".*\.summit\.olcf\.ornl\.gov" template = "summit.sh" mpi_cmd = "jsrun" - _cpus_per_node = {"default": 42} - _gpus_per_node = {"default": 6} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 42}, gpus_per_node={"default": 6} + ) @template_filter def calc_num_nodes(cls, resource_sets, parallel=False): @@ -187,8 +189,9 @@ class AndesEnvironment(DefaultSlurmEnvironment): hostname_pattern = r"andes-.*\.olcf\.ornl\.gov" template = "andes.sh" mpi_cmd = "srun" - _cpus_per_node = {"default": 32, "gpu": 28} - _gpus_per_node = {"default": 0, "gpu": 2} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 32, "gpu": 28}, gpus_per_node={"default": 0, "gpu": 2} + ) @classmethod def add_args(cls, parser): @@ -217,8 +220,9 @@ class CrusherEnvironment(DefaultSlurmEnvironment): hostname_pattern = r".*\.crusher\.olcf\.ornl\.gov" template = "crusher.sh" - _cpus_per_node = {"default": 56} - _gpus_per_node = {"default": 8} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 56}, gpus_per_node={"default": 8} + ) mpi_cmd = "srun" @@ -269,8 +273,9 @@ class FrontierEnvironment(DefaultSlurmEnvironment): hostname_pattern = r".*\.frontier\.olcf\.ornl\.gov" template = "frontier.sh" - _cpus_per_node = {"default": 56} - _gpus_per_node = {"default": 8} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 56}, gpus_per_node={"default": 8} + ) mpi_cmd = "srun" @classmethod diff --git a/flow/environments/purdue.py b/flow/environments/purdue.py index 797029e0a..c842f2f1a 100644 --- a/flow/environments/purdue.py 
+++ b/flow/environments/purdue.py @@ -4,7 +4,7 @@ """Environments for Purdue supercomputers.""" import logging -from ..environment import DefaultSlurmEnvironment +from ..environment import DefaultSlurmEnvironment, _PartitionConfig logger = logging.getLogger(__name__) @@ -18,9 +18,11 @@ class AnvilEnvironment(DefaultSlurmEnvironment): hostname_pattern = r".*\.anvil\.rcac\.purdue\.edu$" template = "anvil.sh" mpi_cmd = "mpirun" - _cpus_per_node = {"default": 128} - _gpus_per_node = {"default": 4} - _shared_partitions = {"debug", "gpu-debug", "shared", "highmem", "gpu"} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 128}, + gpus_per_node={"default": 4}, + shared_partitions={"debug", "gpu-debug", "shared", "highmem", "gpu"}, + ) @classmethod def add_args(cls, parser): diff --git a/flow/environments/umich.py b/flow/environments/umich.py index fed0ecb95..945918310 100644 --- a/flow/environments/umich.py +++ b/flow/environments/umich.py @@ -2,7 +2,7 @@ # All rights reserved. # This software is licensed under the BSD 3-Clause License. """Environments for the University of Michigan HPC environment.""" -from ..environment import DefaultSlurmEnvironment +from ..environment import DefaultSlurmEnvironment, _PartitionConfig class GreatLakesEnvironment(DefaultSlurmEnvironment): @@ -13,11 +13,12 @@ class GreatLakesEnvironment(DefaultSlurmEnvironment): hostname_pattern = r"gl(-login)?[0-9]+\.arc-ts\.umich\.edu" template = "umich-greatlakes.sh" - _cpus_per_node = {"default": 36, "gpu": 40} - _gpus_per_node = {"default": 2} - _shared_partitions = {"standard", "gpu"} - mpi_cmd = "srun" + _partition_config = _PartitionConfig( + cpus_per_node={"default": 36, "gpu": 40}, + gpus_per_node={"default": 2}, + shared_partitions={"standard", "gpu"}, + ) @classmethod def add_args(cls, parser): diff --git a/flow/environments/xsede.py b/flow/environments/xsede.py index b8923bb22..0d425fd40 100644 --- a/flow/environments/xsede.py +++ b/flow/environments/xsede.py @@ -5,7 +5,7 @@ import logging import os -from ..environment import DefaultSlurmEnvironment, template_filter +from ..environment import DefaultSlurmEnvironment, _PartitionConfig, template_filter logger = logging.getLogger(__name__) @@ -25,6 +25,7 @@ class Stampede2Environment(DefaultSlurmEnvironment): mpi_cmd = "ibrun" offset_counter = 0 base_offset = _STAMPEDE_OFFSET + _cpus_per_node = { "default": 48, "skx-dev": 68, @@ -147,9 +148,11 @@ class Bridges2Environment(DefaultSlurmEnvironment): hostname_pattern = r".*\.bridges2\.psc\.edu$" template = "bridges2.sh" mpi_cmd = "mpirun" - _cpus_per_node = {"default": 128, "EM": 96, "GPU": 40, "GPU-shared": 40} - _gpus_per_node = {"default": 8} - _shared_partitions = {"RM-shared", "GPU-shared"} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 128, "EM": 96, "GPU": 40, "GPU-shared": 40}, + gpus_per_node={"default": 8}, + shared_partitions={"RM-shared", "GPU-shared"}, + ) @classmethod def add_args(cls, parser): @@ -185,9 +188,11 @@ class ExpanseEnvironment(DefaultSlurmEnvironment): hostname_pattern = r".*\.expanse\.sdsc\.edu$" template = "expanse.sh" - _cpus_per_node = {"default": 128, "GPU": 40} - _gpus_per_node = {"default": 4} - _shared_partitions = {"shared", "gpu-shared"} + _partition_config = _PartitionConfig( + cpus_per_node={"default": 128, "GPU": 40}, + gpus_per_node={"default": 4}, + shared_partitions={"shared", "gpu-shared"}, + ) @classmethod def add_args(cls, parser): @@ -229,15 +234,17 @@ class DeltaEnvironment(DefaultSlurmEnvironment): # be safer given the parts listed are 
less likely to change. hostname_pattern = r"(gpua|dt|cn)(-login)?[0-9]+\.delta.*\.ncsa.*\.edu" template = "delta.sh" - _cpus_per_node = { - "default": 128, - "gpuA40x4": 64, - "gpuA100x4": 64, - "gpuA100x8": 128, - "gpuMI100x8": 128, - } - _gpus_per_node = {"default": 4, "gpuA100x8": 8, "gpuMI100x8": 8} - _shared_partitions = {"cpu", "gpuA100x4", "gpuA40x4", "gpuA100x8", "gpuMI100x8"} + _partition_config = _PartitionConfig( + cpus_per_node={ + "default": 128, + "gpuA40x4": 64, + "gpuA100x4": 64, + "gpuA100x8": 128, + "gpuMI100x8": 128, + }, + gpus_per_node={"default": 4, "gpuA100x8": 8, "gpuMI100x8": 8}, + shared_partitions={"cpu", "gpuA100x4", "gpuA40x4", "gpuA100x8", "gpuMI100x8"}, + ) @classmethod def add_args(cls, parser): From 48d3c030e380cca4e03c39de85fe345280bef82e Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 2 Nov 2023 14:44:12 -0400 Subject: [PATCH 02/28] fix: argument ordering of _Partition --- flow/environment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/environment.py b/flow/environment.py index cb670f2fa..b37c6cf44 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -118,7 +118,7 @@ def _get_gpus(self, partition): class _Partition: - def __init__(self, name, gpus, cpus, shared): + def __init__(self, name, cpus, gpus, shared): self.name = name self.gpus = gpus self.cpus = cpus From a18bf6adcae815300b52fbc2dc301af126549cf9 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 2 Nov 2023 16:04:46 -0400 Subject: [PATCH 03/28] refactor: Switch shared_partition for node_types Supports 3 node types: - shared: 1 node or less - mixed: any number of tasks - wholenode: whole node increments --- flow/environment.py | 58 +++++++++++++++++++++---------------- flow/environments/incite.py | 17 ++++++++--- flow/environments/purdue.py | 12 ++++++-- flow/environments/umich.py | 3 +- flow/environments/xsede.py | 44 +++++++++++++++++++++------- 5 files changed, 89 insertions(+), 45 deletions(-) diff --git a/flow/environment.py b/flow/environment.py index b37c6cf44..e78530500 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -9,6 +9,7 @@ This enables the user to adjust their workflow based on the present environment, e.g. for the adjustment of scheduler submission scripts. 
""" +import enum import logging import os import re @@ -27,7 +28,7 @@ _WALLTIME, _Directives, ) -from .errors import NoSchedulerError +from .errors import NoSchedulerError, SubmitError from .scheduling.base import JobStatus from .scheduling.fake_scheduler import FakeScheduler from .scheduling.lsf import LSFScheduler @@ -90,39 +91,41 @@ def template_filter(func): return classmethod(func) +class _NodeTypes(enum.Enum): + SHARED = 1 + MIXED = 2 + WHOLENODE = 3 + + class _PartitionConfig: - def __init__(self, cpus_per_node=None, gpus_per_node=None, shared_partitions=None): - self.cpus_per_node = ( - {"default": None} if cpus_per_node is None else cpus_per_node - ) - self.gpus_per_node = ( - {"default": None} if gpus_per_node is None else gpus_per_node - ) - self.shared_partitions = ( - set() if shared_partitions is None else shared_partitions - ) + _default_cpus_per_node = None + _default_gpus_per_node = None + _default_node_type = _NodeTypes.MIXED + + def __init__(self, cpus_per_node=None, gpus_per_node=None, node_types=None): + self.cpus_per_node = {} if cpus_per_node is None else cpus_per_node + self.gpus_per_node = {} if gpus_per_node is None else gpus_per_node + self.node_types = {} if node_types is None else node_types def __getitem__(self, partition): return _Partition( partition, - self._get_cpus(partition), - self._get_gpus(partition), - partition in self.shared_partitions, + self._get(partition, self.cpus_per_node, self._default_cpus_per_node), + self._get(partition, self.gpus_per_node, self._default_gpus_per_node), + self._get(partition, self.node_types, self._default_node_type), ) - def _get_cpus(self, partition): - return self.cpus_per_node.get(partition, self.cpus_per_node["default"]) - - def _get_gpus(self, partition): - return self.gpus_per_node.get(partition, self.gpus_per_node["default"]) + @staticmethod + def _get(key, mapping, default): + return mapping.get(key, mapping.get("default", default)) class _Partition: - def __init__(self, name, cpus, gpus, shared): + def __init__(self, name, cpus, gpus, node_type): self.name = name self.gpus = gpus self.cpus = cpus - self.shared = shared + self.node_type = node_type def calculate_num_nodes(self, cpu_tasks, gpu_tasks, threshold): if gpu_tasks > 0: @@ -137,7 +140,12 @@ def _nodes_for_task(self, tasks, processors, threshold): """Call calc_num_nodes but handles the None sentinal value.""" if processors is None: return 1 - return calc_num_nodes(tasks, processors, threshold) + nodes = calc_num_nodes(tasks, processors, threshold) + if self.node_type == _NodeTypes.SHARED and nodes > 1: + raise SubmitError( + f"Cannot submit {tasks} tasks to shared partition {self.name}" + ) + return nodes class ComputeEnvironment(metaclass=_ComputeEnvironmentType): @@ -346,10 +354,10 @@ def _get_scheduler_values(cls, context): """ partition = cls._partition_config[context.get("partition", None)] force = context.get("force", False) - if force or partition.shared: - threshold = 0.0 - else: + if partition.node_type == _NodeTypes.WHOLENODE and not force: threshold = 0.9 + else: + threshold = 0.0 cpu_tasks_total = calc_tasks( context["operations"], "np", diff --git a/flow/environments/incite.py b/flow/environments/incite.py index 5d69a8397..b89e656ea 100644 --- a/flow/environments/incite.py +++ b/flow/environments/incite.py @@ -10,6 +10,7 @@ from ..environment import ( DefaultLSFEnvironment, DefaultSlurmEnvironment, + _NodeTypes, _PartitionConfig, template_filter, ) @@ -38,7 +39,9 @@ def my_operation(job): template = "summit.sh" mpi_cmd = "jsrun" 
diff --git a/flow/environments/incite.py b/flow/environments/incite.py
index 5d69a8397..b89e656ea 100644
--- a/flow/environments/incite.py
+++ b/flow/environments/incite.py
@@ -10,6 +10,7 @@
 from ..environment import (
     DefaultLSFEnvironment,
     DefaultSlurmEnvironment,
+    _NodeTypes,
     _PartitionConfig,
     template_filter,
 )
@@ -38,7 +39,9 @@ def my_operation(job):
     template = "summit.sh"
     mpi_cmd = "jsrun"
     _partition_config = _PartitionConfig(
-        cpus_per_node={"default": 42}, gpus_per_node={"default": 6}
+        cpus_per_node={"default": 42},
+        gpus_per_node={"default": 6},
+        node_types={"default": _NodeTypes.WHOLENODE},
     )
 
     @template_filter
     def calc_num_nodes(cls, resource_sets, parallel=False):
@@ -190,7 +193,9 @@ class AndesEnvironment(DefaultSlurmEnvironment):
     template = "andes.sh"
     mpi_cmd = "srun"
     _partition_config = _PartitionConfig(
-        cpus_per_node={"default": 32, "gpu": 28}, gpus_per_node={"default": 0, "gpu": 2}
+        cpus_per_node={"default": 32, "gpu": 28},
+        gpus_per_node={"gpu": 2},
+        node_types={"default": _NodeTypes.WHOLENODE},
     )
 
     @classmethod
@@ -221,7 +226,9 @@ class CrusherEnvironment(DefaultSlurmEnvironment):
     hostname_pattern = r".*\.crusher\.olcf\.ornl\.gov"
     template = "crusher.sh"
     _partition_config = _PartitionConfig(
-        cpus_per_node={"default": 56}, gpus_per_node={"default": 8}
+        cpus_per_node={"default": 56},
+        gpus_per_node={"default": 8},
+        node_types={"default": _NodeTypes.WHOLENODE},
     )
 
     mpi_cmd = "srun"
@@ -274,7 +281,9 @@ class FrontierEnvironment(DefaultSlurmEnvironment):
     hostname_pattern = r".*\.frontier\.olcf\.ornl\.gov"
     template = "frontier.sh"
     _partition_config = _PartitionConfig(
-        cpus_per_node={"default": 56}, gpus_per_node={"default": 8}
+        cpus_per_node={"default": 56},
+        gpus_per_node={"default": 8},
+        node_types={"default": _NodeTypes.WHOLENODE},
     )
     mpi_cmd = "srun"
 
     @classmethod
diff --git a/flow/environments/purdue.py b/flow/environments/purdue.py
index c842f2f1a..9785303b5 100644
--- a/flow/environments/purdue.py
+++ b/flow/environments/purdue.py
@@ -4,7 +4,7 @@
 """Environments for Purdue supercomputers."""
 import logging
 
-from ..environment import DefaultSlurmEnvironment, _PartitionConfig
+from ..environment import DefaultSlurmEnvironment, _NodeTypes, _PartitionConfig
 
 logger = logging.getLogger(__name__)
 
@@ -20,8 +20,14 @@ class AnvilEnvironment(DefaultSlurmEnvironment):
     mpi_cmd = "mpirun"
     _partition_config = _PartitionConfig(
         cpus_per_node={"default": 128},
-        gpus_per_node={"default": 4},
-        shared_partitions={"debug", "gpu-debug", "shared", "highmem", "gpu"},
+        gpus_per_node={"gpu": 4, "gpu-debug": 4},
+        node_types={
+            "gpu-debug": _NodeTypes.SHARED,
+            "shared": _NodeTypes.SHARED,
+            "highmem": _NodeTypes.SHARED,
+            "wholenode": _NodeTypes.WHOLENODE,
+            "wide": _NodeTypes.WHOLENODE,
+        },
     )
 
     @classmethod
diff --git a/flow/environments/umich.py b/flow/environments/umich.py
index 945918310..ed2d6a2b8 100644
--- a/flow/environments/umich.py
+++ b/flow/environments/umich.py
@@ -16,8 +16,7 @@ class GreatLakesEnvironment(DefaultSlurmEnvironment):
     mpi_cmd = "srun"
     _partition_config = _PartitionConfig(
         cpus_per_node={"default": 36, "gpu": 40},
-        gpus_per_node={"default": 2},
-        shared_partitions={"standard", "gpu"},
+        gpus_per_node={"gpu": 2},
     )
 
     @classmethod
diff --git a/flow/environments/xsede.py b/flow/environments/xsede.py
index 0d425fd40..136431d8a 100644
--- a/flow/environments/xsede.py
+++ b/flow/environments/xsede.py
@@ -5,7 +5,12 @@
 import logging
 import os
 
-from ..environment import DefaultSlurmEnvironment, _PartitionConfig, template_filter
+from ..environment import (
+    DefaultSlurmEnvironment,
+    _NodeTypes,
+    _PartitionConfig,
+    template_filter,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -149,9 +154,21 @@ class Bridges2Environment(DefaultSlurmEnvironment):
     template = "bridges2.sh"
     mpi_cmd = "mpirun"
     _partition_config = _PartitionConfig(
-        cpus_per_node={"default": 128, "EM": 96, "GPU": 40, "GPU-shared": 40},
-        gpus_per_node={"default": 8},
-        shared_partitions={"RM-shared", "GPU-shared"},
+        cpus_per_node={
+            "default": 128,
"RM-shared": 64, + "EM": 96, + "GPU": 40, + "GPU-shared": 20, + }, + gpus_per_node={"GPU": 8, "GPU-shared": 4}, + node_types={ + "RM-shared": _NodeTypes.SHARED, + "GPU-shared": _NodeTypes.SHARED, + "EM": _NodeTypes.SHARED, + "RM": _NodeTypes.WHOLENODE, + "GPU": _NodeTypes.WHOLENODE, + }, ) @classmethod @@ -170,7 +187,6 @@ def add_args(cls, parser): choices=[ "RM", "RM-shared", - "RM-small", "EM", "GPU", "GPU-shared", @@ -189,9 +205,15 @@ class ExpanseEnvironment(DefaultSlurmEnvironment): hostname_pattern = r".*\.expanse\.sdsc\.edu$" template = "expanse.sh" _partition_config = _PartitionConfig( - cpus_per_node={"default": 128, "GPU": 40}, - gpus_per_node={"default": 4}, - shared_partitions={"shared", "gpu-shared"}, + cpus_per_node={"default": 128, "gpu": 40, "gpu-shared": 40, "gpu-debug": 40}, + gpus_per_node={"gpu": 4, "gpu-shared": 4, "gpu-debug": 4}, + node_types={ + "shared": _NodeTypes.SHARED, + "large-shared": _NodeTypes.SHARED, + "gpu-shared": _NodeTypes.SHARED, + "compute": _NodeTypes.WHOLENODE, + "gpu": _NodeTypes.WHOLENODE, + }, ) @classmethod @@ -212,6 +234,7 @@ def add_args(cls, parser): "shared", "large-shared", "gpu", + "gpu-debug", "gpu-shared", "debug", ], @@ -236,14 +259,13 @@ class DeltaEnvironment(DefaultSlurmEnvironment): template = "delta.sh" _partition_config = _PartitionConfig( cpus_per_node={ - "default": 128, + "cpu": 128, "gpuA40x4": 64, "gpuA100x4": 64, "gpuA100x8": 128, "gpuMI100x8": 128, }, - gpus_per_node={"default": 4, "gpuA100x8": 8, "gpuMI100x8": 8}, - shared_partitions={"cpu", "gpuA100x4", "gpuA40x4", "gpuA100x8", "gpuMI100x8"}, + gpus_per_node={"gpuA40x4": 4, "gpuA100x4": 4, "gpuA100x8": 8, "gpuMI100x8": 8}, ) @classmethod From 45f47f84d93bf8585dff30882bf48e37d1930fd6 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 2 Nov 2023 16:07:14 -0400 Subject: [PATCH 04/28] refactor: Raise more template errors in Python --- flow/environment.py | 12 ++++++++++-- flow/templates/andes.sh | 9 --------- flow/templates/anvil.sh | 8 -------- flow/templates/bridges2.sh | 15 --------------- flow/templates/delta.sh | 9 --------- flow/templates/expanse.sh | 12 ------------ flow/templates/frontier.sh | 3 --- flow/templates/umich-greatlakes.sh | 6 ------ 8 files changed, 10 insertions(+), 64 deletions(-) diff --git a/flow/environment.py b/flow/environment.py index e78530500..00757a184 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -127,11 +127,19 @@ def __init__(self, name, cpus, gpus, node_type): self.cpus = cpus self.node_type = node_type - def calculate_num_nodes(self, cpu_tasks, gpu_tasks, threshold): + def calculate_num_nodes(self, cpu_tasks, gpu_tasks, threshold, force): if gpu_tasks > 0: + if (self.gpus is None or self.gpus == 0) and not force: + raise SubmitError( + f"Cannot request GPU's on nonGPU partition, {self.name}." + ) num_nodes_gpu = self._nodes_for_task(gpu_tasks, self.gpus, threshold) num_nodes_cpu = self._nodes_for_task(cpu_tasks, self.cpus, 0) else: + if (self.gpus is not None and self.gpus > 0) and not force: + raise SubmitError( + f"Cannot submit to GPU partition, {self.name}, without GPUs." 
+ ) num_nodes_gpu = 0 num_nodes_cpu = self._nodes_for_task(cpu_tasks, self.cpus, threshold) return max(num_nodes_cpu, num_nodes_gpu, 1) @@ -372,7 +380,7 @@ def _get_scheduler_values(cls, context): ) num_nodes = partition.calculate_num_nodes( - cpu_tasks_total, gpu_tasks_total, threshold + cpu_tasks_total, gpu_tasks_total, threshold, force ) return { diff --git a/flow/templates/andes.sh b/flow/templates/andes.sh index 5fd55ff6e..ee7cbd40c 100644 --- a/flow/templates/andes.sh +++ b/flow/templates/andes.sh @@ -1,15 +1,6 @@ {# Templated in accordance with: https://docs.olcf.ornl.gov/systems/andes_user_guide.html #} {% extends "slurm.sh" %} {% block tasks %} - {% if resources.ngpu_tasks %} - {% if not ('GPU' in partition or force) %} - {% raise "GPU operations require a GPU partition!" %} - {% endif %} - {% else %} - {% if 'gpu' in partition and not force %} - {% raise "Requesting gpu partition, but no GPUs requested!" %} - {% endif %} - {% endif %} {% if 'gpu' in partition %} {% if resources.ncpu_tasks > resources.ngpu_tasks * 14 and not force %} {% raise "Cannot request more than 14 CPUs per GPU." %} diff --git a/flow/templates/anvil.sh b/flow/templates/anvil.sh index ac5222bc3..023957606 100644 --- a/flow/templates/anvil.sh +++ b/flow/templates/anvil.sh @@ -1,14 +1,6 @@ {# Templated in accordance with: https://www.rcac.purdue.edu/knowledge/anvil/ #} {% extends "slurm.sh" %} {% block tasks %} - {% if resources.ngpu_tasks and not ("gpu" in partition or force) %} - {% raise "GPU operations require a gpu partition!" %} - {% endif %} - {% if resources.ngpu_tasks == 0 %} - {% if 'gpu' in partition and not force %} - {% raise "Requesting gpu partition, but no GPUs requested!" %} - {% endif %} - {% endif %} {% if resources.num_nodes > 1 %} #SBATCH -N {{ resources.num_nodes }} {% endif %} diff --git a/flow/templates/bridges2.sh b/flow/templates/bridges2.sh index 6606021e1..25cdeb57f 100644 --- a/flow/templates/bridges2.sh +++ b/flow/templates/bridges2.sh @@ -1,21 +1,6 @@ {# Templated in accordance with: https://www.psc.edu/resources/bridges-2/user-guide #} {% extends "slurm.sh" %} {% block tasks %} - {% if resources.ngpu_tasks %} - {% if not ('GPU' in partition or force) %} - {% raise "GPU operations require a GPU partition!" %} - {% endif %} - {% if partition == "GPU-shared" and resources.ngpu_tasks > 4 %} - {% raise "Cannot request GPU-shared with more than 4 GPUs." %} - {% endif %} - {% else %} - {% if 'GPU' in partition and not force %} - {% raise "Requesting GPU partition, but no GPUs requested!" %} - {% endif %} - {% endif %} - {% if partition == 'RM-shared' and resources.ncpu_tasks > 64 %} - {% raise "Cannot request RM-shared with more than 64 tasks or multiple nodes." %} - {% endif %} {% if resources.num_nodes > 1 or resources.ncpu_tasks >= 128 or resources.ngpu_tasks >= 8 %} #SBATCH -N {{ resources.num_nodes }} {% endif %} diff --git a/flow/templates/delta.sh b/flow/templates/delta.sh index 4785da06a..4502ddf66 100644 --- a/flow/templates/delta.sh +++ b/flow/templates/delta.sh @@ -6,15 +6,6 @@ increased charges and is expected to be suitable for a minority of use cases." %} {% endif %} - {% if resources.ngpu_tasks %} - {% if not ("gpu" in partition or force) %} - {% raise "GPU operations require a GPU partition!" %} - {% endif %} - {% else %} - {% if 'gpu' in partition and not force %} - {% raise "Requesting GPU partition, but no GPUs requested!" 
%}
-    {% endif %}
-    {% endif %}
     {% if resources.num_nodes > 1 %}
 #SBATCH -N {{ resources.num_nodes }}
     {% endif %}
diff --git a/flow/templates/expanse.sh b/flow/templates/expanse.sh
index a052726cc..013e0eeb1 100644
--- a/flow/templates/expanse.sh
+++ b/flow/templates/expanse.sh
@@ -1,18 +1,6 @@
 {# Templated in accordance with: https://www.sdsc.edu/support/user_guides/expanse.html #}
 {% extends "slurm.sh" %}
 {% block tasks %}
-    {% if resources.gpu_tasks %}
-        {% if not ('gpu' in partition or force) %}
-            {% raise "GPU operations require a GPU partition!" %}
-        {% endif %}
-    {% else %}
-        {% if 'gpu' in partition and not force %}
-            {% raise "Requesting GPU partition, but no GPUs requested!" %}
-        {% endif %}
-    {% endif %}
-    {% if "shared" in partition and resources.num_nodes > 1 %}
-        {% raise "Cannot request shared partition with resources spanning multiple nodes." %}
-    {% endif %}
     {% if "shared" not in partition %}
 #SBATCH -N {{ resources.num_nodes }}
     {% endif %}
diff --git a/flow/templates/frontier.sh b/flow/templates/frontier.sh
index 4eb5553ac..dd60a01c1 100644
--- a/flow/templates/frontier.sh
+++ b/flow/templates/frontier.sh
@@ -1,9 +1,6 @@
 {# Templated in accordance with: https://docs.olcf.ornl.gov/systems/crusher_quick_start_guide.html #}
 {% extends "slurm.sh" %}
 {% block tasks %}
-    {% if not resources.ngpu_tasks and not force %}
-        {% raise "Must request GPUs to use Frontier." %}
-    {% endif %}
 #SBATCH --nodes={{ resources.num_nodes }}
 {% endblock tasks %}
 {% block header %}
diff --git a/flow/templates/umich-greatlakes.sh b/flow/templates/umich-greatlakes.sh
index abb640b75..871d8894a 100644
--- a/flow/templates/umich-greatlakes.sh
+++ b/flow/templates/umich-greatlakes.sh
@@ -1,12 +1,6 @@
 {% extends "slurm.sh" %}
 {% set partition = partition|default('standard', true) %}
 {% block tasks %}
-    {% if resources.ngpu_tasks and 'gpu' not in partition and not force %}
-        {% raise "Requesting GPUs requires a gpu partition!" %}
-    {% endif %}
-    {% if 'gpu' in partition and resources.ngpu_tasks == 0 and not force %}
-        {% raise "Requesting gpu partition without GPUs!" %}
-    {% endif %}
 #SBATCH --nodes={{ resources.num_nodes }}
 #SBATCH --ntasks={{ resources.ncpu_tasks }}
     {% if partition == 'gpu' %}

From ba63e0f07ea855b704b6010ba4ff7e4af2e5411f Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 2 Nov 2023 16:45:29 -0400
Subject: [PATCH 05/28] doc/refactor: Document new classes

Also move threshold to _PartitionConfig class and gpus_per_node default
to 0.
---
 flow/environment.py | 122 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 108 insertions(+), 14 deletions(-)

diff --git a/flow/environment.py b/flow/environment.py
index 00757a184..8f9a183c7 100644
--- a/flow/environment.py
+++ b/flow/environment.py
@@ -92,14 +92,54 @@
 
 
 class _NodeTypes(enum.Enum):
+    """Define the acceptable node requests for a partition.
+
+    - SHARED: Only supports requests up to a single full node. Note that the
+      cores provided in `_PartitionConfig` can be restricted to below a full
+      node.
+    - MIXED: No restriction on partial or whole node requests.
+    - WHOLENODE: Only supports submissions in units of whole nodes.
+    """
+
     SHARED = 1
     MIXED = 2
     WHOLENODE = 3
 
 
 class _PartitionConfig:
+    """The configuration of partitions for a given environment.
+
+    Currently supports the ideas of:
+
+    - CPUs for a partition
+    - GPUs for a partition
+    - Node type of a partition
+
+    When querying a value for a specific partition, the logic first searches
+    the provided mapping (if any) for the partition. If the partition is not
+    found, the mapping is searched for a "default" entry; failing that, the
+    class default is used.
+
+    1. Partition specific -> 2. Provided default -> 3. _PartitionConfig default
+
+    The class defaults are:
+
+    - CPUs: ``None``, which represents any number; practically, this is
+      interpreted as an unlimited supply of CPUs.
+    - GPUs: 0.
+    - Node type: `_NodeTypes.MIXED`.
+
+    Parameters
+    ----------
+    cpus_per_node: dict[str, int], optional
+        Mapping between partitions and CPUs per node. Defaults to an empty `dict`.
+    gpus_per_node: dict[str, int], optional
+        Mapping between partitions and GPUs per node. Defaults to an empty `dict`.
+    node_types: dict[str, _NodeTypes], optional
+        Mapping between partitions and node types. Defaults to an empty `dict`.
+    """
+
     _default_cpus_per_node = None
-    _default_gpus_per_node = None
+    _default_gpus_per_node = 0
     _default_node_type = _NodeTypes.MIXED
 
     def __init__(self, cpus_per_node=None, gpus_per_node=None, node_types=None):
@@ -108,6 +148,7 @@ def __init__(self, cpus_per_node=None, gpus_per_node=None, node_types=None):
         self.node_types = {} if node_types is None else node_types
 
     def __getitem__(self, partition):
+        """Get the `_Partition` object for the provided partition."""
         return _Partition(
             partition,
             self._get(partition, self.cpus_per_node, self._default_cpus_per_node),
@@ -117,26 +158,83 @@ def __getitem__(self, partition):
 
     @staticmethod
     def _get(key, mapping, default):
+        """Get the value of key following the class priority chain."""
         return mapping.get(key, mapping.get("default", default))
 
 
 class _Partition:
+    """Represent a partition and its associated data.
+
+    Parameters
+    ----------
+    name: str
+        The name of the partition.
+    cpus: int
+        The CPUs per node.
+    gpus: int
+        The GPUs per node.
+    node_type: _NodeTypes
+        The node type for the partition.
+
+    Attributes
+    ----------
+    name: str
+        The name of the partition.
+    cpus: int
+        The CPUs per node.
+    gpus: int
+        The GPUs per node.
+    node_type: _NodeTypes
+        The node type for the partition.
+    """
+
     def __init__(self, name, cpus, gpus, node_type):
-        self.name = name
+        # Use empty string for error messages.
+        self.name = name if name is not None else ""
         self.gpus = gpus
         self.cpus = cpus
         self.node_type = node_type
 
-    def calculate_num_nodes(self, cpu_tasks, gpu_tasks, threshold, force):
+    def calculate_num_nodes(self, cpu_tasks, gpu_tasks, force):
+        """Compute the number of nodes for the given workload.
+
+        Parameters
+        ----------
+        cpu_tasks: int
+            Total CPU tasks/cores.
+        gpu_tasks: int
+            Total GPUs requested.
+        force: bool
+            Whether to allow seemingly nonsensical/erroneous resource requests.
+
+        Raises
+        ------
+        SubmitError:
+            Raised, unless ``force`` is set, for
+            1. non-zero GPUs on non-GPU partitions,
+            2. zero GPUs on GPU partitions,
+            3. requests larger than a node on `_NodeTypes.SHARED` partitions
+               (through `~._nodes_for_task`), and
+            4. requests filling less than 0.9 of the last node on
+               `_NodeTypes.WHOLENODE` partitions (through `calc_num_nodes`).
+        """
+        threshold = 0.9 if self.node_type == _NodeTypes.WHOLENODE and not force else 0.0
        if gpu_tasks > 0:
-            if (self.gpus is None or self.gpus == 0) and not force:
-                raise SubmitError(
-                    f"Cannot request GPUs on non-GPU partition, {self.name}."
-                )
-            num_nodes_gpu = self._nodes_for_task(gpu_tasks, self.gpus, threshold)
+            if self.gpus == 0:
+                # Required for current tests. Also skips a divide by zero error
+                # if the user actually wants to submit CPU-only jobs to GPU partitions.
+                if force:
+                    num_nodes_gpu = 1
+                else:
+                    raise SubmitError(
+                        f"Cannot request GPUs on non-GPU partition, {self.name}."
+                    )
+            else:
+                num_nodes_gpu = self._nodes_for_task(gpu_tasks, self.gpus, threshold)
             num_nodes_cpu = self._nodes_for_task(cpu_tasks, self.cpus, 0)
         else:
-            if (self.gpus is not None and self.gpus > 0) and not force:
+            if self.gpus > 0 and not force:
                 raise SubmitError(
                     f"Cannot submit to GPU partition, {self.name}, without GPUs."
                 )
@@ -362,10 +460,6 @@ def _get_scheduler_values(cls, context):
         """
         partition = cls._partition_config[context.get("partition", None)]
         force = context.get("force", False)
-        if partition.node_type == _NodeTypes.WHOLENODE and not force:
-            threshold = 0.9
-        else:
-            threshold = 0.0
         cpu_tasks_total = calc_tasks(
             context["operations"],
             "np",
@@ -380,7 +474,7 @@ def _get_scheduler_values(cls, context):
         )
 
         num_nodes = partition.calculate_num_nodes(
-            cpu_tasks_total, gpu_tasks_total, threshold, force
+            cpu_tasks_total, gpu_tasks_total, force
         )
 
         return {
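(The precedence chain documented in the new `_PartitionConfig` docstring is easiest to see with a short example. This is a sketch only; the partition names and counts below are made up.)

    config = _PartitionConfig(
        cpus_per_node={"default": 128, "gpu": 64},
        gpus_per_node={"gpu": 4},
    )

    gpu = config["gpu"]      # cpus=64 (partition entry), gpus=4, node type MIXED (class default)
    cpu = config["compute"]  # cpus=128 (provided "default"), gpus=0 (class default)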
From 14f407639eebba47f28bdd17c125a9701d60db3d Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 2 Nov 2023 17:25:16 -0500
Subject: [PATCH 06/28] test: add force=True to submit (handle ngpu directives)

---
 tests/test_project.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/test_project.py b/tests/test_project.py
index ef2bb52b3..0758ae1e6 100644
--- a/tests/test_project.py
+++ b/tests/test_project.py
@@ -1158,7 +1158,7 @@ def test_submit(self):
         project = self.mock_project()
         assert len(list(MockScheduler.jobs())) == 0
         with redirect_stderr(StringIO()):
-            project.submit()
+            project.submit(force=True)  # force set due to GPU directives
         even_jobs = [job for job in project if job.sp.b % 2 == 0]
         num_jobs_submitted = (2 * len(project)) + len(even_jobs)
         assert len(list(MockScheduler.jobs())) == num_jobs_submitted
@@ -1194,13 +1194,13 @@ def test_resubmit(self):
         assert len(list(MockScheduler.jobs())) == 0
         with redirect_stderr(StringIO()):
             # Initial submission
-            project.submit()
+            project.submit(force=True)  # force set due to GPU directives
             assert len(list(MockScheduler.jobs())) == num_jobs_submitted

             # Resubmit a bunch of times:
             for i in range(1, self.expected_number_of_steps + 3):
                 MockScheduler.step()
-                project.submit()
+                project.submit(force=True)  # force set due to GPU directives
                 if len(list(MockScheduler.jobs())) == 0:
                     break  # break when there are no jobs left
@@ -1239,7 +1239,7 @@ def test_submit_status(self):
             assert next_op.id not in cached_status

         with redirect_stderr(StringIO()):
-            project.submit()
+            project.submit(force=True)  # force set due to GPU directives
         assert len(list(MockScheduler.jobs())) == num_jobs_submitted
         cached_status = project._get_cached_scheduler_status()

From c3ccf3d3c80c1ea62e57f4adf4aaeadf0500dfb9 Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Mon, 6 Nov 2023 17:00:26 -0500
Subject: [PATCH 07/28] test: Test new errors in _Partition

---
 tests/test_environment.py | 79 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 78 insertions(+), 1 deletion(-)

diff --git a/tests/test_environment.py b/tests/test_environment.py
index 92825d66c..5b179b161 100644
--- a/tests/test_environment.py
+++ b/tests/test_environment.py
@@ -2,8 +2,34 @@
 # All rights reserved.
 # This software is licensed under the BSD 3-Clause License.
+import conftest
+import pytest
+
+import flow
 from flow import get_environment
-from flow.environment import ComputeEnvironment, TestEnvironment
+from flow.environment import (
+    ComputeEnvironment,
+    TestEnvironment,
+    _NodeTypes,
+    _PartitionConfig,
+)
+from flow.errors import SubmitError
+
+
+class MockEnvironment(ComputeEnvironment):
+    scheduler_type = conftest.MockScheduler
+    _partition_config = _PartitionConfig(
+        cpus_per_node={"cpu-shared": 100, "cpu-wholenode": 100, "default": 40},
+        gpus_per_node={"gpu": 4},
+        node_types={
+            "cpu-shared": _NodeTypes.SHARED,
+            "cpu-wholenode": _NodeTypes.WHOLENODE,
+        },
+    )
+
+    @classmethod
+    def is_present(cls):
+        return True


 class TestProject:
@@ -13,3 +39,54 @@ def test_get_TestEnvironment(self):
         assert not issubclass(env, TestEnvironment)
         env = get_environment(test=True)
         assert issubclass(env, TestEnvironment)
+
+
+class TestEnvironments(conftest.TestProjectBase):
+    class Project(flow.FlowProject):
+        pass
+
+    @Project.operation(directives={"ngpu": 1})
+    def gpu_op(job):
+        pass
+
+    @Project.operation(directives={"np": 1_000})
+    def large_cpu_op(job):
+        pass
+
+    @Project.operation(directives={"np": 1})
+    def small_cpu_op(job):
+        pass
+
+    project_class = Project
+
+    def mock_project(self):
+        project = self.project_class.get_project(path=self._tmp_dir.name)
+        project.open_job({"i": 0}).init()
+        project._environment = MockEnvironment
+        return project
+
+    def test_gpu_partition_without_gpu(self):
+        pr = self.mock_project()
+        with pytest.raises(SubmitError):
+            pr.submit(names=["small_cpu_op"], partition="gpu")
+
+    def test_gpu_op_without_gpu_partition(self):
+        pr = self.mock_project()
+        with pytest.raises(SubmitError):
+            pr.submit(names=["gpu_op"], partition="cpu-shared")
+
+    def test_wholenode_submission_with_insufficient_resources(self):
+        pr = self.mock_project()
+        with pytest.raises(RuntimeError):
+            pr.submit(names=["small_cpu_op"], partition="cpu-wholenode")
+
+    def test_shared_submission_with_too_large_request(self):
+        pr = self.mock_project()
+        with pytest.raises(RuntimeError):
+            pr.submit(names=["large_cpu_op"], partition="cpu-shared")
+
+    def test_various_valid_submissions(self):
+        pr = self.mock_project()
+        pr.submit(names=["large_cpu_op"], partition="cpu-wholenode")
+        pr.submit(names=["small_cpu_op"], partition="cpu-shared")
+        pr.submit(names=["gpu_op"], partition="gpu")
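(The next patch begins replacing the old directive vocabulary (``np``, ``nranks``, ``omp_num_threads``, ``ngpu``, ``memory``, ``fork``, ``processor_fraction``) with a process-oriented one. The sketch below shows the rough mapping implied by the new definitions; it is an illustration, not a migration guide, since the new per-process and per-CPU quantities are not one-to-one translations of the old totals.)

    # old-style directives                     rough new-style equivalents
    # {"nranks": 4, "omp_num_threads": 2}  ->  {"launcher": "mpi", "processes": 4,
    #                                           "threads_per_process": 2}
    # {"ngpu": 4}                          ->  {"processes": 4, "gpus_per_process": 1}
    # {"memory": "8g"}                     ->  {"memory_per_cpu": "2g"}  # e.g. over 4 CPUs

    @FlowProject.operation(
        directives={"launcher": "mpi", "processes": 4, "threads_per_process": 2}
    )
    def compute(job):
        pass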
From aaa0e7f4c253232503c1a73897230a6491784c3b Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Tue, 7 Nov 2023 17:02:58 -0500
Subject: [PATCH 08/28] refactor (WIP): Switch over directives to new structure

---
 flow/directives.py       | 226 +++++------------------------------
 flow/environment.py      |  24 ++--
 tests/test_directives.py | 252 +++++++++------------------------------
 3 files changed, 98 insertions(+), 404 deletions(-)

diff --git a/flow/directives.py b/flow/directives.py
index bdf5231e3..773b07586 100644
--- a/flow/directives.py
+++ b/flow/directives.py
@@ -8,7 +8,6 @@
 """
 import datetime
 import functools
-import operator
 import sys
 from collections.abc import MutableMapping
@@ -18,14 +17,9 @@
 class _Directive:
     """The definition of a single directive.
 
-    Logic for validation of values when setting, defaults, and the ability
-    for directives to inspect other directives (such as using ``nranks`` and
-    ``omp_num_threads`` for computing ``np``). This is only meant to work
+    Logic for validating values on assignment and providing defaults. This is only meant to work
     with the internals of signac-flow.
 
-    The validation of a directive occurs before the call to ``finalize``. 
It is - the caller's responsibility to ensure that finalized values are still valid. - Since directive values can be dependent on jobs we allow all directives to be set to a callable which is lazily validated. @@ -40,23 +34,6 @@ class _Directive: callable directly returns the passed value. Defaults to ``None``. default : any, optional Sets the default for the directive, defaults to ``None``. - serial : callable, optional - A callable that takes two inputs for the directive and returns the - appropriate value for these operations running in serial. If ``None`` or - not provided, the ``max`` function is used. Defaults to ``None``. - parallel : callable, optional - A callable that takes two inputs for the directive and returns the - appropriate value for these operations running in parallel. If ``None`` - or not provided, the ``operator.add`` function is used. Defaults to - ``None``. Defaults to ``None``. - finalize : callable, optional - A callable that takes the set value of the directive and the - :class:`~._Directives` object it is a child of and outputs the finalized - value for that directive. This is useful if some directives have - multiple ways to be set or are dependent in some way on other - directives. If ``None`` or not provided, the set value is returned. - Defaults to ``None``. - """ def __init__( @@ -65,23 +42,14 @@ def __init__( *, validator=None, default=None, - serial=max, - parallel=operator.add, - finalize=None, ): self._name = name self._default = default - self._serial = serial - self._parallel = parallel def identity(value): return value - def default_finalize(value, directives): - return value - self._validator = identity if validator is None else validator - self._finalize = default_finalize if finalize is None else finalize def __call__(self, value): """Return a validated value for the given directive. @@ -157,7 +125,7 @@ def _set_defined_directive(self, key, value): def __getitem__(self, key): if key in self._defined_directives and key in self._directive_definitions: value = self._defined_directives[key] - return self._directive_definitions[key]._finalize(value, self) + return value if key in self._user_directives: return self._user_directives[key] raise KeyError(f"{key} not in directives.") @@ -188,28 +156,6 @@ def __str__(self): def __repr__(self): return f"_Directives({str(self)})" - def update(self, other, aggregate=False, jobs=None, parallel=False): - """Update directives with another set of directives. - - This method accounts for serial/parallel behavior and aggregation. - - Parameters - ---------- - other : :class:`~._Directives` - The other set of directives. - aggregate : bool - Whether to combine directives according to serial/parallel rules. - jobs : :class:`signac.job.Job` or tuple of :class:`signac.job.Job` - The jobs used to evaluate directives. - parallel : bool - Whether to aggregate according to parallel rules. - - """ - if aggregate: - self._aggregate(other, jobs=jobs, parallel=parallel) - else: - super().update(other) - def evaluate(self, jobs): """Evaluate directives for the provided jobs. 
@@ -227,18 +173,6 @@ def evaluate(self, jobs): self[key] = _evaluate(value, jobs) self._evaluated = True - def _aggregate(self, other, jobs=None, parallel=False): - self.evaluate(jobs) - other.evaluate(jobs) - agg_func_attr = "_parallel" if parallel else "_serial" - for name in self._defined_directives: - agg_func = getattr(self._directive_definitions[name], agg_func_attr) - default_value = self._directive_definitions[name]._default - other_directive = other.get(name, default_value) - directive = self[name] - if other_directive is not None: - self._defined_directives[name] = agg_func(directive, other_directive) - @property def user_keys(self): # noqa: D401 """A generator of user specified keys.""" @@ -297,36 +231,6 @@ def is_greater_or_equal(value): return is_greater_or_equal -_NP_DEFAULT = 1 - - -def _finalize_np(np, directives): - """Return the actual number of processes/threads to use. - - We check the default np because when aggregation occurs we multiply the - number of MPI ranks and OMP_NUM_THREADS. If we always took the greater of - the given NP and ranks * threads then after aggregating we will inflate the - number of processors needed as (r1 * t1) + (r2 * t2) <= (r1 + r2) * (t1 + t2) - for numbers greater than one. - """ - if callable(np) or np != _NP_DEFAULT: - return np - nranks = directives.get("nranks", 1) - omp_num_threads = directives.get("omp_num_threads", 1) - if callable(nranks) or callable(omp_num_threads): - return np - return max(np, max(1, nranks) * max(1, omp_num_threads)) - - -# Helper validators for defining _Directive -def _no_aggregation(value, other): - """Return the first argument. - - This is used for directives that ignore aggregation rules. - """ - return value - - def _is_fraction(value): if 0 <= value <= 1: return value @@ -401,40 +305,7 @@ def _parse_memory(memory): ) -def _max_not_none(value, other): - """Return the max of two values, with special handling of None. - - This is used for memory directives in serial and walltime directives in - parallel. - """ - if value is None and other is None: - return None - elif other is None: - return value - elif value is None: - return other - else: - return max(value, other) - - -def _sum_not_none(value, other): - """Return the sum of two values, with special handling of None. - - This is used for memory directives in parallel and walltime directives in - serial. - """ - if value is None and other is None: - return None - elif other is None: - return value - elif value is None: - return other - else: - return operator.add(value, other) - - # Definitions used for validating directives -_bool = _OnlyTypes(bool) _natural_number = _OnlyTypes(int, postprocess=_raise_below(1)) _nonnegative_int = _OnlyTypes(int, postprocess=_raise_below(0)) _positive_real_walltime = _OnlyTypes( @@ -461,10 +332,8 @@ def _GET_EXECUTABLE(): # This is because we mock `sys.executable` while generating template reference data. _EXECUTABLE = _Directive( "executable", - validator=_OnlyTypes(str), + validator=_OnlyTypes(str, type(None)), default=sys.executable, - serial=_no_aggregation, - parallel=_no_aggregation, ) _EXECUTABLE.__doc__ = """Return the path to the executable to be used for an operation. 
@@ -478,25 +347,27 @@ def _GET_EXECUTABLE():
     return _EXECUTABLE
 
 
-_FORK = _Directive("fork", validator=_bool, default=False)
-_FORK.__doc__ = """The fork directive can be set to True to enforce that a
-particular operation is always executed within a subprocess and not within the
-Python interpreter's process even if there are no other reasons that would prevent that.
-
-.. note::
-
-    Setting ``fork=False`` will not prevent forking if there are other reasons for forking,
-    such as a timeout.
+_LAUNCHER = _Directive("launcher", validator=_OnlyTypes(str, type(None)), default=None)
+_LAUNCHER.__doc__ = """The launcher to use to execute this operation.
+
+A launcher is defined as a separate program used to launch an application.
+Primarily this is designed to specify whether or not MPI should be used to
+launch the operation. Set to "mpi" for this case. Defaults to ``None``.
+
+For example:
+
+.. code-block:: python
+
+    @Project.operation(directives={"launcher": "mpi"})
+    def op(job):
+        pass
 """
 
-_MEMORY = _Directive(
-    "memory",
-    validator=_positive_real_memory,
-    default=None,
-    serial=_max_not_none,
-    parallel=_sum_not_none,
+_MEMORY_PER_CPU = _Directive(
+    "memory_per_cpu", validator=_positive_real_memory, default=None
 )
-_MEMORY.__doc__ = """The memory to request for this operation.
+_MEMORY_PER_CPU.__doc__ = """The memory to request per CPU for this operation.
 
 The memory to validate should be either a float, int, or string.
 A valid memory argument is defined as:
@@ -536,36 +407,24 @@ def op2(job):
         pass
 """
 
-_NGPU = _Directive("ngpu", validator=_nonnegative_int, default=0)
-_NGPU.__doc__ = """The number of GPUs to use for this operation.
-
-Expects a nonnegative integer. Defaults to 0.
-"""
-
-_NP = _Directive(
-    "np", validator=_natural_number, default=_NP_DEFAULT, finalize=_finalize_np
+_GPUS_PER_PROCESS = _Directive(
+    "gpus_per_process", validator=_nonnegative_int, default=0
 )
-_NP.__doc__ = """The total number of CPU cores to request for a given operation.
-
-Expects a natural number (i.e. an integer >= 1). This directive introspects into
-the "nranks" or "omp_num_threads" directives and uses their product if it is
-greater than the current set value. Defaults to 1.
+_GPUS_PER_PROCESS.__doc__ = """The number of GPUs to use per process.
 
-Warning:
-    Generally for multicore applications, either this if not using MPI, or "nranks" and
-    "omp_num_threads" should be specified but not both.
+Expects a nonnegative integer. Defaults to 0.
 """
 
-_NRANKS = _Directive("nranks", validator=_nonnegative_int, default=0)
-_NRANKS.__doc__ = """The number of MPI ranks to use for this operation. Defaults to 0.
+_PROCESSES = _Directive("processes", validator=_natural_number, default=1)
+_PROCESSES.__doc__ = """The number of processes the operation plans to use.
 
-Expects a nonnegative integer.
+Expects a natural number (i.e. an integer >= 1). Defaults to 1.
 """
 
-_OMP_NUM_THREADS = _Directive("omp_num_threads", validator=_nonnegative_int, default=0)
-_OMP_NUM_THREADS.__doc__ = """The number of OpenMP threads to use for this operation. Defaults to 0.
-
-When used in conjunction with "nranks" this specifies the OpenMP threads per rank.
+_THREADS_PER_PROCESS = _Directive(
+    "threads_per_process", validator=_nonnegative_int, default=0
+)
+_THREADS_PER_PROCESS.__doc__ = """The number of threads to use per process. Defaults to 0.
 
 Using this directive sets the environmental variable ``OMP_NUM_THREADS`` in the
 operation's execution environment.
 
 Expects a nonnegative integer.
""" -_PROCESSOR_FRACTION = _Directive( - "processor_fraction", - validator=_OnlyTypes(float, postprocess=_is_fraction), - default=1.0, - serial=_no_aggregation, - parallel=_no_aggregation, -) -_PROCESSOR_FRACTION.__doc__ = """Fraction of a resource to use on a single operation. - -If set to 0.5 for a bundled job with 20 operations (all with 'np' set to 1), 10 -CPUs will be used. Defaults to 1. - -.. note:: - - This can be particularly useful on Stampede2's launcher. -""" - -_WALLTIME = _Directive( - "walltime", - validator=_positive_real_walltime, - default=None, - serial=_sum_not_none, - parallel=_max_not_none, -) +_WALLTIME = _Directive("walltime", validator=_positive_real_walltime, default=None) _WALLTIME.__doc__ = """The number of hours to request for executing this job. This directive expects a float representing the walltime in hours. Fractional diff --git a/flow/environment.py b/flow/environment.py index 8f9a183c7..fdf87926d 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -17,14 +17,12 @@ from functools import lru_cache from .directives import ( - _FORK, _GET_EXECUTABLE, - _MEMORY, - _NGPU, - _NP, - _NRANKS, - _OMP_NUM_THREADS, - _PROCESSOR_FRACTION, + _GPUS_PER_PROCESS, + _LAUNCHER, + _MEMORY_PER_CPU, + _PROCESSES, + _THREADS_PER_PROCESS, _WALLTIME, _Directives, ) @@ -439,13 +437,11 @@ def _get_default_directives(cls): return _Directives( ( _GET_EXECUTABLE(), - _FORK, - _MEMORY, - _NGPU, - _NP, - _NRANKS, - _OMP_NUM_THREADS, - _PROCESSOR_FRACTION, + _MEMORY_PER_CPU, + _GPUS_PER_PROCESS, + _PROCESSES, + _THREADS_PER_PROCESS, + _LAUNCHER, _WALLTIME, ) ) diff --git a/tests/test_directives.py b/tests/test_directives.py index 5ea20c1aa..551b3a320 100644 --- a/tests/test_directives.py +++ b/tests/test_directives.py @@ -10,16 +10,14 @@ from flow import FlowProject from flow.directives import ( _GET_EXECUTABLE, - _MEMORY, - _NGPU, - _NP, - _NRANKS, - _OMP_NUM_THREADS, - _PROCESSOR_FRACTION, + _GPUS_PER_PROCESS, + _LAUNCHER, + _MEMORY_PER_CPU, + _PROCESSES, + _THREADS_PER_PROCESS, _WALLTIME, _Directive, _Directives, - _no_aggregation, ) from flow.errors import DirectivesError @@ -27,14 +25,13 @@ @pytest.fixture() def available_directives_list(): return [ - _NP, - _NRANKS, - _NGPU, - _OMP_NUM_THREADS, + _MEMORY_PER_CPU, + _GPUS_PER_PROCESS, + _PROCESSES, + _THREADS_PER_PROCESS, + _LAUNCHER, _GET_EXECUTABLE(), _WALLTIME, - _MEMORY, - _PROCESSOR_FRACTION, ] @@ -50,22 +47,7 @@ def val(v): raise ValueError("Price cannot be less than 10 units") return v - def finalize(value, dict): - discount = dict.get("discount", 0) - free = dict.get("free", False) - value = value - discount - if value < 0 or free: - return 0 - return value - - product = _Directive( - name="product", - validator=val, - default=10, - serial=_no_aggregation, - parallel=_no_aggregation, - finalize=finalize, - ) + product = _Directive(name="product", validator=val, default=10) return product @@ -73,24 +55,20 @@ def finalize(value, dict): def non_default_directive_values(): return [ { - "np": 1, - "ngpu": 10, - "nranks": 5, - "omp_num_threads": 20, + "processes": 5, + "threads_per_process": 20, "executable": "Non Default Path", "walltime": 64.0, - "memory": 32, - "processor_fraction": 0.5, + "memory_per_cpu": 2, + "launcher": "mpi", }, { - "np": 4, - "ngpu": 1, - "nranks": 0, - "omp_num_threads": 10, + "processes": 4, + "gpus_per_process": 1, + "threads_per_process": 10, "executable": "PathFinder", "walltime": 20.0, - "memory": 16, - "processor_fraction": 0.5, + "memory_per_cpu": 1, }, ] @@ -99,31 +77,28 @@ class 
TestItems: """Tests for _Directive class.""" def test_default(self): - assert _NP._default == 1 - assert _NGPU._default == 0 - assert _NRANKS._default == 0 - assert _OMP_NUM_THREADS._default == 0 + assert _PROCESSES._default == 1 + assert _GPUS_PER_PROCESS._default == 0 + assert _THREADS_PER_PROCESS._default == 0 + assert _MEMORY_PER_CPU._default is None assert _GET_EXECUTABLE()._default == sys.executable assert _WALLTIME._default is None - assert _MEMORY._default is None - assert _PROCESSOR_FRACTION._default == 1.0 + assert _LAUNCHER._default is None def test_invalid_values(self, available_directives_list): invalid_values = { - "np": [-1, "foo", {}, None], - "ngpu": [-1, "foo", {}, None], - "nranks": [-1, "foo", {}, None], - "omp_num_threads": [-1, "foo", {}, None], + "processes": [-1, "foo", {}, None], + "gpus_per_process": [-1, "foo", {}, None], + "threads_per_process": [-1, "foo", {}, None], "walltime": [-1, "foo", {}], - "memory": [-1, "foo", {}], - "processor_fraction": [-0.5, 2.5, "foo", {}, None], + "memory_per_cpu": [-1, "foo", {}], } for directive in available_directives_list: - if directive._name == "executable": - # Executable expect a string, if not found, then it tries to convert - # it into a string and becomes successful almost every time. - # Hence skipping Executable. + if directive._name in ("executable", "launcher"): + # Executable and launcher expect a string, if not found, then it tries + # to convert it into a string and becomes successful almost every time. + # Hence the skipping. continue for i, value in enumerate(invalid_values[directive._name]): with pytest.raises((ValueError, TypeError)): @@ -133,49 +108,6 @@ def test_defaults_are_valid(self, available_directives_list): for directive in available_directives_list: directive._validator(directive._default) - def test_serial(self): - assert _NP._serial(4, 2) == 4 - assert _NRANKS._serial(4, 2) == 4 - assert _NGPU._serial(4, 2) == 4 - assert _OMP_NUM_THREADS._serial(4, 2) == 4 - assert _GET_EXECUTABLE()._serial("Path1", "Path2") == "Path1" - assert _WALLTIME._serial(4, 2) == 6 - assert _WALLTIME._serial(4, None) == 4 - assert _WALLTIME._serial(None, 4) == 4 - assert _WALLTIME._serial(None, None) is None - assert _MEMORY._serial(4, 2) == 4 - assert _MEMORY._serial(4, None) == 4 - assert _MEMORY._serial(None, 4) == 4 - assert _MEMORY._serial(None, None) is None - assert _PROCESSOR_FRACTION._serial(0.4, 0.2) == 0.4 - - def test_parallel(self): - assert _NP._parallel(4, 2) == 6 - assert _NRANKS._parallel(4, 2) == 6 - assert _NGPU._parallel(4, 2) == 6 - assert _OMP_NUM_THREADS._parallel(4, 2) == 6 - assert _GET_EXECUTABLE()._parallel("Path1", "Path2") == "Path1" - assert _WALLTIME._parallel(4, 2) == 4 - assert _WALLTIME._parallel(4, None) == 4 - assert _WALLTIME._parallel(None, 4) == 4 - assert _WALLTIME._parallel(None, None) is None - assert _MEMORY._parallel(4, 2) == 6 - assert _MEMORY._parallel(4, None) == 4 - assert _MEMORY._parallel(None, 4) == 4 - assert _MEMORY._parallel(None, None) is None - assert _PROCESSOR_FRACTION._parallel(0.4, 0.2) == 0.4 - - def test_finalize(self): - dict_directives = { - "nranks": _NRANKS._default, - "omp_num_threads": _OMP_NUM_THREADS._default, - } - assert _NP._finalize(2, dict_directives) == 2 - dict_directives["nranks"] = 2 - dict_directives["omp_num_threads"] = 4 - assert _NP._finalize(2, dict_directives) == 2 - assert _NP._finalize(1, dict_directives) == 8 - def test_manual_item_default(self, product_directive): assert product_directive._default == 10 @@ -186,19 +118,6 @@ def 
test_manual_item_validation(self, product_directive): with pytest.raises(ValueError): product_directive._validator(0) - def test_manual_item_serial(self, product_directive): - product_directive._serial(10, 20) == 10 - product_directive._serial(20, 10) == 20 - - def test_manual_item_parallel(self, product_directive): - product_directive._parallel(10, 20) == 10 - - def test_manual_item_finalize(self, product_directive): - asset_dict = {"free": False, "discount": 5} - assert product_directive._finalize(50, asset_dict) == 45 - asset_dict["free"] = True - assert product_directive._finalize(50, asset_dict) == 0 - class TestDirectives: """Tests for _Directives Class.""" @@ -208,21 +127,22 @@ def test_get_directive(self, directives, available_directives_list): assert directives[item._name] == item._default def test_add_directive(self, available_directives_list): - directives = _Directives(available_directives_list[:-1]) - directives._add_directive(_PROCESSOR_FRACTION) - assert directives[_PROCESSOR_FRACTION._name] == _PROCESSOR_FRACTION._default + last_directive = available_directives_list.pop() + directives = _Directives(available_directives_list) + directives._add_directive(last_directive) + assert directives[last_directive._name] == last_directive._default with pytest.raises(TypeError): directives._add_directive("Test") with pytest.raises(ValueError): - directives._add_directive(_PROCESSOR_FRACTION) + directives._add_directive(last_directive) def test_set_defined_directive(self, directives): - directives._set_defined_directive(_NP._name, 10) - assert directives[_NP._name] == 10 + directives._set_defined_directive(_PROCESSES._name, 10) + assert directives[_PROCESSES._name] == 10 def test_set_defined_directive_invalid(self, directives): with pytest.raises(ValueError): - directives._set_defined_directive(_NP._name, 0) + directives._set_defined_directive(_PROCESSES._name, 0) def test_set_undefined_directive(self, directives): with pytest.raises(DirectivesError): @@ -234,85 +154,32 @@ def test_set_directives_item(self, directives): def test_del_directive(self, directives): directives["test"] = True - directives._set_defined_directive(_NP._name, 100) - assert directives[_NP._name] == 100 + directives._set_defined_directive(_PROCESSES._name, 100) + assert directives[_PROCESSES._name] == 100 assert directives["test"] - del directives[_NP._name] - assert directives[_NP._name] == _NP._default + del directives[_PROCESSES._name] + assert directives[_PROCESSES._name] == _PROCESSES._default del directives["test"] with pytest.raises(KeyError): directives["test"] - def test_update_directive_without_aggregate( - self, directives, non_default_directive_values - ): - valid_values_1 = non_default_directive_values[1] - expected_values = { - "np": 4, - "ngpu": 1, - "nranks": 0, - "omp_num_threads": 10, - "executable": "PathFinder", - "walltime": datetime.timedelta(hours=20.0), - "memory": 16, - "processor_fraction": 0.5, - } - directives.update(valid_values_1) - for dirs in directives: - assert directives[dirs] == expected_values[dirs] - - def test_update_directive_serial( - self, available_directives_list, non_default_directive_values - ): - directives1 = _Directives(available_directives_list) - directives2 = _Directives(available_directives_list) - valid_values_0 = non_default_directive_values[0] - valid_values_1 = non_default_directive_values[1] - expected_values = { - "np": 100, - "ngpu": 10, - "nranks": 5, - "omp_num_threads": 20, - "executable": "Non Default Path", - "walltime": datetime.timedelta(hours=84.0), 
- "memory": 32, - "processor_fraction": 0.5, - } - directives1.update(valid_values_0) - directives2.update(valid_values_1) - directives1.update(directives2, aggregate=True) - for dirs in directives1: - assert directives1[dirs] == expected_values[dirs] - - def test_update_directive_parallel( - self, available_directives_list, non_default_directive_values - ): - directives1 = _Directives(available_directives_list) - directives2 = _Directives(available_directives_list) - valid_values_0 = non_default_directive_values[0] - valid_values_1 = non_default_directive_values[1] - expected_values = { - "np": 104, - "ngpu": 11, - "nranks": 5, - "omp_num_threads": 30, - "executable": "Non Default Path", - "walltime": datetime.timedelta(hours=64.0), - "memory": 48, - "processor_fraction": 0.5, - } - directives1.update(valid_values_0) - directives2.update(valid_values_1) - directives1.update(directives2, aggregate=True, parallel=True) - for dirs in directives1: - assert directives1[dirs] == expected_values[dirs] + def test_update(self, directives, non_default_directive_values): + new_directives = non_default_directive_values[1] + directives.update(new_directives) + for dir_ in new_directives: + if dir_ == "walltime": + assert directives[dir_] == datetime.timedelta( + hours=new_directives[dir_] + ) + else: + assert directives[dir_] == new_directives[dir_] def test_evaluate_directive_none_job( self, directives, non_default_directive_values ): directives.evaluate(None) valid_values = non_default_directive_values[0] - valid_values["processor_fraction"] = lambda job: job.sp.i / 10 + valid_values["processes"] = lambda job: job.sp.i + 1 directives.update(valid_values) with pytest.raises(RuntimeError): directives.evaluate(None) @@ -326,13 +193,8 @@ def test_evaluate_directive_valid_job( for i in range(5): project.open_job(dict(i=i)).init() - valid_values = non_default_directive_values[0] - valid_values["processor_fraction"] = lambda job: round(job.sp.i / 10, 1) - for job in project: directives = _Directives(available_directives_list) - directives.update( - {"processor_fraction": lambda job: round(job.sp.i / 10, 1)} - ) + directives.update({"processes": lambda job: job.sp.i + 1}) directives.evaluate((job,)) - assert directives["processor_fraction"] == round(job.sp.i / 10, 1) + assert directives["processes"] == job.sp.i + 1 From 5be19911df1331bcf95a3922658f99502dbe1a25 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Wed, 8 Nov 2023 17:17:19 -0500 Subject: [PATCH 09/28] refactor: Split _JobOperation into _JobOperation and _RunOperation --- flow/environment.py | 45 +++------ flow/project.py | 236 +++++++++++++++++++++++++------------------- 2 files changed, 144 insertions(+), 137 deletions(-) diff --git a/flow/environment.py b/flow/environment.py index fdf87926d..d35540f0a 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -353,12 +353,12 @@ def add_args(cls, parser): pass @classmethod - def _get_omp_prefix(cls, operation): + def _get_omp_prefix(cls, directives): """Get the OpenMP prefix based on the ``omp_num_threads`` directive. Parameters ---------- - operation : :class:`flow.project._JobOperation` + directives : dict[str, any] The operation to be prefixed. Returns @@ -367,22 +367,16 @@ def _get_omp_prefix(cls, operation): The prefix to be added to the operation's command. 
""" - return "export OMP_NUM_THREADS={}; ".format( - operation.directives["omp_num_threads"] - ) + return "export OMP_NUM_THREADS={}; ".format(directives["omp_num_threads"]) @classmethod - def _get_mpi_prefix(cls, operation, parallel): + def _get_mpi_prefix(cls, directives): """Get the MPI prefix based on the ``nranks`` directives. Parameters ---------- - operation : :class:`flow.project._JobOperation` + directives : dict[str, any] The operation to be prefixed. - parallel : bool - If True, operations are assumed to be executed in parallel, which - means that the number of total tasks is the sum of all tasks - instead of the maximum number of tasks. Default is set to False. Returns ------- @@ -390,28 +384,18 @@ def _get_mpi_prefix(cls, operation, parallel): The prefix to be added to the operation's command. """ - if operation.directives.get("nranks"): - return "{} -n {} ".format(cls.mpi_cmd, operation.directives["nranks"]) + if directives.get("nranks"): + return "{} -n {} ".format(cls.mpi_cmd, directives["nranks"]) return "" @template_filter - def get_prefix(cls, operation, parallel=False, mpi_prefix=None, cmd_prefix=None): + def get_prefix(cls, directives): """Template filter generating a command prefix from directives. Parameters ---------- operation : :class:`flow.project._JobOperation` The operation to be prefixed. - parallel : bool - If True, operations are assumed to be executed in parallel, which means - that the number of total tasks is the sum of all tasks instead of the - maximum number of tasks. Default is set to False. - mpi_prefix : str - User defined mpi_prefix string. Default is set to None. - This will be deprecated and removed in the future. - cmd_prefix : str - User defined cmd_prefix string. Default is set to None. - This will be deprecated and removed in the future. Returns ------- @@ -420,16 +404,9 @@ def get_prefix(cls, operation, parallel=False, mpi_prefix=None, cmd_prefix=None) """ prefix = "" - if operation.directives.get("omp_num_threads"): - prefix += cls._get_omp_prefix(operation) - if mpi_prefix: - prefix += mpi_prefix - else: - prefix += cls._get_mpi_prefix(operation, parallel) - if cmd_prefix: - prefix += cmd_prefix - # if cmd_prefix and if mpi_prefix for backwards compatibility - # Can change to get them from directives for future + if directives.get("omp_num_threads"): + prefix += cls._get_omp_prefix(directives) + prefix += cls._get_mpi_prefix(directives) return prefix @classmethod diff --git a/flow/project.py b/flow/project.py index 26a4a7ebd..3d0430a53 100644 --- a/flow/project.py +++ b/flow/project.py @@ -277,22 +277,18 @@ def _make_bundles(operations, size=None): class _JobOperation: - """Class containing execution information for one group and one job. + """Class containing execution or submission information for one group and one aggregate. - The execution or submission of a :class:`~.FlowGroup` uses a passed-in command - which can either be a string or function with no arguments that returns a shell - executable command. The shell executable command won't be used if it is - determined that the group can be executed without forking. - - .. note:: + The class serves as a helper class to :class:`~._RunOperation` and + :class:`~._SubmissionOperation`. - This class is used by the :class:`~.FlowGroup` class for the execution and - submission process and should not be instantiated by users themselves. 
+ Note + ---- + This class and subclasses are used by the :class:`~.FlowGroup` class for the execution and + submission process and should not be instantiated by users themselves. Parameters ---------- - id : str - The id of this _JobOperation instance. The id should be unique. name : str The name of the _JobOperation. jobs : tuple of :class:`~signac.job.Job` @@ -300,57 +296,27 @@ class _JobOperation: cmd : callable or str The command that executes this operation. Can be a callable that when evaluated returns a string. - directives : dict - A `dict` object of additional parameters that provide instructions on - how to execute this operation, e.g., specifically required resources. - user_directives : set - Keys in ``directives`` that correspond to user-specified directives - that are not part of the environment's standard directives. """ - def __init__(self, id, name, jobs, cmd, directives, user_directives): - self._id = id + def __init__(self, name, jobs, cmd): self.name = name self._jobs = jobs if not (callable(cmd) or isinstance(cmd, str)): raise ValueError("cmd must be a callable or string.") self._cmd = cmd - # We use a special dictionary that tracks all keys that have been - # evaluated by the template engine and compare them to those explicitly - # set by the user. See also comment below. - self.directives = _TrackGetItemDict(directives) - - # Keys which were explicitly set by the user, but are not evaluated by - # the template engine are cause for concern and might hint at a bug in - # the template script or ill-defined directives. We are therefore - # keeping track of all keys set by the user and check whether they have - # been evaluated by the template script engine later. - self.directives._keys_set_by_user = user_directives - def __str__(self): aggregate_id = get_aggregate_id(self._jobs) return f"{self.name}({aggregate_id})" def __repr__(self): - return "{type}(name='{name}', jobs='{jobs}', cmd={cmd}, directives={directives})".format( + return "{type}(name='{name}', jobs='{jobs}', cmd={cmd})".format( type=type(self).__name__, name=self.name, jobs="(" + ", ".join(map(repr, self._jobs)) + ")", cmd=repr(self.cmd), - directives=self.directives, ) - def __hash__(self): - return hash(self.id) - - def __eq__(self, other): - return self.id == other.id - - @property - def id(self): - return self._id - @property def cmd(self): if callable(self._cmd): @@ -364,8 +330,49 @@ def cmd(self): return self._cmd +class _RunOperation(_JobOperation): + """Class containing execution information for one operation and one aggregate. + + The execution of a :class:`~.FlowOperation` uses a passed-in command + which can either be a string or function with no arguments that returns a shell + executable command. The shell executable command won't be used if it is + determined that the group can be executed without forking. + + .. note:: + + This class is used by the :class:`~.FlowGroup` class for the execution + process and should not be instantiated by users themselves. + + Parameters + ---------- + name : str + The name of the _JobOperation. + jobs : tuple of :class:`~signac.job.Job` + The jobs associated with this operation. + cmd : callable or str + The command that executes this operation. Can be a callable that when + evaluated returns a string. + fork : bool + Whether the operation needs to fork to execute correctly. See + :meth:`FlowGroup._fork_op` for logic. 
+ """ + + def __init__(self, name, jobs, cmd, fork): + super().__init__(name, jobs, cmd) + self.fork = fork + + def __repr__(self): + return "{type}(name='{name}', jobs='{jobs}', cmd={cmd}, fork={fork})".format( + type=type(self).__name__, + name=self.name, + jobs="(" + ", ".join(map(repr, self._jobs)) + ")", + cmd=repr(self.cmd), + fork=self.fork, + ) + + class _SubmissionJobOperation(_JobOperation): - r"""Class containing submission information for one group and one job. + r"""Class containing submission information for one group and one aggregate. This class extends :class:`_JobOperation` to include a set of groups that will be executed via the "run" command. These groups are known at @@ -373,8 +380,15 @@ class _SubmissionJobOperation(_JobOperation): Parameters ---------- - \*args - Passed to the constructor of :class:`_JobOperation`. + name : str + The name of the _JobOperation. + jobs : tuple of :class:`~signac.job.Job` + The jobs associated with this operation. + cmd : callable or str + The command that executes this operation. Can be a callable that when + evaluated returns a string. + directives_list : list[dict[str, any]] + List of directives for each operation in the flow group. eligible_operations : list A list of :class:`_JobOperation` that will be executed when this submitted job is executed. @@ -394,13 +408,20 @@ class _SubmissionJobOperation(_JobOperation): def __init__( self, - *args, + name, + job, + cmd, + id, + directives_list, eligible_operations=None, operations_with_unmet_preconditions=None, operations_with_met_postconditions=None, **kwargs, ): - super().__init__(*args, **kwargs) + super().__init__(name, job, cmd) + self._id = id + # Will need to handle user directives somehow. + self.directives_list = directives_list if eligible_operations is None: eligible_operations = [] @@ -414,6 +435,10 @@ def __init__( operations_with_met_postconditions = [] self.operations_with_met_postconditions = operations_with_met_postconditions + @property + def id(self): + return self._id + class _FlowCondition: """A _FlowCondition represents a condition as a function of a signac job. @@ -825,6 +850,16 @@ def _determine_entrypoint(self, entrypoint, directives, jobs): return "{} {}".format(entrypoint["executable"], entrypoint["path"]).lstrip() def _resolve_directives(self, name, defaults, env): + """Resolve a single operation's directives. + + Search for the operation in ``operation_directives`` first and if not + there use provided default if any. + + Note + ---- + Any unevaluated function directives will remain unevaluated, and must be + called before use. 
+ """ all_directives = env._get_default_directives() if name in self.operation_directives: all_directives.update(self.operation_directives[name]) @@ -841,12 +876,14 @@ def _submit_cmd(self, entrypoint, ignore_conditions, jobs): options += " --ignore-conditions=" + str(ignore_conditions) return " ".join((cmd, options)).strip() - def _run_cmd(self, entrypoint, operation_name, operation, directives, jobs): + def _run_cmd( + self, entrypoint, operation_name, operation, directives, jobs, environment + ): if isinstance(operation, FlowCmdOperation): return operation(*jobs).lstrip() entrypoint = self._determine_entrypoint(entrypoint, directives, jobs) cmd = f"{entrypoint} exec {operation_name} {get_aggregate_id(jobs)} {self.run_options}" - return cmd.strip() + return (environment.get_prefix(directives) + cmd).strip() def __iter__(self): yield from self.operations.values() @@ -1076,18 +1113,16 @@ def _get_run_ops(ignore_ops, additional_ignores_flag): eligible_operations, IgnoreConditions.POST ) - submission_job_operation = _SubmissionJobOperation( - self._generate_id(jobs), + return _SubmissionJobOperation( self.name, jobs, cmd=unevaluated_cmd, - directives=dict(submission_directives), - user_directives=set(submission_directives.user_keys), + id=self._generate_id(jobs), + directives=submission_directives, eligible_operations=eligible_operations, operations_with_unmet_preconditions=operations_with_unmet_preconditions, operations_with_met_postconditions=operations_with_met_postconditions, ) - return submission_job_operation def _create_run_job_operations( self, @@ -1119,8 +1154,8 @@ def _create_run_job_operations( Returns ------- - Iterator[_JobOperation] - Iterator of eligible instances of :class:`~._JobOperation`. + Iterator[_RunOperation] + Iterator of eligible instances of :class:`~._RunOperation`. """ # Assuming all the jobs belong to the same FlowProject @@ -1129,8 +1164,7 @@ def _create_run_job_operations( if operation._eligible(jobs, ignore_conditions): directives = self._resolve_directives( operation_name, default_directives, env - ) - directives.evaluate(jobs) + ).evaluate(jobs) # Return an unevaluated command to make evaluation lazy and # reduce side effects in callable FlowCmdOperations. unevaluated_cmd = _cached_partial( @@ -1140,46 +1174,51 @@ def _create_run_job_operations( operation=operation, directives=directives, jobs=jobs, + environment=env, ) - job_op = _JobOperation( - self._generate_id(jobs, operation_name), + yield _RunOperation( operation_name, jobs, cmd=unevaluated_cmd, - directives=dict(directives), - user_directives=set(directives.user_keys), + fork=self._fork_op(directives), ) - # Get the prefix, and if it's non-empty, set the fork directive - # to True since we must launch a separate process. Override - # the command directly. - prefix = jobs[0]._project._environment.get_prefix(job_op) - if prefix != "" or self.run_options != "": - job_op.directives["fork"] = True - job_op._cmd = f"{prefix} {job_op.cmd}" - yield job_op + + @staticmethod + def _fork_op(directives): + # TODO: note that since we use threads_per_process and not specifically + # omp_num_threads, we don't necessarily need to fork when setting + # threads_per_process, however, to correctly use OMP we do. Perhaps this + # is an argument for an omp directive. Otherwise, we need to fork here + # if that is set which we currently don't. 
+ return ( + directives["executable"] != sys.executable + or directives["launcher"] is not None + ) def _get_submission_directives(self, default_directives, jobs): - """Get the combined resources for submission. + """Get the resolved and evaluated resources for submission. No checks are done to mitigate inappropriate aggregation of operations. This can lead to poor utilization of computing resources. """ env = jobs[0]._project._environment operation_names = list(self.operations.keys()) - # The first operation's directives are evaluated, then all other - # operations' directives are applied as updates with aggregate=True - directives = self._resolve_directives( - operation_names[0], default_directives, env - ) - directives.evaluate(jobs) - for name in operation_names[1:]: - # get directives for operation - directives.update( - self._resolve_directives(name, default_directives, env), - aggregate=True, - jobs=jobs, + return [ + self._directives_to_track_dict( + self._resolve_directives(name, default_directives, env).evaluate(jobs), + set(env._get_default_directives().keys()), ) - return directives + for name in operation_names + ] + + def _directives_to_track_dict(directives, internal_keys): + """Convert evaluated directives to tracking dictionaries. + + Excludes environment/internal keys from tracking. + """ + dict_ = _TrackGetItemDict(**directives) + dict_._keys_used = internal_keys + return dict_ class _FlowProjectClass(type): @@ -3348,21 +3387,18 @@ class _PickleError(Exception): """Indicates a pickling error while trying to parallelize the execution of operations.""" @staticmethod - def _job_operation_to_tuple(operation): + def _run_operation_to_tuple(operation): return ( - operation.id, operation.name, [job.id for job in operation._jobs], operation.cmd, - operation.directives, + operation.fork, ) - def _job_operation_from_tuple(self, data): - id, name, job_ids, cmd, directives = data + def _run_operation_from_tuple(self, data): + name, job_ids, cmd, fork = data jobs = tuple(self.open_job(id=job_id) for job_id in job_ids) - return _JobOperation( - id, name, jobs, cmd, directives, directives._keys_set_by_user - ) + return _RunOperation(name, jobs, cmd, fork) def _run_operations_in_parallel(self, pool, operations, progress, timeout): """Execute operations in parallel. @@ -3394,7 +3430,7 @@ def _run_operations_in_parallel(self, pool, operations, progress, timeout): ( cloudpickle.loads, serialized_project, - self._job_operation_to_tuple(operation), + self._run_operation_to_tuple(operation), ) for operation in tqdm( operations, desc="Serialize tasks", file=sys.stderr @@ -3466,14 +3502,11 @@ def _execute_operation(self, operation, timeout=None, pretend=False): # Check if we need to fork for operation execution... if ( - # The 'fork' directive was provided and evaluates to True: - operation.directives.get("fork", False) + operation.fork # A separate process is needed to cancel with timeout: or timeout is not None # The operation function is an instance of FlowCmdOperation: or isinstance(self._operations[operation.name], FlowCmdOperation) - # The specified executable is not the same as the interpreter instance: - or operation.directives.get("executable", sys.executable) != sys.executable ): # ... 
need to fork: logger.debug( @@ -4075,10 +4108,7 @@ def _msg(group): keys_unused = { key for op in operations - for key in op.directives._keys_set_by_user.difference( - op.directives.keys_used - ) - if key not in ("fork", "nranks", "omp_num_threads") # ignore list + for key in op.directives.keys() - op.directives.keys_used } if keys_unused: logger.warning( @@ -5195,7 +5225,7 @@ class MyProject(FlowProject): def _deserialize_and_run_operation(loads, project, operation_data): project = loads(project) - project._execute_operation(project._job_operation_from_tuple(operation_data)) + project._execute_operation(project._run_operation_from_tuple(operation_data)) return None From 20bf55aabec949337d97497911100ed3fc54f9c3 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Wed, 8 Nov 2023 17:18:04 -0500 Subject: [PATCH 10/28] test: Remove fork directive tests. --- tests/define_test_project.py | 6 +----- tests/test_project.py | 15 --------------- 2 files changed, 1 insertion(+), 20 deletions(-) diff --git a/tests/define_test_project.py b/tests/define_test_project.py index 876e351fc..5f2aa4e25 100644 --- a/tests/define_test_project.py +++ b/tests/define_test_project.py @@ -51,13 +51,9 @@ def op1(job): return f'echo "hello" > {job.path}/world.txt' -def _need_to_fork(job): - return job.doc.get("fork") - - @group1 @_TestProject.post.true("test") -@_TestProject.operation(directives={"fork": _need_to_fork}) +@_TestProject.operation def op2(job): job.document.test = os.getpid() diff --git a/tests/test_project.py b/tests/test_project.py index 0758ae1e6..1cf115017 100644 --- a/tests/test_project.py +++ b/tests/test_project.py @@ -1109,21 +1109,6 @@ def op3(job): good_ops = all_ops.difference(bad_ops) assert all([job.doc.get(op, False) for op in good_ops for job in project]) - def test_run_fork(self): - project = self.mock_project() - for job in project: - job.doc.fork = True - break - - with setup_project_subprocess_execution(project): - project.run() - - for job in project: - if job.doc.get("fork"): - assert os.getpid() != job.doc.test - else: - assert os.getpid() == job.doc.test - def test_run_invalid_ops(self): class A(FlowProject): pass From 92a6ec68fbd83884d7cfba806ab6e36158d4a105 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 9 Nov 2023 11:53:58 -0500 Subject: [PATCH 11/28] fix: Errors in run operations from no _RunOperation.id --- flow/project.py | 51 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/flow/project.py b/flow/project.py index 3d0430a53..cea748922 100644 --- a/flow/project.py +++ b/flow/project.py @@ -289,6 +289,9 @@ class _JobOperation: Parameters ---------- + id : str + Unique id for the execution or submission unit. The id is needed for + execution counting in running and unique scheduler ids in submission. name : str The name of the _JobOperation. jobs : tuple of :class:`~signac.job.Job` @@ -298,7 +301,8 @@ class _JobOperation: evaluated returns a string. """ - def __init__(self, name, jobs, cmd): + def __init__(self, id, name, jobs, cmd): + self._id = id self.name = name self._jobs = jobs if not (callable(cmd) or isinstance(cmd, str)): @@ -329,6 +333,16 @@ def cmd(self): return self._cmd() return self._cmd + @property + def id(self): + return self._id + + def __hash__(self): + return hash(self._id) + + def __eq__(self, other): + return self.id == other.id + class _RunOperation(_JobOperation): """Class containing execution information for one operation and one aggregate. 
@@ -345,6 +359,8 @@ class _RunOperation(_JobOperation): Parameters ---------- + id : str + Unique id for the execution unit. name : str The name of the _JobOperation. jobs : tuple of :class:`~signac.job.Job` @@ -357,8 +373,8 @@ class _RunOperation(_JobOperation): :meth:`FlowGroup._fork_op` for logic. """ - def __init__(self, name, jobs, cmd, fork): - super().__init__(name, jobs, cmd) + def __init__(self, id, name, jobs, cmd, fork): + super().__init__(id, name, jobs, cmd) self.fork = fork def __repr__(self): @@ -380,6 +396,8 @@ class _SubmissionJobOperation(_JobOperation): Parameters ---------- + id : str + Unique id for the submission unit. name : str The name of the _JobOperation. jobs : tuple of :class:`~signac.job.Job` @@ -408,18 +426,17 @@ class _SubmissionJobOperation(_JobOperation): def __init__( self, + id, name, job, cmd, - id, directives_list, eligible_operations=None, operations_with_unmet_preconditions=None, operations_with_met_postconditions=None, **kwargs, ): - super().__init__(name, job, cmd) - self._id = id + super().__init__(id, name, job, cmd) # Will need to handle user directives somehow. self.directives_list = directives_list @@ -435,10 +452,6 @@ def __init__( operations_with_met_postconditions = [] self.operations_with_met_postconditions = operations_with_met_postconditions - @property - def id(self): - return self._id - class _FlowCondition: """A _FlowCondition represents a condition as a function of a signac job. @@ -1114,10 +1127,10 @@ def _get_run_ops(ignore_ops, additional_ignores_flag): ) return _SubmissionJobOperation( - self.name, - jobs, - cmd=unevaluated_cmd, id=self._generate_id(jobs), + name=self.name, + jobs=jobs, + cmd=unevaluated_cmd, directives=submission_directives, eligible_operations=eligible_operations, operations_with_unmet_preconditions=operations_with_unmet_preconditions, @@ -1177,21 +1190,22 @@ def _create_run_job_operations( environment=env, ) yield _RunOperation( + self._generate_id(jobs, operation_name), operation_name, jobs, cmd=unevaluated_cmd, fork=self._fork_op(directives), ) - @staticmethod - def _fork_op(directives): + def _fork_op(self, directives): # TODO: note that since we use threads_per_process and not specifically # omp_num_threads, we don't necessarily need to fork when setting # threads_per_process, however, to correctly use OMP we do. Perhaps this # is an argument for an omp directive. Otherwise, we need to fork here # if that is set which we currently don't. return ( - directives["executable"] != sys.executable + len(self.run_options) > 0 + or directives["executable"] != sys.executable or directives["launcher"] is not None ) @@ -3389,6 +3403,7 @@ class _PickleError(Exception): @staticmethod def _run_operation_to_tuple(operation): return ( + operation.id, operation.name, [job.id for job in operation._jobs], operation.cmd, @@ -3396,9 +3411,9 @@ def _run_operation_to_tuple(operation): ) def _run_operation_from_tuple(self, data): - name, job_ids, cmd, fork = data + id_, name, job_ids, cmd, fork = data jobs = tuple(self.open_job(id=job_id) for job_id in job_ids) - return _RunOperation(name, jobs, cmd, fork) + return _RunOperation(id_, name, jobs, cmd, fork) def _run_operations_in_parallel(self, pool, operations, progress, timeout): """Execute operations in parallel. 
From 0244e00fb3bd3238c38bfc89df219d15c836c967 Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 9 Nov 2023 11:54:56 -0500
Subject: [PATCH 12/28] test: Fix test of run_options

---
 tests/test_project.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_project.py b/tests/test_project.py
index 1cf115017..a3f252925 100644
--- a/tests/test_project.py
+++ b/tests/test_project.py
@@ -1878,7 +1878,7 @@ def op1(job):
             )
         )
         assert all(" --debug" in op.cmd for op in run_ops)
-        assert all(op.directives["fork"] for op in run_ops)
+        assert all(op.fork for op in run_ops)
 
 
 class TestGroupExecutionProject(TestProjectBase):

From 2782aaf6e18787c9d05ac5df12e274cbd10922c1 Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 9 Nov 2023 11:55:39 -0500
Subject: [PATCH 13/28] feat: Make _Directives.evaluate return self.

---
 flow/directives.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/flow/directives.py b/flow/directives.py
index 773b07586..95fa8159d 100644
--- a/flow/directives.py
+++ b/flow/directives.py
@@ -172,6 +172,7 @@ def evaluate(self, jobs):
         for key, value in self.items():
             self[key] = _evaluate(value, jobs)
         self._evaluated = True
+        return self
 
     @property
     def user_keys(self):  # noqa: D401

From e40dc2b650ab792b39517c3fb36e420cfc038b2b Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 16 Nov 2023 16:59:29 -0500
Subject: [PATCH 14/28] feat (WIP): "Working" submission logic for slurm.

---
 flow/directives.py                 | 164 +++++++++++++++++++++++++++--
 flow/environment.py                |  27 ++---
 flow/project.py                    |  30 ++++--
 flow/templates/anvil.sh            |  13 ---
 flow/templates/base_script.sh      |   5 -
 flow/templates/bridges2.sh         |  16 ---
 flow/templates/crusher.sh          |   4 -
 flow/templates/delta.sh            |  15 +--
 flow/templates/expanse.sh          |  16 ---
 flow/templates/frontier.sh         |   7 --
 flow/templates/slurm.sh            |  20 ++--
 flow/templates/umich-greatlakes.sh |  17 ---
 12 files changed, 199 insertions(+), 135 deletions(-)

diff --git a/flow/directives.py b/flow/directives.py
index 95fa8159d..ca893425b 100644
--- a/flow/directives.py
+++ b/flow/directives.py
@@ -8,10 +8,13 @@
 """
 import datetime
 import functools
+import operator
 import sys
+import warnings
 from collections.abc import MutableMapping
 
-from flow.errors import DirectivesError
+import flow.util.misc
+from flow.errors import DirectivesError, SubmitError
 
 
 class _Directive:
@@ -190,6 +193,156 @@ def _evaluate(value, jobs):
         return value
 
 
+def _list_of_dicts_to_dict_of_list(a):
+    """Convert a list of directive dicts into a dict of directive lists."""
+    if len(a) == 0:
+        return {}
+    # This None could be problematic, but we use it for directives that will
+    # always exist.
+ return {k: [m.get(k, None) for m in a] for k in a[0]} + + +def _get_directive_of_lists(directives_list): + directives = _list_of_dicts_to_dict_of_list(directives_list) + directives["cpus"] = flow.util.misc.list_op( + operator.mul, directives["processes"], directives["threads_per_process"] + ) + directives["gpus"] = flow.util.misc.list_op( + operator.mul, directives["processes"], directives["gpus_per_process"] + ) + directives["memory"] = flow.util.misc.list_op( + operator.mul, + directives["cpus"], + (0 if mem is None else mem for mem in directives["memory_per_cpu"]), + ) + return directives + + +def _check_compatible_directives(directives_of_lists): + """Routine checks for directives within a group.""" + if "mpi" in directives_of_lists["launcher"]: + if ( + len(set(directives_of_lists["gpus_per_process"])) != 1 + or len(set(directives_of_lists["threads_per_process"])) != 1 + or len(set(directives_of_lists["processes"])) != 1 + ): + raise SubmitError("Cannot submit non-homogeneous MPI jobs.") + if None in directives_of_lists["launcher"]: + raise SubmitError("Cannot submit MPI and nonMPI jobs together.") + else: + if len(set(directives_of_lists["gpus"])) > 1: + warnings.warn( + "Operations with varying numbers of GPUs are being submitted together.", + RuntimeWarning, + ) + if len(set(directives_of_lists["cpus"])) > 1: + warnings.warn( + "Operations with varying numbers of CPUs are being submitted together.", + RuntimeWarning, + ) + + +def _group_directive_aggregation(group_directives): + directives = _get_directive_of_lists(group_directives) + _check_compatible_directives(directives) + # Each group will have a primary operation (the one that requests the most + # resources. This may or may not be unique. We have to pick on for purposes + # of scheduling though to properly request resources. + if "mpi" in directives["launcher"]: + # All MPI operations must be homogeneous can pick any one and any non-MPI ones are subsets + # that should work correctly. + primary_operation_index = 0 + else: + primary_operation_index = flow.util.misc.argmax(directives["cpus"]) + primary_directive = group_directives[primary_operation_index] + primary_directive["walltime"] = sum( + (w for w in directives["walltime"] if w is not None), start=datetime.timedelta() + ) + # Handle memory. Since we have potentially nonheterogeneous requests, we + # need to check that the highest memory job has enough memory. + max_memory_index = flow.util.misc.argmax(directives["memory"]) + memory_per_cpu = ( + directives["memory"][max_memory_index] + / directives["cpus"][primary_operation_index] + ) + if memory_per_cpu != 0: + primary_directive["memory_per_cpu"] = memory_per_cpu + # TODO: Pretty sure this is broken for GPU submission with different number of GPUS. + return primary_directive + + +def _check_bundle_directives(list_of_directives, parallel): + if "mpi" in list_of_directives["launcher"] and parallel: + raise SubmitError("Cannot run MPI operations in parallel.") + _check_compatible_directives(list_of_directives) + + +def _bundle_directives_aggregation(list_of_directives, parallel): + directives_of_lists = _get_directive_of_lists(list_of_directives) + _check_compatible_directives(directives_of_lists) + # We know we don't have MPI operations here. 
+ if parallel: + memory = sum(filter(lambda x: x is not None, directives_of_lists["memory"])) + cpus = sum(directives_of_lists["cpus"]) + memory_per_cpu = None if memory == 0 else memory / cpus + return { + "launcher": None, + "walltime": max( + w for w in directives_of_lists["walltime"] if w is not None + ), + "processes": 1, + "threads_per_process": cpus, + "gpus_per_process": sum(directives_of_lists["gpus"]), + "memory_per_cpu": memory_per_cpu, + } + # Each group will have a primary operation (the one that requests the most + # resources. This may or may not be unique. We have to pick on for purposes + # of scheduling though to properly request resources. + walltime = sum( + (w for w in directives_of_lists["walltime"] if w is not None), + start=datetime.timedelta(), + ) + max_memory_index = flow.util.misc.argmax( + filter(lambda x: x is not None, directives_of_lists["memory"]) + ) + if "mpi" in directives_of_lists["launcher"]: + if max_memory_index is None: + memory_per_cpu = None + else: + memory_per_cpu = ( + directives_of_lists["memory"][max_memory_index] + / directives_of_lists["cpus"][0] + ) + + # All MPI operations must be homogeneous can pick any one and any non-MPI ones are subsets + # that should work correctly. + primary_operation = list_of_directives[0] + return { + "launcher": primary_operation["launcher"], + "walltime": walltime, + "processes": primary_operation["processes"], + "threads_per_process": primary_operation["threads_per_process"], + "gpus_per_process": primary_operation["gpus_per_process"], + "memory_per_cpu": memory_per_cpu, + } + primary_operation_index = flow.util.misc.argmax(directives_of_lists["cpus"]) + if max_memory_index is None: + memory_per_cpu = None + else: + memory_per_cpu = ( + directives_of_lists["memory"][max_memory_index] + / directives_of_lists["cpus"][primary_operation_index] + ) + return { + "launcher": None, + "walltime": walltime, + "processes": 1, + "threads_per_process": directives_of_lists["cpus"][primary_operation_index], + "gpus_per_process": max(directives_of_lists["gpus_per_process"]), + "memory_per_cpu": memory_per_cpu, + } + + class _OnlyTypes: def __init__(self, *types, preprocess=None, postprocess=None): def identity(value): @@ -259,9 +412,9 @@ def _parse_walltime(walltime): """ if walltime is None: return None - if not isinstance(walltime, datetime.timedelta): - walltime = datetime.timedelta(hours=walltime) - return walltime + if isinstance(walltime, datetime.timedelta): + return walltime + return datetime.timedelta(hours=walltime) def _parse_memory(memory): @@ -310,7 +463,6 @@ def _parse_memory(memory): _natural_number = _OnlyTypes(int, postprocess=_raise_below(1)) _nonnegative_int = _OnlyTypes(int, postprocess=_raise_below(0)) _positive_real_walltime = _OnlyTypes( - float, datetime.timedelta, type(None), preprocess=_parse_walltime, @@ -423,7 +575,7 @@ def op2(job): """ _THREADS_PER_PROCESS = _Directive( - "threads_per_process", validator=_nonnegative_int, default=0 + "threads_per_process", validator=_nonnegative_int, default=1 ) _THREADS_PER_PROCESS.__doc__ = """The number of threads to use per process. Defaults to 0. 
diff --git a/flow/environment.py b/flow/environment.py index d35540f0a..bd6d121bf 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -24,6 +24,7 @@ _PROCESSES, _THREADS_PER_PROCESS, _WALLTIME, + _bundle_directives_aggregation, _Directives, ) from .errors import NoSchedulerError, SubmitError @@ -33,7 +34,7 @@ from .scheduling.pbs import PBSScheduler from .scheduling.simple_scheduler import SimpleScheduler from .scheduling.slurm import SlurmScheduler -from .util.template_filters import calc_num_nodes, calc_tasks +from .util.template_filters import calc_num_nodes logger = logging.getLogger(__name__) @@ -433,28 +434,18 @@ def _get_scheduler_values(cls, context): """ partition = cls._partition_config[context.get("partition", None)] force = context.get("force", False) - cpu_tasks_total = calc_tasks( - context["operations"], - "np", + directives = _bundle_directives_aggregation( + [op.primary_directives for op in context["operations"]], context.get("parallel", False), - context.get("force", False), - ) - gpu_tasks_total = calc_tasks( - context["operations"], - "ngpu", - context.get("parallel", False), - context.get("force", False), ) - num_nodes = partition.calculate_num_nodes( + cpu_tasks_total = directives["processes"] * directives["threads_per_process"] + gpu_tasks_total = directives["processes"] * directives["gpus_per_process"] + + directives["num_nodes"] = partition.calculate_num_nodes( cpu_tasks_total, gpu_tasks_total, force ) - - return { - "ncpu_tasks": cpu_tasks_total, - "ngpu_tasks": gpu_tasks_total, - "num_nodes": num_nodes, - } + return directives class StandardEnvironment(ComputeEnvironment): diff --git a/flow/project.py b/flow/project.py index fd72dc30e..37db3802b 100644 --- a/flow/project.py +++ b/flow/project.py @@ -44,7 +44,7 @@ aggregator, get_aggregate_id, ) -from .directives import _document_directive +from .directives import _document_directive, _group_directive_aggregation from .environment import ComputeEnvironment, get_environment, registered_environments from .errors import ( ConfigKeyError, @@ -405,6 +405,9 @@ class _SubmissionJobOperation(_JobOperation): cmd : callable or str The command that executes this operation. Can be a callable that when evaluated returns a string. + primary_directives : list[dict[str, any]] + Directives of the maximal job or directives such that all operations + have their resources met. directives_list : list[dict[str, any]] List of directives for each operation in the flow group. eligible_operations : list @@ -430,6 +433,7 @@ def __init__( name, job, cmd, + primary_directives, directives_list, eligible_operations=None, operations_with_unmet_preconditions=None, @@ -437,7 +441,7 @@ def __init__( **kwargs, ): super().__init__(id, name, job, cmd) - # Will need to handle user directives somehow. + self.primary_directives = primary_directives self.directives_list = directives_list if eligible_operations is None: @@ -772,20 +776,20 @@ class FlowGroup: Examples -------- - In the example below, the directives will be ``{'nranks': 4}`` for op1 and - ``{'nranks': 2, 'executable': 'python3'}`` for op2. + In the example below, the directives will be ``{'processes': 4}`` for op1 and + ``{'processes': 2, 'executable': 'python3'}`` for op2. .. 
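A hypothetical sketch of an operation under the reworked per-process
directives (names from the diff above; the memory unit is an assumption,
not confirmed by this patch):

    @FlowProject.operation(
        directives={
            "launcher": "mpi",         # launch via the environment's MPI command
            "processes": 4,
            "threads_per_process": 2,  # 4 * 2 = 8 CPUs in total
            "gpus_per_process": 1,     # 4 GPUs in total
            "memory_per_cpu": 2,       # assumed unit: GB per CPU
        }
    )
    def op(job):
        ...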
code-block:: python group = FlowProject.make_group(name='example_group') - @group(directives={"nranks": 4}) - @FlowProject.operation({"nranks": 2, "executable": "python3"}) + @group(directives={"processes": 4}) + @FlowProject.operation({"processes": 2, "executable": "python3"}) def op1(job): pass @group - @FlowProject.operation({"nranks": 2, "executable": "python3"}) + @FlowProject.operation({"processes": 2, "executable": "python3"}) def op2(job): pass @@ -1118,6 +1122,7 @@ def _get_run_ops(ignore_ops, additional_ignores_flag): submission_directives = self._get_submission_directives( default_directives, jobs ) + primary_directives = _group_directive_aggregation(submission_directives) eligible_operations = _get_run_ops([], IgnoreConditions.NONE) operations_with_unmet_preconditions = _get_run_ops( eligible_operations, IgnoreConditions.PRE @@ -1129,9 +1134,10 @@ def _get_run_ops(ignore_ops, additional_ignores_flag): return _SubmissionJobOperation( id=self._generate_id(jobs), name=self.name, - jobs=jobs, + job=jobs, cmd=unevaluated_cmd, - directives=submission_directives, + primary_directives=primary_directives, + directives_list=submission_directives, eligible_operations=eligible_operations, operations_with_unmet_preconditions=operations_with_unmet_preconditions, operations_with_met_postconditions=operations_with_met_postconditions, @@ -1225,6 +1231,7 @@ def _get_submission_directives(self, default_directives, jobs): for name in operation_names ] + @staticmethod def _directives_to_track_dict(directives, internal_keys): """Convert evaluated directives to tracking dictionaries. @@ -1461,7 +1468,7 @@ def hello(job): .. code-block:: python - @FlowProject.operation({"nranks": 4}) + @FlowProject.operation({"processes": 4, "launcher": "mpi"}) def mpi_hello(job): print("hello") @@ -4123,7 +4130,8 @@ def _msg(group): keys_unused = { key for op in operations - for key in op.directives.keys() - op.directives.keys_used + for directives in op.directives_list + for key in directives.keys() - directives.keys_used } if keys_unused: logger.warning( diff --git a/flow/templates/anvil.sh b/flow/templates/anvil.sh index 023957606..a49e1b965 100644 --- a/flow/templates/anvil.sh +++ b/flow/templates/anvil.sh @@ -1,20 +1,7 @@ {# Templated in accordance with: https://www.rcac.purdue.edu/knowledge/anvil/ #} {% extends "slurm.sh" %} -{% block tasks %} - {% if resources.num_nodes > 1 %} -#SBATCH -N {{ resources.num_nodes }} - {% endif %} -#SBATCH --ntasks={{ resources.ncpu_tasks }} - {% if 'gpu' in partition %} -#SBATCH --gpus={{ resources.ngpu_tasks }} - {% endif %} -{% endblock tasks %} {% block header %} {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH --account {{ account }} - {% endif %} # As of 2023-10-30, Anvil incorrectly binds ranks to cores with `mpirun -n`. # Disable core binding to work around this issue. export OMPI_MCA_hwloc_base_binding_policy="" diff --git a/flow/templates/base_script.sh b/flow/templates/base_script.sh index d46d03cfe..d4aeb4cf7 100644 --- a/flow/templates/base_script.sh +++ b/flow/templates/base_script.sh @@ -1,9 +1,4 @@ {# The following variables are available to all scripts. 
#} -{% if parallel %} - {% set np_global = operations|map(attribute='directives.np')|sum %} -{% else %} - {% set np_global = operations|map(attribute='directives.np')|max %} -{% endif %} {% block header %} {% block preamble %} {% endblock preamble %} diff --git a/flow/templates/bridges2.sh b/flow/templates/bridges2.sh index 25cdeb57f..acd683984 100644 --- a/flow/templates/bridges2.sh +++ b/flow/templates/bridges2.sh @@ -1,18 +1,2 @@ {# Templated in accordance with: https://www.psc.edu/resources/bridges-2/user-guide #} {% extends "slurm.sh" %} -{% block tasks %} - {% if resources.num_nodes > 1 or resources.ncpu_tasks >= 128 or resources.ngpu_tasks >= 8 %} -#SBATCH -N {{ resources.num_nodes }} - {% endif %} -#SBATCH --ntasks={{ resources.ncpu_tasks }} - {% if 'GPU' in partition %} -#SBATCH --gpus={{ resources.ngpu_tasks }} - {% endif %} -{% endblock tasks %} -{% block header %} - {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH -A {{ account }} - {% endif %} -{% endblock header %} diff --git a/flow/templates/crusher.sh b/flow/templates/crusher.sh index 842d10bb5..4218df7c9 100644 --- a/flow/templates/crusher.sh +++ b/flow/templates/crusher.sh @@ -5,9 +5,5 @@ {% endblock tasks %} {% block header %} {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH --account={{ account }} - {% endif %} #SBATCH --partition=batch {% endblock header %} diff --git a/flow/templates/delta.sh b/flow/templates/delta.sh index 4502ddf66..f3581a462 100644 --- a/flow/templates/delta.sh +++ b/flow/templates/delta.sh @@ -6,18 +6,5 @@ increased charges and is expected to be suitable for a minority of use cases." %} {% endif %} - {% if resources.num_nodes > 1 %} -#SBATCH -N {{ resources.num_nodes }} - {% endif %} -#SBATCH --ntasks={{ resources.ncpu_tasks }} - {% if "gpu" in partition %} -#SBATCH --gpus={{ resources.ngpu_tasks }} - {% endif %} -{% endblock tasks %} -{% block header %} {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH -A {{ account }} - {% endif %} -{% endblock header %} +{% endblock tasks %} diff --git a/flow/templates/expanse.sh b/flow/templates/expanse.sh index 013e0eeb1..1abbd9c62 100644 --- a/flow/templates/expanse.sh +++ b/flow/templates/expanse.sh @@ -1,18 +1,2 @@ {# Templated in accordance with: https://www.sdsc.edu/support/user_guides/expanse.html #} {% extends "slurm.sh" %} -{% block tasks %} - {% if "shared" not in partition %} -#SBATCH -N {{ resources.num_nodes }} - {% endif %} -#SBATCH --ntasks={{ resources.ncpus_tasks }} - {% if 'gpu' in partition %} -#SBATCH --gpus={{ resources.gpu_tasks }} - {% endif %} -{% endblock tasks %} -{% block header %} - {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH -A {{ account }} - {% endif %} -{% endblock header %} diff --git a/flow/templates/frontier.sh b/flow/templates/frontier.sh index dd60a01c1..54ea371e4 100644 --- a/flow/templates/frontier.sh +++ b/flow/templates/frontier.sh @@ -3,10 +3,3 @@ {% block tasks %} #SBATCH --nodes={{ resources.num_nodes }} {% endblock tasks %} -{% block header %} - {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH --account={{ account }} - {% endif %} -{% endblock header %} diff --git a/flow/templates/slurm.sh b/flow/templates/slurm.sh index 88f4c2747..d1807c221 100644 --- a/flow/templates/slurm.sh +++ 
b/flow/templates/slurm.sh
@@ -3,23 +3,27 @@ {% block preamble %}
 #!/bin/bash
 #SBATCH --job-name="{{ id }}"
-    {% set memory_requested = operations | calc_memory(parallel) %}
-    {% if memory_requested %}
-#SBATCH --mem={{ memory_requested|format_memory }}
-    {% endif %}
     {% if partition %}
 #SBATCH --partition={{ partition }}
     {% endif %}
-    {% set walltime = operations | calc_walltime(parallel) %}
-    {% if walltime %}
-#SBATCH -t {{ walltime|format_timedelta }}
+    {% if resources.walltime is not none %}
+#SBATCH -t {{ resources.walltime|format_timedelta }}
     {% endif %}
     {% if job_output %}
 #SBATCH --output={{ job_output }}
 #SBATCH --error={{ job_output }}
     {% endif %}
+    {% set account = account|default(project|get_account_name, true) %}
+    {% if account %}
+#SBATCH --account={{ account }}
+    {% endif %}
 {% endblock preamble %}
 {% block tasks %}
-#SBATCH --ntasks={{ resources.ncpu_tasks }}
+#SBATCH --ntasks={{ resources.processes }}
+#SBATCH --cpus-per-task={{ resources.threads_per_process }}
+    {% if resources.memory_per_cpu is not none %}
+#SBATCH --mem-per-cpu={{ resources.memory_per_cpu }}
+    {% endif %}
+    {% if resources.gpus_per_process > 0 %}
+#SBATCH --gpus-per-task={{ resources.gpus_per_process }}
+    {% endif %}
 {% endblock tasks %}
{% endblock header %}
diff --git a/flow/templates/umich-greatlakes.sh b/flow/templates/umich-greatlakes.sh
index 6f90bdb6b..20a1c3899 100644
--- a/flow/templates/umich-greatlakes.sh
+++ b/flow/templates/umich-greatlakes.sh
@@ -1,18 +1 @@
 {% extends "slurm.sh" %}
-{% set partition = partition|default('standard', true) %}
-{% set nranks = (operations|calc_tasks("nranks", parallel, force), 1) | max %}
-{% block tasks %}
-#SBATCH --nodes={{ resources.num_nodes }}-{{ resources.num_nodes }}
-#SBATCH --ntasks={{ nranks }}
-#SBATCH --cpus-per-task={{ resources.ncpu_tasks // nranks}}
-    {% if partition == 'gpu' %}
-#SBATCH --gpus-per-task={{ resources.ngpu_tasks // nranks }}
-    {% endif %}
-{% endblock tasks %}
-{% block header %}
-    {{- super () -}}
-    {% set account = account|default(project|get_account_name, true) %}
-    {% if account %}
-#SBATCH --account={{ account }}
-    {% endif %}
-{% endblock header %}

From 5f3a8458f3c54db2d0ade6151688253a7e46533d Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 29 Feb 2024 15:38:19 -0500
Subject: [PATCH 15/28] feat: Add None-tolerant max, sum, and argmax functions

---
 flow/util/misc.py | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/flow/util/misc.py b/flow/util/misc.py
index 7d93c9da8..571bc2b3c 100644
--- a/flow/util/misc.py
+++ b/flow/util/misc.py
@@ -432,3 +432,31 @@ def _deprecated_warning(
 __all__ = [
     "redirect_log",
 ]
+
+
+def _argmax(a):
+    """Return the index of the largest value in ``a``."""
+    max_i = 0
+    max_ = None
+    for i, value in enumerate(a):
+        if max_ is None:
+            max_ = value
+            max_i = i
+        elif max_ < value:
+            max_ = value
+            max_i = i
+    return max_i
+
+
+def _tolerant_iter_function(func):
+    """Wrap ``func`` to skip ``None`` values and return ``None`` if none remain."""
+    def new_func(iterable, *args, **kwargs):
+        values = [i for i in iterable if i is not None]
+        if len(values) == 0:
+            return None
+        return func(values, *args, **kwargs)
+
+    return new_func
+
+
+_tolerant_argmax = _tolerant_iter_function(_argmax)
+_tolerant_max = _tolerant_iter_function(max)
+_tolerant_sum = _tolerant_iter_function(sum)

From 36b303e736df10a835a1d6db2ff1c0e3ce85e79a Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 29 Feb 2024 15:39:50 -0500
Subject: [PATCH 16/28] refactor: _Directives.evaluate now computes total cpus,
 gpus, and memory

This is useful for various group/bundling resource aggregation.
--- flow/directives.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/flow/directives.py b/flow/directives.py index ca893425b..4bda0e6c1 100644 --- a/flow/directives.py +++ b/flow/directives.py @@ -165,6 +165,9 @@ def evaluate(self, jobs): This method updates the directives in place, replacing callable directives with their evaluated values. + The method also provides common intermediate quantities for determining + resource submission (cpus, gpus, and total memory). + Parameters ---------- jobs : :class:`signac.job.Job` or tuple of :class:`signac.job.Job` @@ -175,7 +178,15 @@ def evaluate(self, jobs): for key, value in self.items(): self[key] = _evaluate(value, jobs) self._evaluated = True - return self + + directives = dict(self) + directives["cpus"] = directives["processes"] * directives["threads_per_process"] + directives["gpus"] = directives["processes"] * directives["gpus_per_process"] + if (memory := directives["memory_per_cpu"]) is not None: + directives["memory"] = directives["cpus"] * memory + else: + directives["memory"] = None + return directives @property def user_keys(self): # noqa: D401 From 76f300cee26bb7beecd0c7e4b5d6a6581e8d3aa9 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:42:44 -0500 Subject: [PATCH 17/28] refactor: Use total cpu, gpu, and memory internal directives We use these directives internally to help with scheduling. Even if used in templates, using the per-process first allows for more control over resource request and computing the totals later is cheap and simple. --- flow/directives.py | 101 ++++++++++++++++++--------------------------- 1 file changed, 40 insertions(+), 61 deletions(-) diff --git a/flow/directives.py b/flow/directives.py index 4bda0e6c1..e71fa3a34 100644 --- a/flow/directives.py +++ b/flow/directives.py @@ -8,7 +8,6 @@ """ import datetime import functools -import operator import sys import warnings from collections.abc import MutableMapping @@ -213,22 +212,6 @@ def _list_of_dicts_to_dict_of_list(a): return {k: [m.get(k, None) for m in a] for k in a[0]} -def _get_directive_of_lists(directives_list): - directives = _list_of_dicts_to_dict_of_list(directives_list) - directives["cpus"] = flow.util.misc.list_op( - operator.mul, directives["processes"], directives["threads_per_process"] - ) - directives["gpus"] = flow.util.misc.list_op( - operator.mul, directives["processes"], directives["gpus_per_process"] - ) - directives["memory"] = flow.util.misc.list_op( - operator.mul, - directives["cpus"], - (0 if mem is None else mem for mem in directives["memory_per_cpu"]), - ) - return directives - - def _check_compatible_directives(directives_of_lists): """Routine checks for directives within a group.""" if "mpi" in directives_of_lists["launcher"]: @@ -254,7 +237,7 @@ def _check_compatible_directives(directives_of_lists): def _group_directive_aggregation(group_directives): - directives = _get_directive_of_lists(group_directives) + directives = _list_of_dicts_to_dict_of_list(group_directives) _check_compatible_directives(directives) # Each group will have a primary operation (the one that requests the most # resources. This may or may not be unique. We have to pick on for purposes @@ -262,23 +245,17 @@ def _group_directive_aggregation(group_directives): if "mpi" in directives["launcher"]: # All MPI operations must be homogeneous can pick any one and any non-MPI ones are subsets # that should work correctly. 
- primary_operation_index = 0 + primary_directive = group_directives[0] else: - primary_operation_index = flow.util.misc.argmax(directives["cpus"]) - primary_directive = group_directives[primary_operation_index] - primary_directive["walltime"] = sum( - (w for w in directives["walltime"] if w is not None), start=datetime.timedelta() - ) - # Handle memory. Since we have potentially nonheterogeneous requests, we - # need to check that the highest memory job has enough memory. - max_memory_index = flow.util.misc.argmax(directives["memory"]) - memory_per_cpu = ( - directives["memory"][max_memory_index] - / directives["cpus"][primary_operation_index] + primary_operation_index = flow.util.misc._tolerant_argmax(directives["cpus"]) + primary_directive = group_directives[primary_operation_index] + primary_directive["gpus"] = max(directives["gpus"]) + primary_directive["cpus"] = max(directives["cpus"]) + primary_directive["memory"] = flow.util.misc._tolerant_max(directives["memory"]) + + primary_directive["walltime"] = flow.util.misc._tolerant_sum( + directives["walltime"], start=datetime.timedelta() ) - if memory_per_cpu != 0: - primary_directive["memory_per_cpu"] = memory_per_cpu - # TODO: Pretty sure this is broken for GPU submission with different number of GPUS. return primary_directive @@ -289,45 +266,43 @@ def _check_bundle_directives(list_of_directives, parallel): def _bundle_directives_aggregation(list_of_directives, parallel): - directives_of_lists = _get_directive_of_lists(list_of_directives) + directives_of_lists = _list_of_dicts_to_dict_of_list(list_of_directives) _check_compatible_directives(directives_of_lists) # We know we don't have MPI operations here. if parallel: - memory = sum(filter(lambda x: x is not None, directives_of_lists["memory"])) cpus = sum(directives_of_lists["cpus"]) - memory_per_cpu = None if memory == 0 else memory / cpus + gpus = sum(directives_of_lists["gpus"]) + memory = flow.util.misc._tolerant_sum(directives_of_lists["memory"]) + memory_per_cpu = None if memory is None else memory / cpus return { "launcher": None, - "walltime": max( - w for w in directives_of_lists["walltime"] if w is not None - ), + "walltime": flow.util.misc._tolerant_max(directives_of_lists["walltime"]), "processes": 1, "threads_per_process": cpus, - "gpus_per_process": sum(directives_of_lists["gpus"]), + "gpus_per_process": gpus, "memory_per_cpu": memory_per_cpu, + "cpus": cpus, + "gpus": gpus, + "memory": memory, } # Each group will have a primary operation (the one that requests the most # resources. This may or may not be unique. We have to pick on for purposes # of scheduling though to properly request resources. - walltime = sum( - (w for w in directives_of_lists["walltime"] if w is not None), - start=datetime.timedelta(), - ) - max_memory_index = flow.util.misc.argmax( - filter(lambda x: x is not None, directives_of_lists["memory"]) + walltime = flow.util.misc._tolerant_sum( + directives_of_lists["walltime"], start=datetime.timedelta() ) if "mpi" in directives_of_lists["launcher"]: + max_memory_index = flow.util.misc._tolerant_argmax( + filter(lambda x: x is not None, directives_of_lists["memory"]) + ) if max_memory_index is None: memory_per_cpu = None else: - memory_per_cpu = ( - directives_of_lists["memory"][max_memory_index] - / directives_of_lists["cpus"][0] - ) - + memory_per_cpu = directives_of_lists["memory_per_cpu"][max_memory_index] # All MPI operations must be homogeneous can pick any one and any non-MPI ones are subsets # that should work correctly. 
primary_operation = list_of_directives[0] + cpus = primary_operation["processes"] * primary_operation["threads_per_process"] return { "launcher": primary_operation["launcher"], "walltime": walltime, @@ -335,22 +310,26 @@ def _bundle_directives_aggregation(list_of_directives, parallel): "threads_per_process": primary_operation["threads_per_process"], "gpus_per_process": primary_operation["gpus_per_process"], "memory_per_cpu": memory_per_cpu, + "cpus": cpus, + "gpus": primary_operation["processes"] + * primary_operation["gpus_per_process"], + "memory": None if memory_per_cpu is None else cpus * memory_per_cpu, } - primary_operation_index = flow.util.misc.argmax(directives_of_lists["cpus"]) - if max_memory_index is None: - memory_per_cpu = None - else: - memory_per_cpu = ( - directives_of_lists["memory"][max_memory_index] - / directives_of_lists["cpus"][primary_operation_index] - ) + # Serial non-MPI + cpus = max(directives_of_lists["cpus"]) + total_memory = flow.util.misc._tolerant_max(directives_of_lists["memory"]) + memory_per_cpu = None if total_memory is None else total_memory / cpus + gpus = max(directives_of_lists["gpus"]) return { "launcher": None, "walltime": walltime, "processes": 1, - "threads_per_process": directives_of_lists["cpus"][primary_operation_index], - "gpus_per_process": max(directives_of_lists["gpus_per_process"]), + "threads_per_process": cpus, + "gpus_per_process": gpus, "memory_per_cpu": memory_per_cpu, + "cpus": cpus, + "gpus": gpus, + "memory": total_memory, } From 21239899a090dcfbda59835ed02f580f5c202726 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:46:10 -0500 Subject: [PATCH 18/28] refactor: MPI/cmd prefix logic Uses new directives and -per-task options. --- flow/environment.py | 16 ++++-- flow/environments/incite.py | 109 ++++++------------------------------ 2 files changed, 27 insertions(+), 98 deletions(-) diff --git a/flow/environment.py b/flow/environment.py index 1fdb03946..f787939a4 100644 --- a/flow/environment.py +++ b/flow/environment.py @@ -358,7 +358,7 @@ def add_args(cls, parser): @classmethod def _get_omp_prefix(cls, directives): - """Get the OpenMP prefix based on the ``omp_num_threads`` directive. + """Get the OpenMP prefix based on the ``threads_per_process`` directive. Parameters ---------- @@ -371,7 +371,7 @@ def _get_omp_prefix(cls, directives): The prefix to be added to the operation's command. """ - return "export OMP_NUM_THREADS={}; ".format(directives["omp_num_threads"]) + return "export OMP_NUM_THREADS={}; ".format(directives["threads_per_process"]) @classmethod def _get_mpi_prefix(cls, directives): @@ -388,9 +388,13 @@ def _get_mpi_prefix(cls, directives): The prefix to be added to the operation's command. 
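 
         A sketch of the expected output for hypothetical directives,
         assuming ``mpi_cmd`` is ``srun``:
 
         .. code-block:: python
 
             cls._get_mpi_prefix(
                 {"processes": 4, "threads_per_process": 2, "gpus_per_process": 1}
             )
             # -> 'srun --ntasks=4 --cpus-per-task=2 --gpus-per-task=1'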
""" - if directives.get("nranks"): - return "{} -n {} ".format(cls.mpi_cmd, directives["nranks"]) - return "" + processes = directives.get("processes", 0) + if processes == 0: + return "" + base_str = f"{cls.mpi_cmd} --ntasks={processes}" + base_str += f" --cpus-per-task={directives['threads_per_process']}" + base_str += f" --gpus-per-task={directives['gpus_per_process']}" + return base_str @template_filter def get_prefix(cls, directives): @@ -408,7 +412,7 @@ def get_prefix(cls, directives): """ prefix = "" - if directives.get("omp_num_threads"): + if directives.get("threads_per_process"): prefix += cls._get_omp_prefix(directives) prefix += cls._get_mpi_prefix(directives) return prefix diff --git a/flow/environments/incite.py b/flow/environments/incite.py index b89e656ea..4c50c057d 100644 --- a/flow/environments/incite.py +++ b/flow/environments/incite.py @@ -5,8 +5,6 @@ http://www.doeleadershipcomputing.org/ """ -from math import ceil, gcd - from ..environment import ( DefaultLSFEnvironment, DefaultSlurmEnvironment, @@ -14,7 +12,6 @@ _PartitionConfig, template_filter, ) -from ..util.template_filters import check_utilization class SummitEnvironment(DefaultLSFEnvironment): @@ -23,9 +20,9 @@ class SummitEnvironment(DefaultLSFEnvironment): Example:: @Project.operation(directives={ - "nranks": 3, # 3 MPI ranks per operation - "ngpu": 3, # 3 GPUs - "np": 3, # 3 CPU cores + "launcher": "mpi", # use MPI + "n_processes": 3, # 3 ranks + "gpus_per_process": 1, # 3 GPUs "rs_tasks": 3, # 3 tasks per resource set "extra_jsrun_args": '--smpiargs="-gpu"', # extra jsrun arguments }) @@ -89,15 +86,13 @@ def calc_num_nodes(cls, resource_sets, parallel=False): nodes_used_final = nodes_used return nodes_used_final - @template_filter - def guess_resource_sets(cls, operation): + def guess_resource_sets(cls, directives): """Determine the resources sets needed for an operation. Parameters ---------- - operation : :class:`flow.BaseFlowOperation` - The operation whose directives will be used to compute the resource - set. + directives : dict + The directives to use to compute the resource set. Returns ------- @@ -111,26 +106,21 @@ def guess_resource_sets(cls, operation): Number of GPUs per resource set. 
""" - ntasks = max(operation.directives.get("nranks", 1), 1) - np = operation.directives.get("np", ntasks) - - cpus_per_task = max(operation.directives.get("omp_num_threads", 1), 1) + ntasks = directives["processes"] + cpus_per_task = directives["threads_per_process"] # separate OMP threads (per resource sets) from tasks - np //= cpus_per_task - - np_per_task = max(1, np // ntasks) - ngpu = operation.directives.get("ngpu", 0) - g = gcd(ngpu, ntasks) - if ngpu >= ntasks: - nsets = ngpu // (ngpu // g) + gpus_per_process = directives["gpus_per_process"] + ngpus = gpus_per_process * ntasks + if ngpus >= ntasks: + nsets = gpus_per_process // (gpus_per_process // ngpus) else: - nsets = ntasks // (ntasks // g) + nsets = ntasks // (ntasks // ngpus) tasks_per_set = max(ntasks // nsets, 1) - tasks_per_set = max(tasks_per_set, operation.directives.get("rs_tasks", 1)) + tasks_per_set = max(tasks_per_set, directives.get("rs_tasks", 1)) - gpus_per_set = ngpu // nsets - cpus_per_set = tasks_per_set * cpus_per_task * np_per_task + gpus_per_set = gpus_per_process // nsets + cpus_per_set = tasks_per_set * cpus_per_task return nsets, tasks_per_set, cpus_per_set, gpus_per_set @@ -158,17 +148,13 @@ def jsrun_options(cls, resource_set): return f"-n {nsets} -a {tasks} -c {cpus} -g {gpus} {cuda_aware_mpi}" @classmethod - def _get_mpi_prefix(cls, operation, parallel): + def _get_mpi_prefix(cls, operation): """Get the jsrun options based on directives. Parameters ---------- operation : :class:`flow.project._JobOperation` The operation to be prefixed. - parallel : bool - If True, operations are assumed to be executed in parallel, which - means that the number of total tasks is the sum of all tasks - instead of the maximum number of tasks. Default is set to False. Returns ------- @@ -233,44 +219,6 @@ class CrusherEnvironment(DefaultSlurmEnvironment): mpi_cmd = "srun" - @template_filter - def calc_num_nodes(cls, ngpus, ncpus, threshold): - """Compute the number of nodes needed to meet the resource request. - - Also raise an error when the requested resource do not come close to saturating the asked - for nodes. - """ - nodes_gpu = max(1, int(ceil(ngpus / cls.gpus_per_node))) - nodes_cpu = max(1, int(ceil(ncpus / cls.cores_per_node))) - if nodes_gpu >= nodes_cpu: - check_utilization(nodes_gpu, ngpus, cls.gpus_per_node, threshold, "compute") - return nodes_gpu - check_utilization(nodes_cpu, ncpus, cls.cores_per_node, threshold, "compute") - return nodes_cpu - - @classmethod - def _get_mpi_prefix(cls, operation, parallel): - """Get the correct srun command for the job. - - We don't currently support CPU/GPU mapping and expect the program to do this in code. - """ - nranks = operation.directives.get("nranks", 0) - if nranks == 0: - return "" - ngpus = operation.directives["ngpu"] - np = operation.directives.get("np", 1) - omp_num_threads = max(operation.directives.get("omp_num_threads", 1), 1) - mpi_np_calc = nranks * omp_num_threads - if np > 1 and nranks > 1 and np != mpi_np_calc: - raise RuntimeWarning( - f"Using provided value for np={np}, which seems incompatible with MPI directives " - f"{mpi_np_calc}." - ) - base_str = f"{cls.mpi_cmd} --ntasks={nranks}" - threads = max(omp_num_threads, np) if nranks == 1 else max(1, omp_num_threads) - base_str += f" --cpus-per-task={threads} --gpus={ngpus}" - return base_str - class FrontierEnvironment(DefaultSlurmEnvironment): """Environment profile for the Frontier supercomputer. 
@@ -287,29 +235,6 @@ class FrontierEnvironment(DefaultSlurmEnvironment): ) mpi_cmd = "srun" - @classmethod - def _get_mpi_prefix(cls, operation, parallel): - """Get the correct srun command for the job. - - We don't currently support CPU/GPU mapping and expect the program to do this in code. - """ - nranks = operation.directives.get("nranks", 0) - if nranks == 0: - return "" - ngpus = operation.directives["ngpu"] - np = operation.directives.get("np", 1) - omp_num_threads = max(operation.directives.get("omp_num_threads", 1), 1) - mpi_np_calc = nranks * omp_num_threads - if np > 1 and nranks > 1 and np != mpi_np_calc: - raise RuntimeWarning( - f"Using provided value for np={np}, which seems incompatible with MPI directives " - f"{mpi_np_calc}." - ) - base_str = f"{cls.mpi_cmd} --ntasks={nranks}" - threads = max(omp_num_threads, np) if nranks == 1 else max(1, omp_num_threads) - base_str += f" --cpus-per-task={threads} --gpus={ngpus}" - return base_str - __all__ = [ "SummitEnvironment", From ca805eb9b7330eb7a9b0a75b0996e8886e3c714c Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:46:31 -0500 Subject: [PATCH 19/28] refactor: Remove Summit's calc_num_nodes --- flow/environments/incite.py | 44 ------------------------------------- 1 file changed, 44 deletions(-) diff --git a/flow/environments/incite.py b/flow/environments/incite.py index 4c50c057d..ab5fcce2a 100644 --- a/flow/environments/incite.py +++ b/flow/environments/incite.py @@ -42,50 +42,6 @@ def my_operation(job): ) @template_filter - def calc_num_nodes(cls, resource_sets, parallel=False): - """Compute the number of nodes needed. - - Parameters - ---------- - resource_sets : iterable of tuples - Resource sets for each operation, as a sequence of tuples of - *(Number of resource sets, tasks (MPI Ranks) per resource set, - physical cores (CPUs) per resource set, GPUs per resource set)*. - parallel : bool - Whether operations should run in parallel or serial. (Default value - = False) - - Returns - ------- - int - Number of nodes needed. - - """ - nodes_used_final = 0 - cores_used = gpus_used = nodes_used = 0 - for nsets, tasks, cpus_per_task, gpus in resource_sets: - if not parallel: - # In serial mode we reset for every operation. - cores_used = gpus_used = nodes_used = 0 - for _ in range(nsets): - cores_used += tasks * cpus_per_task - gpus_used += gpus - while cores_used > cls.cores_per_node or gpus_used > cls.gpus_per_node: - nodes_used += 1 - cores_used = max(0, cores_used - cls.cores_per_node) - gpus_used = max(0, gpus_used - cls.gpus_per_node) - if not parallel: - # Note that when running in serial the "leftovers" must be - # accounted for on a per-operation basis. - if cores_used > 0 or gpus_used > 0: - nodes_used += 1 - nodes_used_final = max(nodes_used, nodes_used_final) - if parallel: - if cores_used > 0 or gpus_used > 0: - nodes_used += 1 - nodes_used_final = nodes_used - return nodes_used_final - def guess_resource_sets(cls, directives): """Determine the resources sets needed for an operation. 
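A minimal sketch (not part of the series) of how the refactored prefix logic
from PATCH 18 composes, assuming a Slurm-style environment with
mpi_cmd = "srun". The _DemoEnvironment class and the directive values below
are illustrative only; the method bodies mirror _get_omp_prefix,
_get_mpi_prefix, and get_prefix as shown in that patch:

    class _DemoEnvironment:
        """Illustrative stand-in for a ComputeEnvironment subclass."""

        mpi_cmd = "srun"

        @classmethod
        def _get_omp_prefix(cls, directives):
            # Set OMP_NUM_THREADS from the threads_per_process directive.
            return "export OMP_NUM_THREADS={}; ".format(
                directives["threads_per_process"]
            )

        @classmethod
        def _get_mpi_prefix(cls, directives):
            # Build the launcher invocation from the new -per-task options.
            # Non-MPI operations (processes == 0) get no MPI prefix.
            processes = directives.get("processes", 0)
            if processes == 0:
                return ""
            return (
                f"{cls.mpi_cmd} --ntasks={processes}"
                f" --cpus-per-task={directives['threads_per_process']}"
                f" --gpus-per-task={directives['gpus_per_process']} "
            )

        @classmethod
        def get_prefix(cls, directives):
            # The OMP export precedes the MPI launcher, as in PATCH 18.
            prefix = ""
            if directives.get("threads_per_process"):
                prefix += cls._get_omp_prefix(directives)
            prefix += cls._get_mpi_prefix(directives)
            return prefix

    directives = {"processes": 4, "threads_per_process": 2, "gpus_per_process": 1}
    print(_DemoEnvironment.get_prefix(directives))
    # export OMP_NUM_THREADS=2; srun --ntasks=4 --cpus-per-task=2 --gpus-per-task=1

A hybrid MPI/OpenMP operation thus gets both the environment export and the
srun invocation prepended to its command, while a pure-OpenMP operation
(processes == 0) receives only the OMP_NUM_THREADS export.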
From 470157392aa77848445896bb6f8a8bd786a2938c Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:48:10 -0500 Subject: [PATCH 20/28] refactor: Update template filters --- flow/util/template_filters.py | 50 +++++++++++++---------------------- 1 file changed, 18 insertions(+), 32 deletions(-) diff --git a/flow/util/template_filters.py b/flow/util/template_filters.py index f7e4e8fe8..9d1f3240c 100644 --- a/flow/util/template_filters.py +++ b/flow/util/template_filters.py @@ -4,11 +4,11 @@ """Provide jinja2 template environment filter functions.""" import datetime import sys -from functools import partial from math import ceil from ..errors import ConfigKeyError, SubmitError from .config import get_config_value +from .misc import _tolerant_max, _tolerant_sum def identical(iterable): @@ -39,7 +39,11 @@ def homogeneous_openmp_mpi_config(operations): return ( len( { - (op.directives.get("nranks"), op.directives.get("omp_num_threads")) + ( + op.directives.get("n_processes"), + op.directives.get("threads_per_process"), + op.directives.get("gpus_per_process"), + ) for op in operations } ) @@ -47,28 +51,19 @@ def homogeneous_openmp_mpi_config(operations): ) -def with_np_offset(operations): - """Add the np_offset variable to the operations' directives.""" - offset = 0 - for operation in operations: - operation.directives.setdefault("np_offset", offset) - offset += operation.directives["np"] - return operations - - def calc_tasks(operations, name, parallel=False, allow_mixed=False): """Compute the number of tasks required for the given set of operations. Calculates the number of tasks for a specific processing unit requested in - the operations' directive, e.g., 'np' or 'ngpu'. + the operations' directive, e.g., 'n_processes' or 'gpus_per_process'. Parameters ---------- operations : :class:`~._JobOperation` The operations used to calculate the total number of required tasks. name : str - The name of the processing unit to calculate the tasks for, e.g., 'np' - or 'ngpu'. + The name of the processing unit to calculate the tasks for, e.g., + 'n_processes'. parallel : bool If True, operations are assumed to be executed in parallel, which means that the number of total tasks is the sum of all tasks instead of the @@ -91,10 +86,7 @@ def calc_tasks(operations, name, parallel=False, allow_mixed=False): set to True. """ - processing_units = [ - op.directives[name] * op.directives.get("processor_fraction", 1) - for op in operations - ] + processing_units = [op.primary_directives[name] for op in operations] if identical(processing_units) or allow_mixed: if len(processing_units) > 0: sum_processing_units = round(sum(processing_units)) @@ -139,7 +131,7 @@ def calc_memory(operations, parallel=False): Parameters ---------- operations : list - A list of :class:`~._JobOperation`\ s used to calculate the maximum + A list of :class:`~._SubmissionOperation`\ s used to calculate the maximum memory required. parallel : bool If True, operations are assumed to be executed in parallel, which @@ -152,8 +144,8 @@ def calc_memory(operations, parallel=False): float The reserved memory (numeric value) in gigabytes. """ - func = sum if parallel else max - return func(operation.directives["memory"] or 0 for operation in operations) + func = _tolerant_sum if parallel else _tolerant_max + return func(op.primary_directives["memory"] for op in operations) def calc_walltime(operations, parallel=False): @@ -175,17 +167,11 @@ def calc_walltime(operations, parallel=False): :class:`datetime.timedelta` The total walltime. 
""" - # Replace the sum function with partial(sum, start=datetime.timedelta()) - # when dropping Python 3.7 support. - func = ( - max - if parallel - else partial(lambda start, iterable: sum(iterable, start), datetime.timedelta()) - ) - return func( - operation.directives["walltime"] or datetime.timedelta() - for operation in operations - ) + walltimes = (op.primary_directives["walltime"] for op in operations) + if parallel: + return _tolerant_max(walltimes) + else: + return _tolerant_sum(walltimes, start=datetime.timedelta()) def check_utilization(nn, np, ppn, threshold=0.9, name=None): From 3b03c5a8eaeea079a1a467073debf1cbc4442b63 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:49:06 -0500 Subject: [PATCH 21/28] refactor: Attempt to update drexel configuration --- flow/environments/drexel.py | 6 +++++- flow/templates/drexel-picotte.sh | 26 -------------------------- 2 files changed, 5 insertions(+), 27 deletions(-) diff --git a/flow/environments/drexel.py b/flow/environments/drexel.py index 893364485..5dd485d01 100644 --- a/flow/environments/drexel.py +++ b/flow/environments/drexel.py @@ -2,7 +2,7 @@ # All rights reserved. # This software is licensed under the BSD 3-Clause License. """Drexel University HPC Environments.""" -from ..environment import DefaultSlurmEnvironment +from ..environment import DefaultSlurmEnvironment, _PartitionConfig class PicotteEnvironment(DefaultSlurmEnvironment): @@ -15,6 +15,10 @@ class PicotteEnvironment(DefaultSlurmEnvironment): hostname_pattern = r".*\.cm\.cluster$" template = "drexel-picotte.sh" + _partition_config = _PartitionConfig( + cpus_per_node={"default": 48}, gpus_per_node={"gpu": 4} + ) + @classmethod def add_args(cls, parser): """Add arguments to parser. diff --git a/flow/templates/drexel-picotte.sh b/flow/templates/drexel-picotte.sh index 2a1003c4d..03cd84c92 100644 --- a/flow/templates/drexel-picotte.sh +++ b/flow/templates/drexel-picotte.sh @@ -1,28 +1,2 @@ {% extends "slurm.sh" %} {% set partition = partition|default('standard', true) %} -{% block tasks %} - {% set threshold = 0 if force else 0.9 %} - {% set cpu_tasks = operations|calc_tasks('np', parallel, force) %} - {% set gpu_tasks = operations|calc_tasks('ngpu', parallel, force) %} - {% if gpu_tasks and 'gpu' not in partition and not force %} - {% raise "Requesting GPUs requires a gpu partition!" 
%} - {% endif %} - {% set nn_cpu = cpu_tasks|calc_num_nodes(48) if 'gpu' not in partition else cpu_tasks|calc_num_nodes(48) %} - {% set nn_gpu = gpu_tasks|calc_num_nodes(4) if 'gpu' in partition else 0 %} - {% set nn = nn|default((nn_cpu, nn_gpu)|max, true) %} - {% if partition == 'gpu' %} -#SBATCH --nodes={{ nn|default(1, true) }} -#SBATCH --ntasks-per-node={{ (gpu_tasks, cpu_tasks)|max }} -#SBATCH --gres=gpu:{{ gpu_tasks }} - {% else %}{# def partition #} -#SBATCH --nodes={{ nn }} -#SBATCH --ntasks-per-node={{ (48, cpu_tasks)|min }} - {% endif %} -{% endblock tasks %} -{% block header %} - {{- super () -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH --account={{ account }} - {% endif %} -{% endblock header %} From ea51b43ad6a7ba9264296533fdf1593b3c2f30c6 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:49:40 -0500 Subject: [PATCH 22/28] refactor: Update remaining templates to new directives --- flow/templates/andes.sh | 15 ++------------- flow/templates/pbs.sh | 4 ++-- flow/templates/slurm.sh | 16 +++++++++------- 3 files changed, 13 insertions(+), 22 deletions(-) diff --git a/flow/templates/andes.sh b/flow/templates/andes.sh index ee7cbd40c..c8e30eb76 100644 --- a/flow/templates/andes.sh +++ b/flow/templates/andes.sh @@ -2,20 +2,9 @@ {% extends "slurm.sh" %} {% block tasks %} {% if 'gpu' in partition %} - {% if resources.ncpu_tasks > resources.ngpu_tasks * 14 and not force %} + {% if resources.cpus > resources.gpus * 14 and not force %} {% raise "Cannot request more than 14 CPUs per GPU." %} {% endif %} {% endif %} -#SBATCH -N {{ resources.num_nodes }} -#SBATCH --ntasks={{ resources.ncpu_tasks }} - {% if partition == 'gpu' %} -#SBATCH --gpus={{ resources.ngpu_tasks }} - {% endif %} -{% endblock tasks %} -{% block header %} {{- super() -}} - {% set account = account|default(project|get_account_name, true) %} - {% if account %} -#SBATCH -A {{ account }} - {% endif %} -{% endblock header %} +{% endblock tasks %} diff --git a/flow/templates/pbs.sh b/flow/templates/pbs.sh index 08c28f9f1..71475e99a 100644 --- a/flow/templates/pbs.sh +++ b/flow/templates/pbs.sh @@ -20,8 +20,8 @@ {% endblock preamble %} {% block tasks %} {% set threshold = 0 if force else 0.9 %} - {% set s_gpu = ':gpus=1' if resources.ngpu_tasks else '' %} - {% set ppn = ppn|default(operations|calc_tasks('omp_num_threads', parallel, force), true) %} + {% set s_gpu = ':gpus={}'|format(resources.gpus_per_process) if resources.gpus_per_process else '' %} + {% set ppn = ppn|default(operations|calc_tasks('threads_per_process', parallel, force), true) %} {% if ppn %} #PBS -l nodes={{ resources.num_nodes }}:ppn={{ ppn }}{{ s_gpu }} {% else %} diff --git a/flow/templates/slurm.sh b/flow/templates/slurm.sh index d1807c221..9503e5e3f 100644 --- a/flow/templates/slurm.sh +++ b/flow/templates/slurm.sh @@ -3,16 +3,16 @@ {% block preamble %} #!/bin/bash #SBATCH --job-name="{{ id }}" - {% if partition %} + {% if partition %} #SBATCH --partition={{ partition }} - {% endif %} - {% if walltime != None %} + {% endif %} + {% if resources.walltime %} #SBATCH -t {{ resources.walltime|format_timedelta }} - {% endif %} - {% if job_output %} + {% endif %} + {% if job_output %} #SBATCH --output={{ job_output }} #SBATCH --error={{ job_output }} - {% endif %} + {% endif %} {% set account = account|default(project|get_account_name, true) %} {% if account %} #SBATCH --account={{ account }} @@ -21,7 +21,9 @@ {% block tasks %} #SBATCH --ntasks={{ resources.processes }} #SBATCH 
--cpus-per-task={{ resources.threads_per_process }}
-#SBATCH --mem-per-task={{ resources.memory_per_cpu }}
+    {% if resources.memory_per_cpu is not none %}
+#SBATCH --mem-per-cpu={{ resources.memory_per_cpu|format_memory }}
+    {% endif %}
     {% if resources.gpus_per_process > 0 %}
 #SBATCH --gpus-per-task={{ resources.gpus_per_process }}
     {% endif %}

From 1156b4fd12dbdf5005057fa32fbd42f53663cf4e Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 29 Feb 2024 15:50:51 -0500
Subject: [PATCH 23/28] fix: some documentation and miscellaneous code

---
 flow/project.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/flow/project.py b/flow/project.py
index 4544fcffb..16a8cc16e 100644
--- a/flow/project.py
+++ b/flow/project.py
@@ -406,7 +406,7 @@ class _SubmissionJobOperation(_JobOperation):
     cmd : callable or str
         The command that executes this operation. Can be a callable that when
         evaluated returns a string.
-    primary_directives : list[dict[str, any]]
+    primary_directives : dict[str, any]
         Directives of the maximal job or directives such that all operations
         have their resources met.
     directives_list : list[dict[str, any]]
@@ -1220,10 +1220,11 @@ def _create_run_job_operations(

     def _fork_op(self, directives):
         # TODO: note that since we use threads_per_process and not specifically
-        # omp_num_threads, we don't necessarily need to fork when setting
+        # OMP threads, we don't necessarily need to fork when setting
         # threads_per_process, however, to correctly use OMP we do. Perhaps this
         # is an argument for an omp directive. Otherwise, we need to fork here
-        # if that is set which we currently don't.
+        # if that is set, which we currently don't. Or allow for multiple
+        # launchers (consider OMP a launcher) and check for compatibility.
         return (
             len(self.run_options) > 0
             or directives["executable"] != sys.executable
@@ -1911,7 +1912,6 @@ def _setup_template_environment(self):
         ] = template_filters.format_timedelta
         template_environment.filters["format_memory"] = template_filters.format_memory
         template_environment.filters["identical"] = template_filters.identical
-        template_environment.filters["with_np_offset"] = template_filters.with_np_offset
         template_environment.filters["calc_tasks"] = template_filters.calc_tasks
         template_environment.filters["calc_num_nodes"] = template_filters.calc_num_nodes
         template_environment.filters["calc_walltime"] = template_filters.calc_walltime
@@ -4124,7 +4124,7 @@ def _submit_operations(

         Parameters
         ----------
-        operations : A sequence of instances of :class:`~._JobOperation`
+        operations : A sequence of instances of :class:`~._SubmissionOperation`
             The operations to submit.
         _id : str
             The _id to be used for this submission.
(Default value = None) From 1aec7f56682880fc18368a389410ceebc4485729 Mon Sep 17 00:00:00 2001 From: Brandon Butler Date: Thu, 29 Feb 2024 15:52:12 -0500 Subject: [PATCH 24/28] test: Update tests to new directives --- tests/define_template_test_project.py | 39 ++-- tests/define_test_project.py | 6 +- tests/generate_template_reference_data.py | 83 ++------- tests/template_reference_data.tar.gz | Bin 26535 -> 82773 bytes tests/test_directives.py | 2 +- tests/test_environment.py | 6 +- tests/test_project.py | 208 +++++++++++++--------- tests/test_util.py | 11 +- 8 files changed, 181 insertions(+), 174 deletions(-) diff --git a/tests/define_template_test_project.py b/tests/define_template_test_project.py index ad9977547..cb5e257d4 100644 --- a/tests/define_template_test_project.py +++ b/tests/define_template_test_project.py @@ -2,12 +2,12 @@ class TestProject(flow.FlowProject): - ngpu = 2 - np = 3 - omp_num_threads = 4 - nranks = 5 + gpus_per_process = 1 + processes = 3 + threads_per_process = 4 + launcher = "mpi" walltime = 1 - memory = "512m" + memory_per_cpu = "512m" group1 = TestProject.make_group(name="group1") @@ -20,25 +20,30 @@ def serial_op(job): @group1 -@TestProject.operation(directives={"np": TestProject.np}) +@TestProject.operation(directives={"processes": TestProject.processes}) def parallel_op(job): pass -@TestProject.operation(directives={"nranks": TestProject.nranks}) +@TestProject.operation( + directives={"processes": TestProject.processes, "launcher": TestProject.launcher} +) def mpi_op(job): pass -@TestProject.operation(directives={"omp_num_threads": TestProject.omp_num_threads}) +@TestProject.operation( + directives={"threads_per_process": TestProject.threads_per_process} +) def omp_op(job): pass @TestProject.operation( directives={ - "nranks": TestProject.nranks, - "omp_num_threads": TestProject.omp_num_threads, + "processes": TestProject.processes, + "threads_per_process": TestProject.threads_per_process, + "launcher": "mpi", } ) def hybrid_op(job): @@ -46,21 +51,29 @@ def hybrid_op(job): @TestProject.operation( - directives={"ngpu": TestProject.ngpu, "nranks": TestProject.ngpu} + directives={ + "gpus_per_process": TestProject.gpus_per_process, + "processes": TestProject.gpus_per_process, + "launcher": TestProject.launcher, + } ) def gpu_op(job): pass @TestProject.operation( - directives={"ngpu": TestProject.ngpu, "nranks": TestProject.nranks} + directives={ + "gpus_per_process": TestProject.gpus_per_process, + "processes": TestProject.processes, + "launcher": "mpi", + } ) def mpi_gpu_op(job): pass @group1 -@TestProject.operation(directives={"memory": TestProject.memory}) +@TestProject.operation(directives={"memory_per_cpu": TestProject.memory_per_cpu}) def memory_op(job): pass diff --git a/tests/define_test_project.py b/tests/define_test_project.py index 5f2aa4e25..d235401a5 100644 --- a/tests/define_test_project.py +++ b/tests/define_test_project.py @@ -44,7 +44,7 @@ def b_is_even(job): # The submit interface should warn about unused directives. 
"bad_directive": 0, # But not this one: - "np": 1, + "processes": 1, }, ) def op1(job): @@ -58,11 +58,11 @@ def op2(job): job.document.test = os.getpid() -@group2(directives={"omp_num_threads": 4}) +@group2(directives={"threads_per_process": 4}) @_TestProject.post.true("test3_true") @_TestProject.post.false("test3_false") @_TestProject.post.not_(lambda job: job.doc.test3_false) -@_TestProject.operation(directives={"ngpu": 1, "omp_num_threads": 1}) +@_TestProject.operation(directives={"gpus_per_process": 1, "threads_per_process": 1}) def op3(job): job.document.test3_true = True job.document.test3_false = False diff --git a/tests/generate_template_reference_data.py b/tests/generate_template_reference_data.py index ecc077c23..15e15a15f 100755 --- a/tests/generate_template_reference_data.py +++ b/tests/generate_template_reference_data.py @@ -27,6 +27,12 @@ ) PROJECT_DIRECTORY = '/home/user/path with spaces and "quotes" and \\backslashes/' MOCK_EXECUTABLE = "/usr/local/bin/python" +DEFAULT_BUNDLES = [ + ("omp_op", "parallel_op"), + ("mpi_op", "op", "memory_op"), + ("hybrid_op", "omp_op"), +] +DEFAULT_SETTING = {"bundles": DEFAULT_BUNDLES, "parallel": [True, False]} def cartesian(**kwargs): @@ -68,91 +74,42 @@ def init(project): { "partition": ["RM", "RM-shared", "GPU", "GPU-shared"], }, - { - "partition": ["RM"], - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + {"partition": ["RM"], **DEFAULT_SETTING}, ], "environments.umich.GreatLakesEnvironment": [ { "partition": ["standard", "gpu", "gpu_mig40"], }, - { - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, - ], - "environments.incite.SummitEnvironment": [ - {}, - { - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + DEFAULT_SETTING, ], + "environments.incite.SummitEnvironment": [{}, DEFAULT_SETTING], "environments.incite.AndesEnvironment": [ - { - "partition": ["batch", "gpu"], - }, - { - "partition": ["batch"], - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, - ], - "environments.umn.MangiEnvironment": [ - {}, - { - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + {"partition": ["batch", "gpu"]}, + {"partition": ["batch"], **DEFAULT_SETTING}, ], + "environments.umn.MangiEnvironment": [{}, DEFAULT_SETTING], "environments.xsede.ExpanseEnvironment": [ { "partition": ["compute", "shared", "gpu", "gpu-shared", "large-shared"], }, - { - "partition": ["compute"], - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + {"partition": ["compute"], **DEFAULT_SETTING}, ], "environments.drexel.PicotteEnvironment": [ { "partition": ["def", "gpu"], }, - { - "partition": ["def"], - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + {"partition": ["def"], **DEFAULT_SETTING}, ], "environments.xsede.DeltaEnvironment": [ { "partition": ["cpu", "gpuA40x4", "gpuA100x4"], }, - { - "partition": ["cpu"], - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, - ], - "environments.incite.CrusherEnvironment": [ - {}, - { - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + {"partition": ["cpu"], **DEFAULT_SETTING}, ], + "environments.incite.CrusherEnvironment": [{}, DEFAULT_SETTING], # Frontier cannot use partitions as logic requires gpu # in the name of partitions that are gpu nodes. 
- "environments.incite.FrontierEnvironment": [ - {}, - { - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, - ], + "environments.incite.FrontierEnvironment": [{}, DEFAULT_SETTING], "environments.purdue.AnvilEnvironment": [ { "partition": [ @@ -165,11 +122,7 @@ def init(project): "gpu", ], }, - { - "partition": ["wholenode"], - "parallel": [False, True], - "bundle": [["mpi_op", "omp_op"]], - }, + {"partition": ["wholenode"], **DEFAULT_SETTING}, ], } diff --git a/tests/template_reference_data.tar.gz b/tests/template_reference_data.tar.gz index 022be62f54ad23dfe89ca418285b39601d672039..2f5c468e3ba6e3615ceaef13bab4c50094a08e4a 100644 GIT binary patch literal 82773 zcmY(rc|26_8~<-#qDUc$Axg4kOSUGGJu0b0rtBnyP|_hJS+lQ=vKCTgNf=W0eGM`8 zCHp=zX3qURQ}6HR_x=6TLx0S1=04}Tuh;c@J#VNm78bvbaa%gzY5Ty<`JSgO#@*J= z*4@^{+7@GT&-0$N=RJ3trRw{xBF*t17dy8ld~EK1V0Po4dX^kPXH+gT#aCg+ULHXl zw~m_{zVuE1gUHpDouOZV!f=T@xk^{>n#~z`;z{SySB9tMlMjA=^VIt16MQ5-60G9C zVV|X17?z_|o3&?0Ouz}PmYLO(LGgRnVdt&h&eP7j-X`AW2S7>d?>xa#<4IM0#`p&J z?N`a&a|_cRPjnPNhnq+>nd^n0(5@lzmA!qiKtjciq%L+YJW+8WKil2O{dTx_1e0xS zvb)%D9`n0!FD++bXXOF$kxH_W%g4rvR!T)SShPg}zI!F0EB?H3a9_~A%a4Yl954Os z)GoFIySu%{3j=6n26%13js()3g4FOkJX7cfC>$IC7!?>FJkp%F5Q1`XUBPvgd!q(P zIIhX}>)3~NHyWXiFE(iY*C`aB*$A)zV~zeQ=DOP&oYD44q;Cv!PL-`>E%?QA$Yw^OMckli-36(VHY(}uO= z%71h6F`6XY{^M{>Tu%76>6A}c&elhT?3u;hgKvShE)ipmUwr>C4Q)sVF6EF9DVwP1sSVoFhHF_UyOc zL*dbg-;a*nq2P9vdOf|j*P=k=e#W^e%p&o*^*TEM$Cw9ZXBP`(ZtA<~GRq=?7V!Hi(A5Kg>F#2S341JzUD zXFE!aOL;fV?0$uzW>0}l*}&C?*3oPmW&-BZ@7N3(Bf8D7u)Mm>us{KUg`Y;ZBqI;l zw@T2Z1b|v-9PP~e17f>#j{}w{bPfdP2t|#g)n9DlF~vqFosJmaE{-nlkbP{OC2Ge3 z``(_{*G@STX&egX%X8Ll6Yhxg>nf^m1Y(4}=y(CIY{ zAgoTzMwN1E%@hZY+uoA3=oWpStxzKA%sFvt5~aR>T-`#0yVCdEHSRC@viz`*^0>+x zis0%)4j~(_;a$kyz)^*$@YM+QeU9)lK#HvM@TbNLxUfj(vXr(dzcEUQ)QH(of$Njfyew~I*t z@1v)kEpG3Zc1}3QzmM;oGe7lCbl^idYuK{+*l}Dg(RHJ~lntftnvx@<6uY=Ef*T#R zSp5zKZP-!lWs&WNg;3P6gDX1fP5Fb@zNxD4JRcMOF=095={$sy8vedy95?H>4Z$2F zo=M=cjy0*|1FVx!JVB=4Vduh@%cHwcQWXj`_*ELT3*+&aM&p^}`P9M9r=5eiBh@a3 z+c3gzq*S}^BS4-|3B4E7IKx(AixASyuJ^kF35SoC!)~-ZEZK5et zARBP#X@s^t>Pjd%{G44=x8+@!Y=X<;$OItopt*gX{elRF#mBNn^Ok`_CSqVdR%htXCD_O#|z6jxeHmINqN`&Z4;xk44wWo@ru&PIX$Ls zqr*O$_!CM3XcTzl2SXF5=n4EEV6pNw6g}9E0<$pshCW~EzTYLss##yNxt83_ws*2p zP<=u0zP5)w>RJ8?C@T(|dIH+P0B~Retx*fvPoYO1!R8)u8b{}A2~iFw&JsDcQBRtn z%K*FN7ukI3QtaCnaj`6}K6}TnM*1yNy8gN4;5}{N;trZwZUWvxcR?AzQw^3mW=pLi z4f3ix8QKDGg_-#SxfSdtPRIn!1-hNT{U*M3^2lqDg)E6C+yl?`Ibyg+k&j*g$}d6R zN#MAK5rBII=5VGud>6*o2vM)w$f zFus7SPV{&RvN}2RySD|xG>#&>4%l4B3~o}Nmue5nGy`{j0lEd0*b{Tba1El=+aS4UtHK)JC5Nf%_Xy|7@*mVtckplF| z_A0HAxqz(~{e`#5Cyp)vzj0j8&`c%hyhwr8@C)U4$WWQ*@L9GDbQN}a{g+D%8&s20 zo>qkQbymI!Ig-CECD==G3~2E3faKp?K%81`oeLW4P|c(_56di?_vf8AY)re@B7BYa zAlfix0y4NTXnt5gqa~AWl9)UNG|K%TfRYY=0Xlz%gD0(oPFeUS9|%6bNdWz`bFGxv zeW1*zmK&+A24KI5K8!yCPHaG`xghpfQhc`gi;Q5|vmQBltk-;IEznuIo zcR!eQtY_My<9zJW%kkTVy1>`;G5PKD9yIQS>+r|zBX;LGbW#@f9x$t3z(ZZXcew}) zY>aSYt7A1!_z+S8NQ+1ZZ$3W`5dMfdROJ}X!!?I};mHh9~s9mW02?dSYM&cAK-1@az0uIe;?r9kvB??)_bdV-zWNYi^T=E1U8j#KOHeU zs&Bi!PgE{)4hb|b@Ly+!Ro}to*>FA$=0vR8cTGAh+>M?ZM?OCeD;2k*$x}!2>X0HIQiKXu3dP4WFJGnsDliEtJx=+AIFkq-|*SR z@Z1oMPbzsqSo&CLvvmil`S2Q{rWp$>n08`7(SF#+-ns=4Nqs9@Lc6WSsfI zRq=E;6*Qd&p~~Ru8YW`-F<5;d;1#y*-8<(yrfH|z4oTl;sj4{$${V5M-rKCm@9u#P`fEH5Uq2Gv}t~kuKjO|M3se2;&Kw_ zq{)pxnSkRow0jE(_{RHvYJMe`^HTHq)sVJdlAEcQv=0qMfl9~yW&!(e76>y7@2&`| z9RKbAPZ?KF;fgGcT4CT!VMgRE!#0Z41bHBxO9#SeK_xtDhxuWJ??mJXa;unvX9 zN|`=rc(dKlh|Ron>AsMg0KpXvr>cW`FlR{CK~ZqV^mNNP=WE+(H}HQRxNd{)#Z6oc z>ol|zO>GYfI;mMV^!D1m9)EF?C-bFt*tENTV=Rra>;CKXOoN312W-Bp7Y6+8k{7he zXeTnpM_J$_IQ5)>|3@eaiuK|Tdd(6##+`-CCK{hk|N 
zie3#|pmtn;@Zu-*{V;4jUonnLC%E>vzE=lfF7s%ucd!W}tQUYamx11^+8;|e$<(|m zue0Yg17~3ezPq{QOdl>hzdvvxFzK&g)nxl8{LeDL_n#Mr4dyy*!87r1}9{yaM0qhBeMMP2FiIdmI%4tBN|IRLW>of~k0qXzOM?V2~kFIYPh@8W8 zg04JZVn1R45??Vme(4{wb-C3i`KyiHAV^Kk&85a?w|a`7*_-%-7qyJ#Ph2)l+o3W1l68r4?-+;x3}hc@958@ zq*H35;}>$v48F1tFKAQ5*1!+}Tbw{LmSL*P;ct^rbG6t9WL$msNa*U}@9+SSdZ72enu6Hb9q^ zj;!H-uHiSz$~Umn{M+bICa`@I2qX#``nV@^iWpAEF=W%rTk7gvk9mrL7xU=57KYsV zX&u~n>xTmY!Dof zr+v=!%L6w5z2gN?Wp9CFA2csam3cNd^!WKBI4s}0yJo*KnoVmpo)b~cVYpRCn@7j) za*jrzA;?#mMuXaHZiN#dG6APRtD*(&HGw}Es^}EoH9q5VP5c8k_kw;`bvP$5oD<0 zDtHf(2e$yL#paMRCgX+jeTLT84$Gc~3OovGR!p}Tk!KJ)UP<3IP8FXVVGltufrEr6 z5>}^)0C@^s|DFP>7(TR(_H9Bs5lZKdLb!*Hq*y}SNJ~~k-TZ}?xy%L9G^U<*eru`x z4~=K@i92T|FM>c==?ulSe?QX@hW9(vI`XWEWE{GUrA*;+K-A2QVWF(_p-KiZ1{1@O zIJs_7`&1)yxac|ZAiuheA`jB$!%B(|luOQ~qCIlTyyeKMLf2vLfrcvQbqEwRC)dM(zCK03*|5(bi|Ce{iY9^mmn z=SucC@IXe)5jj{Q9GEshwyMf-QEHQr;Fd2L>Hr}Il}O{Fc<*k3g2F)~zNw@&!#z1& zHi0xZt)w4%7TZAh%{eU3t+eO8(jJ;QPy)Iy=On5BkpmumkP1JzKVS=^ zYgHZ2J`B?F7hFf%YPs}7wLqOZ*>x^`wC4>!t`IHhxAi)@pyH6SyHn4BHQdD*4%N@Q z#*7)5by!)hH!Qt^5cdLoS00hI9Axh^Kww;e0bUdUp;DU=7kLmv@m^Bc)^^NKYJK6} z^7U;)Pr(;EHR97_uxjSEF{6UTXPEEh?{6XVEDBXe;CeD35{gDP0ox(IT10jh{jR+X z*OPF98%W)WiFFhk&YYN&+`T%|_O+{lr6bgI^3+7YfBTEv)M5OV50JI5<~!DJ2%6{mB}*BVa+USHAW z8eb@coDiY6-C1W?kxJ~0siQWn1vfG#sp-N$skcsTSW^$ePqVcs-;-m^4t(`o*G^O4 zh{-Pw=~s(bYQLhG2ruR%_p*hWNBb_Kk8Ptma==UJO}xgKhJWp|b$mgI2!Lp)HTG6P z?%mTqwRnMoxHOY`n@_jnuy0#q{VyXI60NYK$VV^KR`WDIXO&4sq%?FMUi5?k?U+Ue zZfqTOunC~(fD{EXjF!_+4Q;-i(5?2kBe_^sSunGsvOX3vK(4Y>Zbk8UeBw^6-#9Yc zb%8h2xWKBwXiC34va+4`){w<$OiXp~l>VY)xE14mv4VB1rC7{ZJH3F6M^@vs+eFHC z=Y=Lt+mTwxoM>DN_MFLL-xgN-RZ3O}rYv*>5yM|&$)Yh9_Ff?|5qyW+f z>B}3xoLeN2xlSpq2=&zY+`qZ*tkm)W7Y*JcU;!z%gCPG0WF`;5AlmQYp!fT9nif0` zn{(%mzkZd>Wv2C-`gIhHJ_6Rs-V2@3UL;_yq!AX}S|CO}{AvcwFM`!H*!ZyQknE(^ zq@=B@b50+B;nzuV#;<>X{D|4nqDw+8m#~b725f|@_2n@_;0Hr()!gm?ecrOF$_M5v zBXv&_&-8d{j2d~Ir;byNL&!K~bh7k~`pH<8c8~`=&%;WibAX3esC47#o}$ z?m~jB;M@>0ZVOBdpR9i`mmTG#7bB-Bs=*K;&v8Y-D+nC$p)sW)adTvd$Ww^w{+tc^sdYS`y8r2niC4^B z7ZCqLODAK(b`3QPIvXMNB!C}bWMQM(`vF~doj@L{`ZVq5#U=*JI>!ylhU z8??7V#T`0!Ihz0k9z3-{L@~AV!v>cOjyyIgHby z0KpC9eaP*?wrMsRR_lhlNjM6<5>F4BSkU8bx+q%P>r2z`U-yV3BQV(w#gT!iF7|a4 zsk3(-DDw(2Jexy z=xx((dKS{(5>fAlba#ok?uj& z^Q^y+&m&4s@B;lm>oIA~iWs%HAC?fNCEvUR7ByfXEPWvZ2d|+E#=QOsxYC#fa&-@Tr0xGvCMN5&yFWaP|ZtDsuJf$%OnD6OQq$5k;%4%x=m zk0Z^8!&pmB?o~FlS-8d{B0@lnGkRnV5QN2)z(jbO>;KTV^JJ=;ySEVHmfL zeZU748^Gx)+{w{nfTIM~P@A78fC9iV$WKCRz^-#!kfQmA#Df0QSBwuoJU`v0DJp$5 zA=GRe+|%~4I&wAncJre$Ezwr3g@Oh)Ug8d`e@F-3d>32hI-j?Ke!x5eX^yW<9GPoA zHd z!txAXy*U;RI`}A)Lx@0r15-W+c1vMeGNR1$zLUWjg6j&fZj}gq(H7UCK#a4q9RGeM zheci=Wk6Pazh}I^(!2ts9bG z0X7L>82k;rI3rLoNV;`qB6t(ELETJ7I{>u>0P{Y32bbsuA% zc@Jgg6O3$QYz>kR~UJ`A{- zR!kTE{Iq^^p|`J;%lxq+`9?JM$3KgpGw{SIGIR#ECN~nI3YDh7&E6cD-A!VOEr4zS z`^71AqUI#AsG%$h#(p{TF8pBHiPV?8kskMxd6VA@DSJ#2{ZNVK;uj}BGf?t$t;QC{ ztBVutPFFxyl)*u5PX|gP>U0}zo6K5jnat#ovuMQr69-7q+}s6JJ=U;#g3`;Y526x# zpGdLD{%r#s=2)WZ4YrSU{)J?*vt)>odmL%#_>G#VY}Srn^Py7o>=MTAfk49ZR}f0=>0;>q|B zGKbV_aMjlLJh1i^aWDXc;bR}8TLTEVi)4GJ5?DU-B`}>tLpkuK>+jreVsf-|Gh?V8 z8q_oCG(WE`^>HYL?lnDv_}6L zBu8bH-gx1DqIXGsg1hy_A*ZUBSLN`1aFMH3rRsw+?GZ-CRueHfP#yi<%TlRx`imn*jO`NQPLA3lp25Ft*2G2XxR0^7=K>- z>w(+6%&ICYVO~z>YcfYR+-qI`b^a}KIYIvKk3~hK)g;I7U&@kSwZ%;9-TD}~cC|9v zcBhO7NOj~PlUQk`MpLz~ZqyOKI!I;$Ck4AUfubK!r3rbk;1~@}6(^i|Y#YZ^2epzo z^f*O=MS{1l-K$M%VGfmVIXN5pw)iNGbiQJrP;I=^{@hEoE!jtF{l{@8>!=NMXA@L) zpUk}T?F>?`SlV@bI3+Jan3p^?ta{KYVK_7`B0j4YJFYlXB)I!1nAmhMiKB`c&VLW| z*6?2m#;=Xm@s!+!>E3A-KNhS=0_`0JaPabxS%|p~iWpPf5q7HkA||YL8%7CierIS+ z>kL14HG(enqCWQ#XTdc_!z1U%!aqKf|5}|IX$LJktq<>t|Bkz#jw5{6;6TIx7TFZP 
z%7{{Ra~DM71#{KjhNYp+cI|?Hs&$2r&z*l!%My{C;`ADNrlP0`lXY{A_j0*=U{nz! z_0vuy4*WEl3C^#Fatz3Wx88HGR2_yhJq=(qBiN1vN2|S^Lxl>S*7oX}CJgyBo3@xW z72~$v^Dvz4c+bRo)*ynN>Z%Tl9|^33Le`M@<4qJ$DTO_*S~+=>L~^>Y-M z0qX&npAINKSxsWqvFZ}86b(EsPU>M{?^R=r6@5D=34WjJJZkYvGFTPyh#p|p@K=e( z&Kp#5J%JwC50CsgM%lMBT|b8|{sV-0VC*=q*R^vGLXX}n_wh%U#cb?hi)NQ?%6iFj z@6s(OGwXpR2{*jjOUjtAYhjVV(8Ubo{*~Q3hGa0cOgZ%M?p1!0@L@HZ@D)x1^w<~k z=KJtd+%`{Wh`fO~-P3dev@}wK1LZ=D^y9!!dME1mGvEJtPZB?q;5l^ohk)F@%7=OUbPcd+a)OZ8L)jbtu88j&whuA8TS`Z zz$LC@!d8_;?;^u-5t_7mj!Pu1PbE0+>CokanblKiDlYCsGLh&?{V=$ZRu>|Kc)04% z5jP8o;LJdn5WsPPA)__wMHp%|Ht`8$;{vcKjEsD^&;+B+r8Id-;xYDz$_c3!I|ku1 z(lp~ed{s=eAI}A1kJ3u`1_sh1`^PwX8hR58Y;F=bmb~91f)-srflDUvrp4)1JjCL~wd5Z65_Fa3Z;#luLG{n-tP>z@8}$YM|<6L|u4l2#%dIKo6>CK)qN z0*@Lz)yEZrcZFdz2^zVZ`RT)NW0}iC-ZyAZatS_nf8}+YxY*5Cg$N*(K&;9#?w!&( z>W-k(4;>++I&(m@^cp6ARY}OLmYiEQ2qC2%j##evYs1l(u5^E$H_?CPA?w?$E%kL| zg~p7RF12HvTi30f?p5K4CBtR}Ifj7s$E84FKM+-acA%^iRdKzG(!2Dw(M-4u) z0}A@{2)Isz!C|irFt$`KwSy1y+8C#SyNm`RaJkv9>vs9qp|S^?Ac5Mqfpox)hqmIj zEMMwJ-h_{$N3)a0U{bEk3Q1Zq^g;ZQS>tK>xlSc-)aZj(NP$CW;Ci~^IoO>E2j*`8 zLj%|h2Umt=VVn&a zBSH7)99CM!7P%;t&nm{ws&nm*HKeGFq$89H~)Yf{$_h0A+-tvOees39GV>UA(w~o(mpF6;pK)SqN*uTm{rvQFwUofqO6h zD=zn&-Swe|%~3*0FV+71r;hpyV2KU1v^9B7OBhU9OyT~DqY|bwSI-oz1L8LJi3hon z)Y-8B;x5&(mx>>{Cw_2m?T$QOp8p;vjWt`wBg=<@TOJ*3(4 znt%S?!d~yEiTe5L!TS-hWO`Q3a=YS&ni^TtiTA?!iR``#v9OwvT{cIajJjSO83YOG z1FHNWUwR$9b7K%4Z|#QO+l>R^HGuR;lNg2D_Em9aHnzH7%p~4CPS6N0C3O~Zol#|N zm+2^D>XJF&MVG_7m$|lHIFZ21GEWDVn;~;DT7j@iM|55H7NW%*0J0}#0s0ih+whWI zid_7qa8aF0)+bv|U*e9Jz3T0@ul7}!gt%%%*SCu3qYf3<$CyqdJE;`i{h{kWg;~4xhSGwrUVGS{-FCJ+|Drt!y5&3fgHh0F z4X+;%GE8&p>li%?Cw1ezGl?EkgzKkQgV#~~fJ3){hcs-GW%B=$VJxg`#}WEO7?~$C;rkrK zqHf7DwdJbXQ%Zk87FBWPl(eg2P9?8vtT zhn?MzJVk!k@0Y(06T_B&F;X|$kV)`cg6p5ZLJ0$#W2y;#(}u@P3K;z2?+C>()=TLr z9R1#B`5KcZy3yJBMY{=$S~z9~zGx1BseWouu*pRUaos@p8bo&Vo`CO5<}ou^?gO7U z_Ee~uE+9~ERdOxvdYPX1@wkHx$$9ZPjNLDJpL^Zq`t-0;QTLOXq)i5O%e_e5auN5^ z3tmJ5hfB7ny6}t(P>L00j+5YuT;wEkruC_`+&oTA+mE5TE#mf>Y;PXj-Ee(atoX~- zZ2iCq3SwU36k+RBvQ8bun+FtMDIMQiwAw=Vfg?;*C3Q~zr-G7J0>|rV$F35KGncM+ zSe(3XX#o=zuK%hCqKc^-s#oN+)GgPLvG-dmRK`y(&o?9RuwHSn4IMagmedT{^n=Ti zWON;ri$^TXXE}{7pJ^axv6)>orro^7IhWXZ$>1xgb&A>ka^aGSRK$M430^@27y+H! 
z0Ba6*`F9sqLUqp=lhc0sH>3fNtKZ?{)r?BRhc zM-1LG&>RzkQF~>pt3c`$?;5B{`ip;Y{~fDt+z|xcpMtg2=IaR{4T$hMS(ACY079tT z7diJ(@)Y;iGPSGP)zje&QrCo%3rOf+360bFms_6NQg+`>;e3|} z)!Q$e_aY>u(%QaiSmi+h1KP)^%x0cravkD~O|r@u>z%oD4jxv?};Cv{){3*|%% zX92aaM+DaZZCSKtoDNOo8@wNkFNTe;^g(}wfeZx|nk9aV-*ZbV+pu8v_v^c%)4#(E z5&M&|)l{0}^Oyex+)q9q;=%T?aXI|w>oh@9WzplfXPcqtDU~dj0 z;_V!YGwHi+Y8h{&k|OUhqb#4@rk|*|;eark*-BRscgKf5{n+y96xsm9-SPbc(UHBS zVfiX#n08?cQs}DLKy{}a2OgYMd;W9FpQLyD^v_)UVkmd!E#2j`M6qWNqD~>^4u2kE z?)cIRGzEQArmucZr9k*8_84;^>gd7=@*An{PUp*BOIDYP$+1eIFL5e52I`8p-)CNe zW1rBQvJeeiGh%c7Ocm&#!}Jt;V@k+dxPE90HB1kyHu11<{yGGwa_@~c0lNPp<>$BO zvPv@u!WTX>_c(U}SLUoF+85)MnE2Zan>bFk(HFt8swlDxR=0aT< zh`_Ok4xu6;D6Z@i*Eq#0t}yP}#DuD*9L_#($E$jtSiZ&dq4t@?*oDL!J&g1UNSue- z!p#wj`gvFwe%b%e+0psYmenEr@_E3_os*$T#~-i6#k@%?LyVp30IIR0)Ft#UC>X~X zt)mtdz_JtDm!G`Xj#!1(3H3aJMP{BA62V=#e1K}{n527!5j)?lcEC;6yXngV3XCb` zZ}MNzcJW-yP`mHv#!#6-FrH2z|ARwTMYHpgnQEFU(WNPHbfz)+qwD5g5z;1%8o_D~U{&A4mg4jGVq1@-qT}<}X zor*1WKf5nIP0zyY^B)lFeA4iWv3pKvbCN?=k3^$SvC0dvlQgqZ3tx87Xq2C?Qf0G< z_7Uh}Gr!R@*(VNjnaz(XzE(>Ok`}O;8>K(Kbr*hv_eKqkWn8nq5##DGXee3DVQUcb zGlzc_o%D7&S=qVcL+G0(g$#)yBsXQRDZa){K&Lk<_FMP#%Ezs|n+AF?`g|3bDMy`$ z2@5Bmp7Iv*^C54A$S7e?PGDC*el&74?ekKNazg(7L~kb}82Rwcmjr>2cDp7;>0y3- zu|?)`U%Ye2(pH_+i5_d*1QZ!Z4`RQAePj*KY6W_~XW3L0$9Q~30RboAvEW?s<@ATz z7ZNuvU5xhKjTYGB=c}Vqp(w&?9(lL_@1yDm1QoEpfuVL7q?Sd}yN8uq6=q%^6=aVe z3T{i1#)UnVT#uq#o4s*Kao**gsizYXY+Uzzn?g+>5s=$U4xu{lI~9T}W9BpH=9Xw0 zY95KC3E}MLb*&5LUNuQNUOAZ+t#u}4UJ@x~OBKeCx45aXM*chcWH)sFap(U$x<;bw z#K1NkB`SalAVTyMhWj~0gmKwkriV-15s09X*`$C>aoPBJ$}Hh+J6irB#u+iCXh zk}T7_908Z}KTwlDQ^g>}XS|`qA+e7-5F&Skmqk7~P#O!D+frmSVCwQ97b-M;?H>QM zO;dG+?(&<@ag2&%A^y$yDRWcPyEyLr4{Q<(f!nxooWaarP{j+OOv^t84gjaOwNN!R zR5z(ppE;5lppsB@P<4u=xyU}Ii+ z*rWIom^%XU!$HiWdt~tE*w)N5>7%&ehnC-K6F;D+ZX?>3A2#NKjYoSyHII@0rIHc_ zJHf&h%`=!M);qFxs&BmUpFK1@CasNP)*XVb^_)()quY?Pq!Le3s)eI%?aMGYh_m8PRrB&A=b9=Gp`T)>&|R9k;{@l+s1KdG<^&WHgis-wToL7SR=?U7#eJ+p+uoA1n)$ecm;4#3w%QA7NfI^5IzRoojoNcP z+Zp~dBt($xTbWCo26>$@=xzMNwjRghX{O!!6V)O=d>RWnq)%O3q8btrX2|!blHHS` z8WL7_5kzqlHJS#*5Mq&>TloW;DLT%)wLx$-fW`ju*F2-%$MVi-I3~-9gqj#=&T4wZ zmz+7@+DA)ck><=zXK`$Ki=|0o`uW+9%Et6uJqibBh`ta1^CX6*Wjs`5 zliwdWJHHXu*08U^9$hf?mVfDH)L!NHr#kMJnO6fqWC37Z!w(TUy+Xl2TD>r>B@H;( z{_`ZL1MfqSjtxbHA6)c2V|f(IWiY0?*m^mqBic}x`IOsFrD~*M@8zJHf^@bnhafFC z7lHLAj_pU(GRGu@p9Gn}`((4b@UcYcxoP=M4Ze5}_OtFCs+>fWUx`EzIoox|u@{!8 zw4*Yg9yXMa2OB6xsyH4MMhZNViNTTu=v9`&5`-dqs&wFa-<9-E8?lEsPVx(;ofWmo zHmR%Kym+@)i3)HM?RKGmR^yw$0Ouumv4_-|cv@v@EZb zrY7}!V_}=U_#WgEdxZo$%=NEm_O^eyK4OTO&N(0kBMWd0B^L=(gZI^ouW{8SyD}4htFNbT!arA_!dNAdTFxz!Vd1rdlU$_@^V%SPy zK^(+GANQb(Aior;ZHi6E=p+sNOSff}n(4<58Es<2?A*5#8#y|T-X2oyOgZ}WyNoI< zFFpbMJ`y_LgaW9l8ajSJ^7I9C(DRV0E&)2^2pAS^oFi3(9}yL_mb5)X`fwOT`Hk%E z0-mh9G-RBNEqO3?3LOYz?8&^7X;UDe20s(Zv7T`gDf3Q9|KlQf&J{3+J36v>jb9zVgh?+flRet7UNo0}H>Sv{LH`hjYA(G41ELx|Xm5z&jUEemw zef*2s4}+Yb*F*9mmuKO8jf$+4bv2LYvNz0cF!?%~oq0jZD{NX=#NeJKF&ybOdE#H5 zzIj!>cI3cWQsOcf$VZLZJmIQ7wvJlI(XRtOyuBL70X5j9rGIu4H2^%NVefnRMERyy zdWsywverH^!o|)ndw#2Ye_*Aye)#vGbrnjlWenQg#sc}yTY2^39y))HrWLzooli31 z0jhkT?(!c44~uOnQGc_sQCe8Fc9)l{%WVsleRdnw1~{$&v!e{Ty0OxClBPN3Ok%DI z1c#~lW;>p(+;>|_-Cv@s{}r>FeUK1xN(gK=0{W+zgMj%EP#ls&2s?;A92f-NS`F7# z(>`&nEWqYZvDnqUw*9d^O=3Zbv6HnuTxE0z3T}OW!@*p65QzUljv>IQ(m#;PplN9W zAH##52s{EjUamuqocf`IWu7*gC6fwFe3zonmQ0^8Rp?SV2rbaw7MU*c@%Oq&H?Ivh zA_;mR@XjX4sBi)7E6Nk|C9xAbB3m3MU=gtSd=d(!4Cu8MAJBZ+lbifd+g7Bw_my&c z{Cl3g#h-rD(D_}m%qJLFtYdnR+o*pEglO5PiC7UB>!_+}=$Zlt7reLcX5zKz8-A7b zi0kSvhwyg>$T0ib@8@8Vwm)WfTUDWBb$-cP>uBvJ`VHtrA)1g>VU@k#*8j10`9(vABkOWq5lgkBOg2EbCe}4@$2x})q 
zkvfsZyc~E=kp5sCn{;UR3;Wd=E)9KGJ-2K9dNdACYE6X#6cH$C{DRUOusyqp)*&PL z{kLdWeT#xkYTiK$W$mO%(|4eUzCd#cvrdCx zQ1Np>3O4QnGV??rgcoarIYi<6Emy(9@kyvxsO4IRc(OQX9YHAX-k)Qo z2`M|q+|hU2;e}&Yx*`aj6Ts&qL8jSk3K%?vS{$xUGSFnKu457Z5xYS)J_znRs}h0p z)(FoY_LBT#H<~97Cl?#gy*&-%dmeD%Arfx509h4v}YEbzH~D%MG}%(&b>Z|5a6Lb%&Ntrh~hoHka0QP3Eg^qo15{!+WPz zMU_rpvR2qk(>tmuDtE~$eqWG^l_ChewUj8b+8z>?N`5OF)9kY8?dpq8^`%gzw@f)+{zJ%>; z>@~uZM-LL2GHDPs$AQpgjE4!Xr^t;z>?oMyR{(O?{S4-R;1!OVV zIX#CfcY4m1H}esHrH3@>cnXzG*{b>uJjqXMv$-VV;K{fp}Lj2XMj2$SH=PmGPkmnLLc)r~HELQ<4xad84Le-t2n7$tYMVg@0ZQOf?{|t8*Gei!^}8cl$<5l;O^#@XNt#co$$*9LO^zM7NQ6QpX8HtB0bd-Ke`LYhE+ z?YALv6Q9F1-%^o3EdBdP%D?HJ~B0u?va51$dH%7M-fNWcsXS<$kS_&>q9;>f|Q z0Fba#bCxguG1rimksnvud~L+OA4%?5kNH}Jt0nIuT&)xAJM`B)`%C^s-ldILD$do^ZINkvvkJmYyN1m+Bd8#qHL{JT|xF>WS;N!n5d-RqG{&b_NhbCsNAze zV}I~x4TV+$b#&xnVpKr~I8T5MAz5oNRT&uKf*H?Eg4dJi*IbLBgBy7FnjQ8`xaVTr zu!3E=X6OpVpTB%3cHjnr6;RKL+t*<6*EFATO^NM+XE0vt3giEGh@+c;bD6D~2c3&x zk%+R8jifgF^CWbMRauI`K9Rt8Mql$nU#k^Qzl;j<+Xg#odJ=|NuiQQ)U9(+s(`U{M zK6~%c%m1>20_S)^w+=|O4y-jORtf*4W$sB4+`yNOj3odKMzci%Hb^KkyV(eezB2q}Bs%VE9^_C_ zhS8hk#&h^Jkae^ZO78`%rJ@XpUyr}xpLpJ>rEhDvaXsHK0bb316wY{gIAtmLp7sgp z2y3}R)(%$J*p8NW(x`b-5~cUg81prlTsF2of<|T4IPSfOe9gPzmKE~zofNaG=jZw4 zmKR?v+6Bcdt9wS*jj9{GYv!~4RM+F}*IjQ^oN2?da`CCEr~4#?c#nml+niEPYw zFqOnXz)`u157v&44h+jS=p9!%y)3^!oG+#-UT&EQsAfSWv0daz6(<_XGHXWSRjJ|q|h z-f%rRVVisK32)*p`u&Oeqg1{!3*jpn*Z=XAcd!6k$TO6GQeoiqPWXSs*y`K-$a?2s z)V)w&V*z75w{*paZ-37@UrNwv4oUh`b;1Y_776 z`!>ytgZeeJ3r~ceJbE%%6vhb$ZQl4=A;3CZ8lr1=%?K_wBT^f99r-Wctf z<&lksY1G2Xc7FWV=c_fK*E>@QC_5Q(e$tAY3}V#}DzjjoG_^|kHm)YD7d{tZmv;Q2 zM)YC2=H%{U(*;)1cfYf=JZixJ()RY{$7{#ZwL7ucR;34XPuW}Fd>y2u;v$9*l-;RE zXR}8Vq7Y6C{euc=-ZXFEZG>)jxr@M=u)*#m#Jl6C99WQa*iNFOxj~ z8>`dk#S3pxQxu0AS4rQB02%w(Qd7Vv*_>@)JTAC&JRq%@fI7~&g+lljlyl$=E=LM) zu#NiK0i__w9(^B_!1VA{azVJ#Ok}G&K6EGOvC}!F$nL|A++S{!s!TKTxv}LPEE)*7 z52$`Rf?r%VFH39y+a#Q!{Br`j7C8gM(kp7O5usT3FuuyO!{h%D_9oC!hVTEd5JFLs z-9%(75|VW!B}*nsA~l4BBufBLo^MtrT@R0-~hTH0{DtUbMB##I~LOhSV3m?eb#2RN($01 zLwc3v7-w>qVw2)6c@6I>Q}Q3O8f9SV|LOp0C!|Bge;)U2rrW8!Hy-ZM5WQw2=NAJTs);4gv?)vfPC`6?NT&9JZG zw8TdrRsvt-me(ulkA5<2G;#`NH_Verc&lXkPMPwCd($Gc;vLGhM8GX!R^Ypyzl8jX zojtWg7#^=(A)KaSY^Zp}V8f6f`|V>*tuh$zNG$Ic=dp55ZBTPQtfG}FyT2;2u2b#R z*2i{BJs9}afP!LU1sGxS!5m<+9m@`&7QdlRqW1=K5{u4}x#`J3_&SLw700^@IGzLi zyxSU1qA*}4m@8126hi)C$ZVmcp=afNCD=>yk|Ey zp^I%)0nzYXnQ&7@Tke#$EEFa#7rxSzS`aRI!`XZ23>=w<`re+|7x^}}yX4aVuHmC% zLVm7t!$AYyx9<*R>-?ez$HcDQ>W+$aGoI{QcuXz(P0qKwKbmDe=}V7_F{|#l+PP8m zK%26D%j>0n=n*t9N;R2!ZS`lP{#NL?D#a;oB_Xz{0&*V{k&+V8lg?o zg0Sd17Xe+jAQCcw%j(*`Lw(;*3&oc&S2xOyEzTNXvwp@Ch$GJ4*gIE8x{{oN^tH%> z#LW-quF=yZ2%Wymldt6#VGGWRIz9zEAt6C*1xdBst|(r&Lj~p2v!giz65_P5KPxi! z`aZ2Y?NjzoRMq$kHAlzi6R!*-C(hskD!$x}$E#nu0DbDL&*?t(r_X=1lW*QTA(Hh! 
zTm+6<;>6iei$vAmK{GfhZV_w5_wU4*=i*;l-f&3X^<84N@yfd18282qA?N8;O7u^S z70%MT$E^DjF4$D_YP=MKip%=&`{B?3f&Ww=J+xn%llTU>mnwpYHT?ZC!o7Pwd?ke-PT>@wHAcwh)yXuCElkw%SSjZ?0$Oe*-U2oPGw5X6Qx7b zH}h0d?R|y+@e!;qAs;bebM!xa#9@~I;Uktv@HrDN)(Grm`s>Lmf1aK2q4~g_>~C@f zXH4H*xcX7Aj{opMBx25n9x@?(6NXH1bwj(Xd>l9eIMDtWa4hW6u1i2J9#Wj)AaPxL zD8D&0TKwpI={Yg!A9hDWFFS89$&sUz5qWT1xBISwsXYaQ?wp zBgJys|75wm*~)S;@lO|cAN?4&>$mg3I*pe0Wv}2r=3*NS+-yVMG{GR39PU3SaCKmk znns$SC_s#~UyYX5ekR(hQ7B5~PF9&@hJ%So0(Z1$K8jOIsAP0ZmQGUMku{r?sobv2 zTr|dG{t8`0Kh5K$ye>Fz`i9TsLJsgj7XI&TmBAJP?U{z2!>)a0=iOSHKBa0hypp0S z)E@*&bTMOz)-cG<`6+j)d)IRy^UMDwP@~`LgjFQnWb;sloYqmb1uedWgHGZl+Oi_v z55bjzG?Urf3y&r(gk!>=tvdQ#U`%-gyt|P#7SKY))7h=FII%O8Ct-GuGRX}B2=O;Y z?1RPiW0)OktzIjQK8lLYB?f_AcM$c0472UWtlcjN20)+R-(dI64c?F|Cjv)Mz}o7kBQ=RPl5VNOyra=b3kZR+}2=b=qq zUlQ8X7r0R#VR9OjC{}M)A}*=AKN(nS5Iwo~RR}w}KtE{U<$E#HJv{H*9j~!*b?EK+ z{Cep4dNk;)fM!3D7`oXnql2F6kgY zhXkOobzq@`^_`+%;kEq@xf0PK7jn;NcIHv>qLNIZX#=J?kP}dw;k1REz_0iGujrh> zJ#AVM7c|u31bz~SSFvw_FqDSV5M5afOTLxAd?`B|jh_ZQj^=j?og7S|URIyE04YEH z6iE36ycVEt!~p3Tf-c>kaZ$_R1x@7$?VkwXEWlXYdK=SteKh%1A?u zMSVzWZRU^kEY@_3Lz{xQ3Q*F?rveYjpYM0BiwIYbO^%#V8YIlwY|fxQe);I?(>GxB z^QWt9*Zva^(CvdJO@DLY{N0H7rvT;=7!(5SWJ8TWDbD{@Iwo^g;wac~07#v4nRL05G6L(t$Lb@8yvD|XX@HE){o8DN+y`1<@T^&?9PYb(r`*oZXQt(8TY(f_#OiAmaDE9? zNQ7M=X?_L#vGLPnWYc6^eF8Wr`8&}bPAX9Y#eWKa-`N{|S#+mjuT0Dn({Y)$b1qGt zsXJ~tlH72Nd-%9ko)?v~v_UVmP>Epn;oz+)ww#uE^E^03 zBZ$rt?8M`v8_pPhyKP;ocW2`Jj$4iSiP`03x00t1<#gSgE^_OaILyZ; zt4<<2NJR*oxpPLx>=sYd)!f^Yn7J(AAd%JOE^6?pE`~uNg)!-X{r``#Blv_c1(xP( zYXmYe2xWy`NRI=Wq#Er0=h@d+_UTpI)7f5*?+Y5XX6stH8GZ)#E=kCw$_uv#nSxHp zC*3~c=r0{G8-VJcg9If7Gp!9&Vhgu1veqNc(EKt+mqrQFyo_)B2@UKp$Z{ND$q9I= zmJq+abBKIEcR%4S+fDx^C`BxdzPrD(%C8_?^xTCi3fEgOr3ntdk$;Fn(eWjq0W-2z zI}@01eDOA{tq%?#SN+piet6W^%i8p51Mxry@sCSU)q^JX<8c-pth30LsM8YmftQ_Y0jko!N$}BtOeDgZ3_F3b#1o5VVNHVr9IbGYdiJ$ z?&Wus&VC~9dy7}1mE!*FZ!HYY&#_tiKK5z{Yw14&bro})n^<@As{x=hD-`%Qk&2Gc z^RhnAEJKUOw!ME&U~1{T%a=^Z&aMKeU$&HV3H6BHtoLLyZ6A*AOtef?afhwTxQb!O ztFBm<7f(tQ&q%$D)63%@=gF@$%)u6{bnY08?yE@4_$u>RzTh?A2t#`DD=tS zAV3YG(v}9X{}WwBrTSZ3S*bFQonMDH#& zhxvsYq0pOwv(_)z@m@3d1?lE?8xzafy)t1|+T-98BhVNlz2oVZq1&-FvJNPf{| zGMjp_9EE1nKhSLI@LxS5;RnMFA)34SRD`YZG!xkUhze!m#14@2P5iCIqti~3Io2ML zrSG-%v+PP~BB|8NU#IC^jcQ7h+%)4n6F31ALV9YE0{A@vN;5pV(6#|z%Fp?Ay7KK>TOo>LRr?07^`{^ zobf5b7c}$wZ_v^zgv!Znrk(+nf`1D34)GWyzfwz-Ftl#JaF_3G#^^m&3@e|&(p5u| zYc>~Qz~Wj%G;+{cmqh6)auogGlhG+nS@9%i&m~9L_O0B`Ob91>znhaqWz28Wu`Woo zYwbiAHIFBga59}qZ3yEM!DWfSv00IP;^JBM!%cmx0^hp3Ofy`MHW~uw*w&Ee0Fzyn49@XQgr~MyQ-;ySFS0j2d->+;Sjd^UrR)yi zhRnSNH$?DAx>SrIwde8#G!=0kK^1m>_uo@x)D&tf?>qZZO(FFnt(Gla)l)oT@_@ zj-X7VSQayYdqX?2X`BlstdqRI0J)m*;pnL(FEe9(DZ{D@hoWA2kb~BXt>%g>HWbm7 zpDO^8>iV=IGaxCXN5h_93}Um{(*_vVfbL~hm2w%{$F6f#(I3YXS~&~?f)(TXV~(pY zOcY({Rb9QW!cHx&Soo%nWPU5^oY>U{?xGkS8rE_#XcD518EC{D8IfW0N@!nRsNvxY zamn%8b7<*pShQN7dM_|rv)IPf{RRS!3kW(1Z{xjdgw_?TfG!P-TV!F2uU{by6adaO z#CMG#6TA}LX`C5%F}g?p)V&V1bJxp9V(ZnF7!u!!<}FI_|@?0 zO{{`NL-CkYTan_W2l5p=VfI+yBzXP{xqp#{FQ#R&D^otH#{!YEeV;yLSwCcS?gAf8 zHZ4)iF3|St6G*m2d$RhF7S?_=Z5Ccb{8u-TVRA$ZtxaQ&@O=dz=6ax$)`WRLf+?W1 ziLwQB;W3!6NTw~gB|%J3_erHvo8H|wAB{UW=1$cQk?lvjJ18BpM&CM;DQEIHPVdQ| zaC>pVFw0FSt>o(hL1BYNnQePMwINBnQof#FRI!wG)=kUJKj*LM$s0w}W!iC5Zksf( zuX;#-Q2Egxf78-1qG@huTN&^N1TZ_eNkBIvzJ=zt9Uu;$IMq{{y z@M4pgpG`ppn>7)~lX=!tEPiiHT&lRWQBmvmpqx5PS}*T)?fAMe{BZXoNL|D1rFYr+ zuUCM;Ew-tu(>1`Wl^PG!wxRf0k((x|brL6^UHroT=~%<2bZGR@9+&WkfVZifos#Y- zqqjYk_(Ip~+3FwZL;F&ydnv&W|C6e6nY_TM$|gRnxP(`8DI+UPfbip>+^Da@z9Lbr zB`QX5K2(8bQ;kLGks?Aop6=!Y0(Lv{8J-- z`elOUBTd!xj9HUO?1?HQ?hp1POB*s-y6wfb@FyU6s02-824OHDPL?72>C0oLcIjdl 
z*{+z}yXRP{dL_zWEL^-2-Oi|DbNllC+Xs14<+*jx$5hARHE={9}plO`; zI7C@UH08XE5GFV7;;E5@mP6V0>Uq?3oQ%%XC_*gbs`lUK7x~veYYLBH>yg%BZ22rA zP8V-F2DT!OrD6<^Mn|xx$A;E0mB;}o10H&$R>&!3^)Z+2x%%&AE4wBT<^GIfvj4?~ z{L@^WfXimV241YG%4+)xR-qEK7U07H3l;_NJ)d&BK*mfUCXYe4FW2~oksg}T6zlya`7g8B(C4AnS$++oy=0u^Z0pF)Ky4a&0SdkVEsdAo1!lXepwGC$Ao?p-{ zC{fR?cg>7ctWvhM=BTp1DD$GLe;J`A*+O$ctibg>_QCWNqG3YSQksZ75SeA^@Egzx zg#Nx~p$|c0OQAOnO8+e!H2#8nYuSSb7_0`%R?^IF5y?i0X0NhXb6;-@*vGa8#?d&x zTENi-WY^Lf1W|$lmPoiiIgf2ld-Ml8It$U0LvU2C*VrhBn*SnsCB$jDEM+= zQ(@{yA1Hc|)`mBEw$6J(i~q_dau?+zy26Aa)h|@6R8@jX!##PYVfl{ayQPvLT!w!6 z3;TSQQzi$@_9%1oazrtE8p=%Mxwp#h`88MiT=PLL_o_nph>vSs-_xea@s|@z5Bh`5 zLpG*WYd4#!owW8T4{RaQ3uJ4;f#ruzOPHih!iE;iSU+8h2%-9oZ`l%Nqy0avI8-)) zOoN*3Ee6IT-k(zGk+I8~y8?EW3GMZ-8R5Jud%469@6)ihH0S48$t^0E-3-cB0RMK- zGlIA~q1ft$V79wv1Ep@ujr?`of9Dy zDWRkGJwVG1vcoE*HtS^2y5eUi_)`zs^eQw^w+a+$`ft*u#?S4o_lA17=$%uxN3Z!_ zEKa_Wr(7zMI9bNRdCRY&&Hj6*7O@^V2Urt9^329*c$8V!kl;0B8cbO7pH5If*fC$a zPF&6l@JtPUaic;cbh1ph0q2reCIe2o57KxS*+(OaBU2yJ=2w5Zsg3CW8;&-NY#vgCHxI6L2zGKgBM2D4B zZ}>yU^WnA)iF)p)bvk8`F@n9XhCU_=QB^_GL_g}Ls|Hioy?=x9h2p^s^pB2zf`RO z;{D|NB$6kEJioRHl!27Hb`z=+Oie#)03ozawqJ&fZBTqx zbLE(!Kx)9o(SvirN3@Stjc0@QXQim5Ax|FXK?$jwjg}Mf@m=a=6WlL;#jMhn4QG0G zl8aQanuD5SLZ|sbc+|piGOeZQ=>w%QF0P1(aT_en_xuBerRj4QLap$@OD^<-mwZ{|jm4*^gYo%TJTq3gxC2#0?1-!8k}bz4U-*Khu{4z=He z-IePWeCHnZrOf*eu8rxJ%(k??>rNps*gk>+c^FlT_8gQvn$^%v7J`ynvBdyyEtLM%IEy+5SM=Gv=f=emrw$hd>Gp83hzk)ZVse3(!n zeAw^X)plx6zid4D?f&VrJ3qCg46XL3VHSy!B(9Hhl?bsB!H1c^kIV*@@+L({uY!md zU>}N!%=&2>eO{(K$8+9OQ7-n7QI|%N1_uPV+(r~3FCZ6TO1xoVYUh=bK>P`<;1x|U zFdphzraWlifzGQdbC18c2T)7y^>gqj{CcDBS}S#F}TPws6tl@Q2+20^g$y zp52?X=e~0KVUNipA<+!MBVXne)nD!?Tj94A@Z}naCDKVx=d`GTH8s+P;u}v zSx_fE%&D5p9`ji|K@peH7$)s(Zt@T%dLFKG$Y`?t5MM4E!R7f>a?X z%f#&V{1ni&PUSLCUWS()RP9UL30604YRu2zQ*?aoW!xReow8i0#5QID%|Q|7^h15= z>}w5DW^emT*~kB3D|=+%2XPg6Oi=NRsE`OKpy8fU*uqRS3Y&=HK112d-X{~*v@IiA ziK3bpd4IcA+ud<6qS&BrZcwK{9QpTZ;ANF`x=3jP7#ddnduKwzvu`kFUl+?q!%@NRjJ02Kfmb;S2*F zwLToAAEfzEsEQ_m<}2pB}h4G;uJ954x(DY)+$6g%hY7DXlPrHfi6s zf;qW~&!WZMWSavP?ms6Lo;Q=~pE4?+B-9-511HHy>7Y+P@)jt6p%VLUhIn<5?9Ds0 zT^tjVKtq;e)X?Eu#Ij7u_SB>J)X_X#m*jZvnZCdmH#mi!6NuOvW!$n~yKwp6)`dRr zGLDw^CaIQ}3&mVz8Rk7Lbrl+GmlBX?j#sI_EmsGI*QA_s+-EUN^a)YW= zQbd#pDtCqrEmKEbo*me|?J>NyN1Kra;|bXhl{bbDSs8qkEy~WLN|o+E7WZ)OCFo{s zW~Y{jRVM2W`01{)LApDo4e?S1(5`Gq%?X4)p*m4PCU%Bo3GiNP6styZ^QV@4y}!W8 zchmzHKOWFYjw%YJyv>X=zP-mzg~58x40?s`TkT{XsV@$ zDQdYobpFBuNc5jZGBS?J9@KsN;l$o4j)7%q>5q*D7B69b3GsG((E&B}yIfQY8Nd?{ z(^t`tC;t$<@tWe5c}tk~=yc!%lSx+z8X}=<&xEqX`D!F&8?hm_25S0Ig{L10d91=C ze^?)zv?HQ31K|l2gYctLGF(KYe*pgOF$G&pfy^lfCc{qNsV@EY3aL&p`@DCzLF?IE zb8?)=Uv^34aNWG$Z{(EPF?!P;vkdc>zY2rtZA--L2#SlBa6Xr~|d78!%8 zMGxPmQdO~rx8K$br3BezRBs+;yV>|&?!|Ddupj&2BdLNE#)HfNBB8M8HZ2ifOd!X8 zw7(+c)1k*?7I@r=&kVAT8_3A~F}vNY-=gcGEwj&S|K};U!*zv~4}?7a%3Yr48h{!2 zSYTI1chUF60oP|4ij-MH5)R&Fd+EX1-s0z~_WG#r*yFL>;T&^R%J}l6-q*3^wzF#9 z+vNwM>~N1b-xn*8icX@V{J=*MReC&3DE~7x%5aX~Hl+B5&FMyDJk7ZYTp>ZRu=C9z z8nKA@-f!=4Q&T3D=2r5Y!vRhIk(SRfzP`|;+FJB!xnt6!-~>-NC4vG&=z!zKbY7F; zVp|SeGKI~;wG~=YRk4(N>zaOF2y(b1byihIw48tp)W;l@Y0o*52w6WD5&YTBwvA7K zaSn-D$3Gt_298!@rd)wNDC|}|-oFG!4U1IOPgLTOX`TpiDW_nQL;9yjER(|R`SNZnV=0cMzv0L$-aj6i#&$#T~sD?v?pyHS81xlAe zG3|l+FN5Oyzw_?1jki89SXWT491E^ivn9H4J)iepn3bQ?uDI{!t^Ijk>xd0Al~4x? 
zE5RKSo(sB8-suVkn2y+uAnJ}NT+%QM+Lu5d$Q4c^1}pO0g-Ax}%O$VqIl5_{N7+dD z_XGK_6nrn9hT9o-I@sr&Q3(J3M>{3*KP~r03$^FkQ#ef~gGPgXxIKonuzQl=Q>EW{ zI3zOqeB;LsF~<{U$#JG;UvD#la?#a}o`9muO4ey6ovZZtG>g?IR|VrpG-aBJWP`rI3H0)p3B zoHHVGvg(g3@4y%;GVa_WF^kmpb7Tcy#>>Wk{3?3DYHl6Kz6Drah|@w0T& zMBGk;So^$$#;4oO^>^EV##ivn6>!2b8^Oa;zIDpdsgoDvxY@DGm@fLPmId@zbiIa- z8fKksUzT`z#Po&4XCCdZCbER7^8y!Mj}^t7DCdGqJf|+~vnM&x{wpA*3Y-rCe}~Bn zhvc~T-&Us9!Kh)BiR5NazkPQr8&xrmn|#$J>2mNEoH1jr3Z6TNj+DzihMQGlkKWh= zkKNhuQ*!s`s5#y%*qwB`rWyl?*S3F8T(oPWchiE@l6Am**gCjZLO#mIa(nIL6$8p} z^tl19FDq^ZoX}A()cDHfa>w=-CCH^?mI=pDm(Pr1K*$FTBByoIBO9f7B*V^h$8JWh zNF*pnxzBqp8nO%d!cc3(;T$}4W-Mm|IN#GZ8jXUWS|pG&9@uRE%81Hw`T6#P1Cq6n zwB5CdtP|h)4Pf&(z0m`W1sX_$rS>aL!uo#ZuWu-A5;Ne(43=k-1TN-N%cBG0-lql0 zR>q|*5Gy=4FxOi?Aodoi4zcVKYP_>ZPY%rsMC>=K1fD8$grQE<=o2vSLr%#5LXpp# z{-{UC&s}zgS~j_?qt1XU0@#$zLC|iOJlL?@&f{UDE1i?)*`fE{RYG1i|K``r1C2gc zODhw+dqa}5y0X6+SxsKK?Qmduec+TR_5vG)uWqrd*lzCo_{y@u%3Z=-^{ofzC5Ll} z{EG@)xpOy$sDG}vv5~ec;>2lh{jqI4P!S+7xCy=Nu=8{!Va#p1ZfIjja(4G5ab~Bk zWgNx^l!fiEIk2MHZBrdH>AwHw@4plGUPuY9jj9ih>e?eE(9|ciM{OfH&CC{xw7ivj5e{pAPEAy+FlR7OjY1EdsA)63Ok395IVO zAVi*WW~XhGgw(eVLj%dP*RB#%`#Ztw?~LzeM{~a;`M7-Mhu^+m`dNE|zA1$ts9i!2 zFb!k9(5p%y#)X@z%8MrXp!?R7^dIDX`eQ0H$`q;jUOF$c%GAS;EQ!XYE*N^LN`IKG zU)>4AYVOn^-;lt9@H7t!?H-OMM@%i3rz|1DR8?(woCzTwy}oH|Jtr*X znr2I-LHmbK8)hN>ukX>9r4&zY-aM$;x;ofuQ;)m=*2O61gWrD&Xd=CU8oA|hg}9`j zA-j~~@7QYYdl^|}u;&)*D7o<5a!=|4dj=|(SP=>K!;Nvkn-&rJ2>MUbz8!(vQp&eI zIu8jGcmSaoV-=}86!Y_#rJ$X1sNP|R#copv;dWEvt|mCVGM8u)d8T)1I=OGzXjZU6 zKajXoa4&FBXjSzv`jT-Li+cpR(FMIP7UNx~UElIPG0I9_?7AUa{vmSkzw>WNCguwl zOi7C|P4UB3c)@5gK9lpy|8 zv(#eIwxx%1mu({Y~aCD`s=6Rp|qg5VdHj z9gYZ!KXxAVu+?fyb>!M0`fUhnugj2cX}mI#B)Vd}oj-upE^|68CpTveZS1g3W4Nr(@M`9Vrkrz%YAkN_vwaq(fN zF~TMZGmAjXwFg}=&J445m=zZL{!vFj(K7Ic^`DU7CjCm6O`q-E#CM3#v(5CB#Ld0P zW8zlg0-lxX6UZvO`w0ERtE6s(m(_&s>bUKE*lwji=E>fVO_sqmUE^f80#acmBJ^xw zsjgf~FXBcr%*vyd%!1Si0R7>^*kgN9jcTS{46sj|69E5)&d;F#$V+3jw!-A|`=VRL zE@fRJV@hQ%>q2EI*)`X3-qcCv|&|# ziC}&m6sLE>p%ml|t1y|1UYAe&f_1sAr{hP z7n%>D8lGgr)=7>Bd%KbHOK3Igcf|2tB|OK&Sc8p1z)ruw5e`4H7CaPJ=F@+?8_9WfByd=tBQa+DKpT?wKVrGVDuhww1(CE<)< zgdhy%q!E?XN3@#M*aQQ7#ua3EuH3Rnkv8y%Z^Ejs5us<^2xvVxT0{5DE0J&u%Mi_n z0RZmqs75!_t@V4APY<8kez+l+TtxM(17~-1gWL}y>j=TOv-nJ9+|WYcx?co~W}AZ2 z-AYrS)}I3Ocg!F$l%ic@SmZu+*-+$?@%Gf7N3cic-NyAR0MVxB`t@V6Wt%x&5)WJ2 zu4;}AKJ5thzh|O9Q?+yVioVf99o99%Z0CcoAW81vFsMje$8&Gt)J1cZ{a|JFMJ{VK zEYxI7X|%>1wvI%s8VBKx62VLNDl{F%4;f89jTVa*ie!0x4BoIiUVl0gC>5vMSo>uv zexK$0WPkAzzYN;=6=JvAJL6CEX1JK%3>OJTmcV9suYNHtT%A`RHp4A$CUc6?swAUu z^$q7JT>VU(~7I={}~nMFG7UxdWjPpiqn0O#`ec7c)tS3$)bvFRX$CKnMnJ z!hkU8iA&w%5~ndQ4-4mh9P91-J8AUYn+5rXQ=?}~H9V4ib>-Aa`uZ22Jzx$>-8bOM zBIhy0S%T^lE6P@_qYop+YeCxzvPk?*pSqT0Zn!e4v^yEu9lXkBVA)Y+C6hdNgz+za z7u0i1+WvTJ41GpH(nZ<+9ll07k}M{{!t-%wucFYE;fwbgGxm4!%c{-3 zU4E`TuET4HTJ5@E+hqY?4t)-lT1p!j$C5B|s9mTH6>N=vk>4dC z=C6J{)-pKOUM@_@@V0C^|6M7Y^SdsWIf+ zN)$anA83hvfJbdaeaB3JZAXky$L^?aijk7}U!s^6sF#}_$-*NjJVlA!N)LYl#DL2P z!E;ro9@K7~=$-T#;S27WXOsAbuHP&ND~0<8*{nG6`mfn#G-CJ2E&5-kTVgmjb3X4A1?t6wFf|Od8JRJOdovw>OP6Jzqine@9tte zET2ZU;6n=K(_B@4dX_TUvV4Wxh%)s6F>406t39ha^wsIs;dkpnb(Yi@sh8vmXr%f1 zVHy$zf}9SkQL#5bJDNeOI~KK$T<-eh4lt`=Q3P;hErJWZCJi8L;-Ep~9#u_dN$bp3 z$%zA8)@mQefqU0aX}e~z7@p+#gfyEdk914-I=tofsXCtwbMT4gs}8^3|1EHTn?jm< z!U=JG;g+SctA6&LIU0DMZLRHdz8^Po?@U(7K@1mugqP2vZqE|K{~(4cL;-N19Vls| z_)SE7^mO}Mjy3(r`w{^&`?HFlwcm_#4g$O_=2vO9u4rE6iqJ%wN#s@G>L2%=5go^W z;9-Bv68?ObmoQMwE+ON1#s~*mkp=07!0Xu#QN1h^SDdWUIrZ$tEITN7QK4)~4x`Ym zTqK)&Bb8ety@S35iwQCb>AO&kSWxjS@z^|gOimjZ!M@@D&j5Kl^6iDA&O9F)hH9i` z+VpapKS=0#?MB&FT%;uwFvB&=tlStzHbfHq#OQDXEas&8gD_q6g~y=r8LijJfc$Qy~hwth@i4w#;zff&XjDb 
zu&!Q;Z^`>zRLrnqTBu`qc_zNNDP+zvuuAlFuPg=xf~#ZLN>0!ORu7V04*>4D4I~ge zaeYK0L{%|UaePpDxDhw_PUEbP3hNs72#Qxnhp`#{Cr*JeDy|dDi%QaaPY_0R$pXYI z`oH5Sgx9sefcr9SLB{1AI$Rgu&vp9akL_0z1`J=E^2SHz#lNct?e8bvrFnQ1-FO_y z)5oLI{@A=){Cbf*qc_J|kM3H}$An}vn$n$;ki<~yTO|H{BJxOuNkY%B=)N?4{*wY; z4CHPtYBZUUvIMdzz*3nS-6}}MhJfx~T>m1m_j@Ozdl?L~lEFA~Z*0aMN$?;ES-Ehh$G#0fXO~}eGV$CS_P#X-5QKI5nj|VmP zPBc)OMqG}8p;(;s*K{WHvKE$EYyHv+zkf=U|B z;_A(YgcO^1+n#(*F^!Lq4!NRq{dQ=bYZ>J_JKyX}!oe=gX&MG?rVta~Bm4RRTLIij z=f}4`ADK{5PsK?u4o{F4nn1jDS#N-la=lkZx^~z*0l5&9j_S{R%fXZ>O)lPpAXB~x)0DiO5d5u!BJ^Cq<>3V3f~FJ7Syuak=BkPiFQ*jGCaANM*s6z_+oUw-Xrqx+|qD6 zuy;s!IejSD35{1z_>iMSxOe$>*MY~xo?e`6$#4>7D!YHZiH`;T@6Jvn`72+f<4&q- zAn1z3n=cV^Y)~Dz=BKX+38#mDirtdq=wZSa$ttR z;1c+@1fn@co!V<9)1V|xGL!E=p^*cSw+za&$k8Dh{tGmcrpHD18vHt7K{0zy=5B&cQ0y9^#r7XqUZFt-HSLA+Gx?+q5B z7W$>hsDtu zf%FG7-1jkT3uS?QlBni#rHFm*m$-JP=v4ZlYGZ4qw91_JQTZpx2D_mw=V%h+^ub#T z4-NM+?;~+gH~v8fyMe*Uvm?ao)P4mG_I~6y8OyRvJiI*I1fcWn{vh*zrw^rGxZuwI zLdGG5@FHp_unq7&qEQ%lPlvfzsl}?1f#n-B+$mpvVV|(WX8%5tE9_tHGHgb3=c)2e z!8Bz5IPn!|zZD*qx?;4yDZ$~WT!N`obx!^Z-_W`Re%3r?+Y+aMj{wCPu~jasEGi1SjJ@!=o!5mz_mNnvl{9n^{dScC-ZRnn=3Eho@d8%4k?lac zGbwy`AOF}LJq33;mF?~dvY&nDk!4f6b4iz;JhPSw^nH=wBo|&-I6BRh&Y`*wW0;9C zJiX89U&h{#O}sYZ?(aq&t;6f6pDt8hNW}i@qvf!8O@nD1I|BNAWC5gx3o+#}%jPmT*Y~@2MpuX_Zzoe8PaArUL79EN zpI0Mrs}@Tl!47F7(nBs53lJy*eV4##-ZfD549v@fh=)&%yT^45rk6)stTYG|s5V{T zKuRgK@&T5C+q*G_s$o7l_cQpa8(~Q>MQtq_g;Oq=YJL%KGxoaCwoP{eF)%0!1gSXF zBWGqInIw(Uy|=e#XCvZ-CjDC{m;2wVj`TIHG}X4=$42D)U?MDjcSV& ztX&de55PwetsMvUH=*$^_tqhRF>Iazu%uD|wUAvT69lHYk8ayv;o{$M;tW2SIG?qw zz$>IcRhkqj8uLXqz-jJe~kYF-yrP+oMp36K2JjQ@4zF{HFAJw4{#AQhYy1a*Y9>^!7 zLJP=m(7Fn+LpXdx)rLRK7bW}v5^6N!m$M@@*GQFvptxqQaau*jOKx-R&-VMn!^^m2 zYt^hbQx9gYI|ynOX3sW@oA^~8Enjx7@UfpnRubj+`n&q|`i{14a~)b(9TSIY%_|>9 zJt?=c{61svlsvy;IQ?~yc)&lg36BZpg)twxfmG~Y8g96O4`tj&j<$#7k`_vYrl_mOFnA}hfXE+)N}P_sE6c*2>)Bq$^os5?n{H^>R(-|9+dg!1i81u|jM1mW_VE{G5qn*Fh|+UeU1w7Y z%+5=6$x;MP4BW^lZo-e7vfT^ezVq__$vMnn8@79-J1(K%bvJ@RtwO=9!7t*Yd&}~P zl85#lJf!j-VPF6BZUb2yKDz?OPE%JNZEj|hgKLO($AU*VTODk)#ZSP7Kr%3sjhotmao-W%&1)b}oOwi`;8V#{-(U*BxJj z_3<-}??;F`XMi%vRS;E_j|ZH9xCeDA!EFo!3DQE}FEE^}#=3+R`0uZ8({n(ersWBP I#!iO+2dE~VxBvhE diff --git a/tests/test_directives.py b/tests/test_directives.py index 551b3a320..319bfe79f 100644 --- a/tests/test_directives.py +++ b/tests/test_directives.py @@ -79,7 +79,7 @@ class TestItems: def test_default(self): assert _PROCESSES._default == 1 assert _GPUS_PER_PROCESS._default == 0 - assert _THREADS_PER_PROCESS._default == 0 + assert _THREADS_PER_PROCESS._default == 1 assert _MEMORY_PER_CPU._default is None assert _GET_EXECUTABLE()._default == sys.executable assert _WALLTIME._default is None diff --git a/tests/test_environment.py b/tests/test_environment.py index 5b179b161..8b1ee688b 100644 --- a/tests/test_environment.py +++ b/tests/test_environment.py @@ -45,15 +45,15 @@ class TestEnvironments(conftest.TestProjectBase): class Project(flow.FlowProject): pass - @Project.operation(directives={"ngpu": 1}) + @Project.operation(directives={"gpus_per_process": 1}) def gpu_op(job): pass - @Project.operation(directives={"np": 1_000}) + @Project.operation(directives={"processes": 1_000}) def large_cpu_op(job): pass - @Project.operation(directives={"np": 1}) + @Project.operation(directives={"processes": 1}) def small_cpu_op(job): pass diff --git a/tests/test_project.py b/tests/test_project.py index 55be7c2e8..e886a4be8 100644 --- a/tests/test_project.py +++ b/tests/test_project.py @@ -27,6 +27,7 @@ from deprecation import fail_if_not_removed import flow +import flow.directives from flow import 
 from flow.environment import ComputeEnvironment
 from flow.errors import (
@@ -73,6 +74,41 @@ def is_present(cls):
         return True


+def get_unevaluated_directive(
+    project: flow.FlowProject,
+    job: signac.job.Job,
+    operation_name: str,
+    directive_name: str,
+    group_name: str = None,
+):
+    base_directives = project._environment._get_default_directives()
+    if group_name is None:
+        operation_directives = project.groups[operation_name].operation_directives[
+            operation_name
+        ]
+    else:
+        operation_directives = project.groups[group_name].operation_directives[
+            operation_name
+        ]
+    base_directives.update(operation_directives)
+    return base_directives[directive_name]
+
+
+def get_evaluated_directive(
+    project: flow.FlowProject,
+    job: signac.job.Job,
+    operation_name: str,
+    directive_name: str,
+    group_name: str = None,
+):
+    unevaluated_directives = get_unevaluated_directive(
+        project, job, operation_name, directive_name, group_name
+    )
+    if isinstance(job, signac.job.Job):
+        job = (job,)
+    return flow.directives._evaluate(unevaluated_directives, job)
+
+
 class MockSchedulerSubmitError(Scheduler):
     def jobs(self):
         pass
@@ -536,7 +572,7 @@ def test_invalid_memory_directive(self):
             class A(FlowProject):
                 pass

-            @A.operation(directives={"memory": value})
+            @A.operation(directives={"memory_per_cpu": value})
             def op1(job):
                 pass

@@ -564,17 +600,19 @@ def test_memory_directive(self):
             class A(FlowProject):
                 pass

-            @A.operation(directives={"memory": value})
+            @A.operation(directives={"memory_per_cpu": value})
             def op1(job):
                 pass

             project = self.mock_project(A)
             for job in project:
-                for op in project._next_operations([(job,)]):
-                    if value is None:
-                        assert op.directives["memory"] is None
-                    else:
-                        assert op.directives["memory"] == 0.5
+                unevaluated_memory = get_unevaluated_directive(
+                    project, job, "op1", "memory_per_cpu"
+                )
+                if value is None:
+                    assert unevaluated_memory is None
+                else:
+                    assert unevaluated_memory == 0.5

     def test_walltime_directive(self):
         for value in [
@@ -595,11 +633,13 @@ def op1(job):
             project = self.mock_project(A)
             for job in project:
-                for op in project._next_operations([(job,)]):
-                    if value is None:
-                        assert op.directives["walltime"] is None
-                    else:
-                        assert str(op.directives["walltime"]) == "1:00:00"
+                evaluated_walltime = get_evaluated_directive(
+                    project, job, "op1", "walltime"
+                )
+                if value is None:
+                    assert evaluated_walltime is None
+                else:
+                    assert str(evaluated_walltime) == "1:00:00"

     def test_invalid_walltime_directive(self):
         for value in [0, -1, "1.0", datetime.timedelta(0), {}]:
@@ -621,10 +661,7 @@ def test_callable_directives(self):
         """Test that callable directives are properly evaluated.

         _JobOperations and _SubmissionJobOperations should have fully evaluated
-        (no callable) directives when initialized. We additionally test that the
-        directives are evaluated to their proper value specifically in the case
-        of 'np' which is determined by 'nranks' and 'omp_num_threads' if not set
-        directly.
+        (no callable) directives when initialized.
""" class A(FlowProject): @@ -632,8 +669,10 @@ class A(FlowProject): @A.operation( directives={ - "nranks": lambda job: job.doc.get("nranks", 1), - "omp_num_threads": lambda job: job.doc.get("omp_num_threads", 1), + "processes": lambda job: job.doc.get("processes", 1), + "threads_per_process": lambda job: job.doc.get( + "threads_per_process", 1 + ), } ) def a(job): @@ -641,37 +680,27 @@ def a(job): project = self.mock_project(A) - # test setting neither nranks nor omp_num_threads + # test setting not setting directives for job in project: for next_op in project._next_operations([(job,)]): - assert next_op.directives["np"] == 1 - - # test only setting nranks - for i, job in enumerate(project): - job.doc.nranks = i + 1 - for next_op in project._next_operations([(job,)]): - assert next_op.directives["np"] == next_op.directives["nranks"] - del job.doc["nranks"] + processes = get_evaluated_directive(project, job, "a", "processes") + assert processes == 1 + threads_per_process = get_evaluated_directive( + project, job, "a", "threads_per_process" + ) + assert threads_per_process == 1 - # test only setting omp_num_threads + # test setting threads_per_process for i, job in enumerate(project): - job.doc.omp_num_threads = i + 1 - for next_op in project._next_operations([(job,)]): - assert next_op.directives["np"] == next_op.directives["omp_num_threads"] - del job.doc["omp_num_threads"] - # test setting both nranks and omp_num_threads - for i, job in enumerate(project): - job.doc.omp_num_threads = i + 1 - job.doc.nranks = i % 3 + 1 - expected_np = (i + 1) * (i % 3 + 1) - for next_op in project._next_operations([(job,)]): - assert next_op.directives["np"] == expected_np + job.doc.threads_per_process = i + 1 + threads_per_process = get_evaluated_directive( + project, job, "a", "threads_per_process" + ) + assert threads_per_process == i + 1 + del job.doc["threads_per_process"] # test for proper evaluation of all directives job = next(iter(project)) - job_operation = next(project._next_operations([(job,)])) - assert all(not callable(value) for value in job_operation.directives.values()) - # Also test for submitting operations submit_job_operation = next( project._get_submission_operations( aggregates=[(job,)], @@ -679,7 +708,8 @@ def a(job): ) ) assert all( - not callable(value) for value in submit_job_operation.directives.values() + not callable(value) + for value in submit_job_operation.primary_directives.values() ) def test_callable_directives_with_groups(self): @@ -700,8 +730,10 @@ class A(FlowProject): @group @A.operation( directives={ - "nranks": lambda job: job.doc.get("nranks", 1), - "omp_num_threads": lambda job: job.doc.get("omp_num_threads", 1), + "processes": lambda job: job.doc.get("processes", 1), + "threads_per_process": lambda job: job.doc.get( + "threads_per_process", 1 + ), } ) def a(job): @@ -710,8 +742,10 @@ def a(job): @group @A.operation( directives={ - "nranks": lambda job: job.doc.get("nranks", 1), - "omp_num_threads": lambda job: job.doc.get("omp_num_threads", 1), + "processes": lambda job: job.doc.get("processes", 1), + "threads_per_process": lambda job: job.doc.get( + "threads_per_process", 1 + ), } ) def b(job): @@ -728,7 +762,8 @@ def b(job): if submit_job_operation.name == "group": break assert all( - not callable(value) for value in submit_job_operation.directives.values() + not callable(value) + for value in submit_job_operation.primary_directives.values() ) def test_copy_conditions(self): @@ -1132,9 +1167,12 @@ def op1(job): def test_submit_operations(self): project = 
- operations = []
- for job in project:
- operations.extend(project._next_operations([(job,)]))
+ operations = list(
+ project._get_submission_operations(
+ [(job,) for job in project],
+ project._environment._get_default_directives(),
+ )
+ )
 assert len(list(MockScheduler.jobs())) == 0
 cluster_job_id = project._store_bundled(operations)
 with redirect_stderr(StringIO()):
@@ -1256,16 +1294,19 @@ def test_submit_status(self):

 def test_submit_operations_bad_directive(self):
 project = self.mock_project()
- operations = []
- for job in project:
- operations.extend(
- project._next_operations([(job,)], operation_names=["op1"])
+ operations = list(
+ project._get_submission_operations(
+ [(job,) for job in project],
+ project._environment._get_default_directives(),
 )
+ )
 assert len(list(MockScheduler.jobs())) == 0
 cluster_job_id = project._store_bundled(operations)
 stderr = StringIO()
 with redirect_stderr(stderr):
- project._submit_operations(_id=cluster_job_id, operations=operations)
+ project._submit_operations(
+ _id=cluster_job_id, operations=operations, force=True
+ )
 assert len(list(MockScheduler.jobs())) == 1
 assert "Some of the keys provided as part of the directives were not used by the template "
 "script, including: bad_directive\n" in stderr.getvalue()
@@ -1665,7 +1707,10 @@ def test_directives_hierarchy(self):
 )
 assert len(job_ops) == 1
 assert all(
- [job_op.directives.get("omp_num_threads", 0) == 4 for job_op in job_ops]
+ [
+ job_op.primary_directives.get("threads_per_process", 0) == 4
+ for job_op in job_ops
+ ]
 )
 job_ops = list(
 project._get_submission_operations(
@@ -1676,26 +1721,10 @@ def test_directives_hierarchy(self):
 )
 )
 assert len(job_ops) == 1
 assert all(
- [job_op.directives.get("omp_num_threads", 0) == 1 for job_op in job_ops]
- )
- # Test run JobOperations
- job_ops = list(
- project.groups["group2"]._create_run_job_operations(
- project._entrypoint, project._get_default_directives(), (job,)
- )
- )
- assert len(job_ops) == 1
- assert all(
- [job_op.directives.get("omp_num_threads", 0) == 4 for job_op in job_ops]
- )
- job_ops = list(
- project.groups["op3"]._create_run_job_operations(
- project._entrypoint, project._get_default_directives(), (job,)
- )
- )
- assert len(job_ops) == 1
- assert all(
- [job_op.directives.get("omp_num_threads", 0) == 1 for job_op in job_ops]
+ [
+ job_op.primary_directives.get("threads_per_process", 0) == 1
+ for job_op in job_ops
+ ]
 )

 def test_unique_group_operation_names(self):
@@ -1786,29 +1815,34 @@ class A(flow.FlowProject):

 group = A.make_group("group")

- @group(directives={"ngpu": 2, "nranks": 4})
+ @group(directives={"gpus_per_process": 1, "processes": 4})
 @A.operation
 def op1(job):
 pass

- @group(directives={"ngpu": 2, "nranks": 4})
+ @group(directives={"gpus_per_process": 1, "processes": 4})
 @A.operation
 def op2(job):
 pass

 @group
- @A.operation(directives={"nranks": 2})
+ @A.operation(directives={"processes": 2})
 def op3(job):
 pass

 project = self.mock_project(A)
 group = project.groups["group"]
 job = [j for j in project][0]
- directives = group._get_submission_directives(
- project._get_default_directives(), (job,)
+ submission_operations = list(
+ project._get_submission_operations(
+ [(job,)],
+ project._get_default_directives(),
+ )
 )
 assert all(
- [directives["ngpu"] == 2, directives["nranks"] == 4, directives["np"] == 4]
+ directives["gpus_per_process"] == 1 and directives["processes"] == 4
+ for submission_op in submission_operations
+ for directives in submission_op.directives_list
 )

 def test_flowgroup_repr(self):
@@ -1817,7 +1851,7 @@ class A(flow.FlowProject):

 group = A.make_group("group")

- @group(directives={"ngpu": 2, "nranks": 4})
+ @group(directives={"gpus_per_process": 1, "processes": 4})
 @A.operation
 def op1(job):
 pass
@@ -1828,15 +1862,15 @@ def op2(job):
 pass

 @group
- @A.operation(directives={"nranks": 2})
+ @A.operation(directives={"processes": 2})
 def op3(job):
 pass

 project = self.mock_project(A)
 rep_string = repr(project.groups["group"])
 assert all(op in rep_string for op in ["op1", "op2", "op3"])
- assert "'nranks': 4" in rep_string
- assert "'ngpu': 2" in rep_string
+ assert "'processes': 4" in rep_string
+ assert "'gpus_per_process': 1" in rep_string
 assert "submit_options=''" in rep_string
 assert "run_options=''" in rep_string
 assert "name='group'" in rep_string
diff --git a/tests/test_util.py b/tests/test_util.py
index 392d76b09..dacadaf48 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -77,7 +77,14 @@ class TestTemplateFilters:
 def mock_operations(self):
 class MockOp:
 def __init__(self, memory=None):
- self.directives = {"memory": memory}
+ self.primary_directives = {
+ "processes": 1,
+ "threads_per_process": 1,
+ "memory_per_cpu": memory,
+ "memory": memory,
+ "cpus": 1,
+ "gpus": 0,
+ }

 return [MockOp(1), MockOp(8), MockOp()]

@@ -94,7 +101,7 @@ def test_calc_memory_serial(self, mock_operations):
 def test_calc_memory_parallel(self, mock_operations):
 # Test when operations run in parallel
 assert calc_memory([mock_operations[0], mock_operations[1]], True) == 9
- assert calc_memory([mock_operations[2]], True) == 0
+ assert calc_memory([mock_operations[2]], True) is None
 assert (
 calc_memory(
 [mock_operations[0], mock_operations[1], mock_operations[2]], True

From b523c7abdca7701cccee4d9bdbef4b5f46152eda Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Thu, 29 Feb 2024 16:12:43 -0500
Subject: [PATCH 25/28] feat: Allow mixed MPI/nonMPI operation submission.
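
This patch enforces two rules when launcher types are mixed in one
submission: all MPI operations must request the same
(processes, threads_per_process, gpus_per_process) shape, and a non-MPI
operation may not need more cores or GPUs than a single MPI task provides.
A minimal self-contained sketch of that check (the function and error names
here are illustrative, not the flow API):

    def check_mixed_launchers(directives_list):
        """Validate MPI homogeneity and that non-MPI operations fit in one MPI task."""
        mpi = [d for d in directives_list if d["launcher"] == "mpi"]
        non_mpi = [d for d in directives_list if d["launcher"] is None]
        if not mpi:
            return
        # All MPI operations must request the same resource shape.
        shape = (mpi[0]["processes"], mpi[0]["threads_per_process"],
                 mpi[0]["gpus_per_process"])
        for d in mpi[1:]:
            if (d["processes"], d["threads_per_process"],
                    d["gpus_per_process"]) != shape:
                raise ValueError("Cannot submit non-homogeneous MPI jobs.")
        # A non-MPI operation may use at most one MPI task's cores and GPUs.
        for d in non_mpi:
            if d["cpus"] > shape[1] or d["gpus"] > shape[2]:
                raise ValueError("non-MPI operation exceeds one MPI task.")
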
---
 flow/directives.py | 68 +++++++++++++++++++++++++++++++++-------------
 1 file changed, 49 insertions(+), 19 deletions(-)

diff --git a/flow/directives.py b/flow/directives.py
index e71fa3a34..96c415a8c 100644
--- a/flow/directives.py
+++ b/flow/directives.py
@@ -214,15 +214,47 @@ def _list_of_dicts_to_dict_of_list(a):

 def _check_compatible_directives(directives_of_lists):
 """Routine checks for directives within a group."""
- if "mpi" in directives_of_lists["launcher"]:
- if (
- len(set(directives_of_lists["gpus_per_process"])) != 1
- or len(set(directives_of_lists["threads_per_process"])) != 1
- or len(set(directives_of_lists["processes"])) != 1
+ mpi_directives = [
+ i
+ for i, launcher in enumerate(directives_of_lists["launcher"])
+ if launcher == "mpi"
+ ]
+ if len(mpi_directives) > 0:
+ base_directives = {
+ "processes": directives_of_lists["processes"][mpi_directives[0]],
+ "gpus_per_process": directives_of_lists["gpus_per_process"][
+ mpi_directives[0]
+ ],
+ "threads_per_process": directives_of_lists["threads_per_process"][
+ mpi_directives[0]
+ ],
+ }
+ if len(mpi_directives > 1) and any(
+ directives_of_lists["processes"][i] != base_directives["processes"]
+ or directives_of_lists["gpus_per_process"][i]
+ != base_directives["gpus_per_process"]
+ or directives_of_lists["threads_per_process"][i]
+ != base_directives["threads_per_process"]
+ for i in mpi_directives[1:]
 ):
 raise SubmitError("Cannot submit non-homogeneous MPI jobs.")
- if None in directives_of_lists["launcher"]:
- raise SubmitError("Cannot submit MPI and nonMPI jobs together.")
+ if len(mpi_directives) != len(directives_of_lists["processes"]):
+ for i in range(len(directives_of_lists["processes"])):
+ if i in mpi_directives:
+ continue
+ if (
+ directives_of_lists["cpus"][i]
+ > base_directives["threads_per_process"]
+ ):
+ raise SubmitError(
+ "Cannot submit nonMPI job that requires more cores than "
+ "threads per MPI task."
+ )
+ if directives_of_lists["gpus"][i] > base_directives["gpus_per_process"]:
+ raise SubmitError(
+ "Cannot submit nonMPI job that requires more GPUs than "
+ "GPUs per MPI task."
+ )
 else:
 if len(set(directives_of_lists["gpus"])) > 1:
 warnings.warn(
@@ -267,6 +299,7 @@ def _check_bundle_directives(list_of_directives, parallel):

 def _bundle_directives_aggregation(list_of_directives, parallel):
 directives_of_lists = _list_of_dicts_to_dict_of_list(list_of_directives)
+ _check_bundle_directives(list_of_directives, parallel)
 _check_compatible_directives(directives_of_lists)
 # We know we don't have MPI operations here.
 if parallel:
@@ -285,24 +318,21 @@ def _bundle_directives_aggregation(list_of_directives, parallel):
 "gpus": gpus,
 "memory": memory,
 }
- # Each group will have a primary operation (the one that requests the most
- # resources. This may or may not be unique. We have to pick on for purposes
- # of scheduling though to properly request resources.
 walltime = flow.util.misc._tolerant_sum(
 directives_of_lists["walltime"], start=datetime.timedelta()
 )
 if "mpi" in directives_of_lists["launcher"]:
- max_memory_index = flow.util.misc._tolerant_argmax(
- filter(lambda x: x is not None, directives_of_lists["memory"])
- )
- if max_memory_index is None:
- memory_per_cpu = None
- else:
- memory_per_cpu = directives_of_lists["memory_per_cpu"][max_memory_index]
 # All MPI operations must be homogeneous can pick any one and any non-MPI ones are subsets
 # that should work correctly.
- primary_operation = list_of_directives[0]
+ primary_operation = list_of_directives[
+ list_of_directives["launcher"].index("mpi")
+ ]
 cpus = primary_operation["processes"] * primary_operation["threads_per_process"]
+ memory = flow.util.misc._tolerant_max(directives_of_lists["memory"])
+ if memory is None:
+ memory_per_cpu = None
+ else:
+ memory_per_cpu = memory / cpus
 return {
 "launcher": primary_operation["launcher"],
 "walltime": walltime,
@@ -313,7 +343,7 @@ def _bundle_directives_aggregation(list_of_directives, parallel):
 "cpus": cpus,
 "gpus": primary_operation["processes"]
 * primary_operation["gpus_per_process"],
- "memory": None if memory_per_cpu is None else cpus * memory_per_cpu,
+ "memory": memory,
 }
 # Serial non-MPI
 cpus = max(directives_of_lists["cpus"])

From 8f007b51cc22c00109c501b4a6e51b364b8a22fd Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Fri, 15 Mar 2024 09:30:41 -0400
Subject: [PATCH 26/28] (WIP)fix: bugs surrounding directives processing

---
 flow/directives.py | 13 ++++++------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/flow/directives.py b/flow/directives.py
index 96c415a8c..4517b0b7a 100644
--- a/flow/directives.py
+++ b/flow/directives.py
@@ -229,7 +229,7 @@ def _check_compatible_directives(directives_of_lists):
 mpi_directives[0]
 ],
 }
- if len(mpi_directives > 1) and any(
+ if len(mpi_directives) > 1 and any(
 directives_of_lists["processes"][i] != base_directives["processes"]
 or directives_of_lists["gpus_per_process"][i]
 != base_directives["gpus_per_process"]
@@ -291,16 +291,15 @@ def _group_directive_aggregation(group_directives):
 return primary_directive


-def _check_bundle_directives(list_of_directives, parallel):
- if "mpi" in list_of_directives["launcher"] and parallel:
+def _check_bundle_directives(directives_of_lists, parallel):
+ if "mpi" in directives_of_lists["launcher"] and parallel:
 raise SubmitError("Cannot run MPI operations in parallel.")
- _check_compatible_directives(list_of_directives)
+ _check_compatible_directives(directives_of_lists)


 def _bundle_directives_aggregation(list_of_directives, parallel):
 directives_of_lists = _list_of_dicts_to_dict_of_list(list_of_directives)
- _check_bundle_directives(list_of_directives, parallel)
- _check_compatible_directives(directives_of_lists)
+ _check_bundle_directives(directives_of_lists, parallel)
 # We know we don't have MPI operations here.
 if parallel:
@@ -325,7 +324,7 @@ def _bundle_directives_aggregation(list_of_directives, parallel):
 # All MPI operations must be homogeneous can pick any one and any non-MPI ones are subsets
 # that should work correctly.
 primary_operation = list_of_directives[
- list_of_directives["launcher"].index("mpi")
+ directives_of_lists["launcher"].index("mpi")
 ]
 cpus = primary_operation["processes"] * primary_operation["threads_per_process"]
 memory = flow.util.misc._tolerant_max(directives_of_lists["memory"])

From c86c4d417161f90122bfcb85820caa0f0de03bc6 Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Fri, 15 Mar 2024 09:31:34 -0400
Subject: [PATCH 27/28] test: Correct template test project launcher values

---
 tests/define_template_test_project.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/define_template_test_project.py b/tests/define_template_test_project.py
index cb5e257d4..a5e4f71ed 100644
--- a/tests/define_template_test_project.py
+++ b/tests/define_template_test_project.py
@@ -43,7 +43,7 @@ def omp_op(job):
 directives={
 "processes": TestProject.processes,
 "threads_per_process": TestProject.threads_per_process,
- "launcher": "mpi",
+ "launcher": TestProject.launcher,
 }
 )
 def hybrid_op(job):

From c17fd6f0f7db34d60ac45bbb7b04cf1e4a782e4f Mon Sep 17 00:00:00 2001
From: Brandon Butler
Date: Fri, 15 Mar 2024 09:32:34 -0400
Subject: [PATCH 28/28] test: Update template tests configuration generation

---
 tests/generate_template_reference_data.py | 53 +++++++++++------------
 1 file changed, 25 insertions(+), 28 deletions(-)

diff --git a/tests/generate_template_reference_data.py b/tests/generate_template_reference_data.py
index 15e15a15f..c6dce0890 100755
--- a/tests/generate_template_reference_data.py
+++ b/tests/generate_template_reference_data.py
@@ -9,12 +9,12 @@
 import operator
 import os
 import sys
+from contextlib import redirect_stdout
 from hashlib import sha1

 import jinja2
 import signac
 from define_template_test_project import TestProject
-from test_project import redirect_stdout

 import flow
 import flow.environments
@@ -28,11 +28,16 @@
 PROJECT_DIRECTORY = '/home/user/path with spaces and "quotes" and \\backslashes/'
 MOCK_EXECUTABLE = "/usr/local/bin/python"
 DEFAULT_BUNDLES = [
- ("omp_op", "parallel_op"),
- ("mpi_op", "op", "memory_op"),
- ("hybrid_op", "omp_op"),
+ {"bundles": [("omp_op", "parallel_op")], "parallel": [True, False]},
+ {"bundles": [("mpi_op", "op", "memory_op")], "parallel": [False]},
+ {"bundles": [("hybrid_op", "omp_op")], "parallel": [False]},
 ]
-DEFAULT_SETTING = {"bundles": DEFAULT_BUNDLES, "parallel": [True, False]}
+
+
+def set_bundles(partitions=None):
+ if partitions is None:
+ return DEFAULT_BUNDLES
+ return [{"partition": partitions, **bundle} for bundle in DEFAULT_BUNDLES]


 def cartesian(**kwargs):
@@ -71,45 +76,37 @@ def init(project):
 environments = {
 "environment.StandardEnvironment": [],
 "environments.xsede.Bridges2Environment": [
- {
- "partition": ["RM", "RM-shared", "GPU", "GPU-shared"],
- },
- {"partition": ["RM"], **DEFAULT_SETTING},
+ {"partition": ["RM", "RM-shared", "GPU", "GPU-shared"]},
+ *set_bundles(["RM"]),
 ],
 "environments.umich.GreatLakesEnvironment": [
- {
- "partition": ["standard", "gpu", "gpu_mig40"],
- },
- DEFAULT_SETTING,
+ {"partition": ["standard", "gpu", "gpu_mig40"]},
+ *set_bundles(),
 ],
- "environments.incite.SummitEnvironment": [{}, DEFAULT_SETTING],
+ "environments.incite.SummitEnvironment": [{}, *set_bundles()],
 "environments.incite.AndesEnvironment": [
 {"partition": ["batch", "gpu"]},
- {"partition": ["batch"], **DEFAULT_SETTING},
+ *set_bundles(["batch"]),
 ],
- "environments.umn.MangiEnvironment": [{}, DEFAULT_SETTING],
+ "environments.umn.MangiEnvironment": [{}, *set_bundles()],
"environments.xsede.ExpanseEnvironment": [ { "partition": ["compute", "shared", "gpu", "gpu-shared", "large-shared"], }, - {"partition": ["compute"], **DEFAULT_SETTING}, + *set_bundles(["compute"]), ], "environments.drexel.PicotteEnvironment": [ - { - "partition": ["def", "gpu"], - }, - {"partition": ["def"], **DEFAULT_SETTING}, + {"partition": ["def", "gpu"]}, + *set_bundles(["def"]), ], "environments.xsede.DeltaEnvironment": [ - { - "partition": ["cpu", "gpuA40x4", "gpuA100x4"], - }, - {"partition": ["cpu"], **DEFAULT_SETTING}, + {"partition": ["cpu", "gpuA40x4", "gpuA100x4"]}, + *set_bundles(["cpu"]), ], - "environments.incite.CrusherEnvironment": [{}, DEFAULT_SETTING], + "environments.incite.CrusherEnvironment": [{}, *set_bundles()], # Frontier cannot use partitions as logic requires gpu # in the name of partitions that are gpu nodes. - "environments.incite.FrontierEnvironment": [{}, DEFAULT_SETTING], + "environments.incite.FrontierEnvironment": [{}, *set_bundles()], "environments.purdue.AnvilEnvironment": [ { "partition": [ @@ -122,7 +119,7 @@ def init(project): "gpu", ], }, - {"partition": ["wholenode"], **DEFAULT_SETTING}, + *set_bundles(["wholenode"]), ], }