From 12238cef4a1b397eb142d6ea26c184cebe208474 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 2 Mar 2022 16:25:44 +0000 Subject: [PATCH 01/96] Split parsing from building --- pysd/building/__init__.py | 0 pysd/building/python/__init__.py | 0 pysd/building/python/imports.py | 78 + pysd/building/python/namespace.py | 145 ++ pysd/building/python/python_builder.py | 550 ++++++++ pysd/building/python/python_functions.py | 88 ++ pysd/building/python/python_utils.py | 61 + pysd/building/python/subscripts.py | 350 +++++ pysd/building/python/visitors.py | 1257 +++++++++++++++++ pysd/py_backend/components.py | 13 +- pysd/py_backend/data.py | 32 +- pysd/py_backend/decorators.py | 1 + pysd/py_backend/external.py | 69 +- pysd/py_backend/functions.py | 69 +- pysd/py_backend/lookups.py | 115 ++ pysd/py_backend/statefuls.py | 82 +- pysd/pysd.py | 19 +- pysd/tools/benchmarking.py | 80 +- pysd/translation/structures/__init__.py | 0 pysd/translation/structures/abstract_model.py | 168 +++ pysd/translation/structures/components.py | 272 ++++ pysd/translation/utils.py | 4 +- .../vensim/parsing_expr/common_grammar.peg | 18 + .../vensim/parsing_expr/components.peg | 36 + .../vensim/parsing_expr/element_object.peg | 47 + .../vensim/parsing_expr/file_sections.peg | 15 + .../vensim/parsing_expr/lookups.peg | 7 + .../vensim/parsing_expr/section_elements.peg | 12 + .../vensim/parsing_expr/sketch.peg | 58 + pysd/translation/vensim/vensim2py.py | 1 - pysd/translation/vensim/vensim_element.py | 474 +++++++ pysd/translation/vensim/vensim_section.py | 124 ++ pysd/translation/vensim/vensim_structures.py | 54 + pysd/translation/vensim/vensim_utils.py | 116 ++ pysd/translation/vensim/vensin_file.py | 276 ++++ pysd/translation/xmile/xmile2py.py | 2 + tests/conftest.py | 6 + tests/integration_test_factory.py | 61 +- tests/integration_test_vensim_pathway.py | 226 +-- ...subscript_individually_defined_stocks2.mdl | 4 +- .../pytest_integration_vensim_pathway.py | 523 +++++++ .../pytest_select_submodel.py | 9 +- .../vensim_parser/pytest_vensim_file.py | 54 + tests/test-models | 2 +- tests/unit_test_benchmarking.py | 2 +- tests/unit_test_external.py | 54 +- tests/unit_test_pysd.py | 64 +- 47 files changed, 5363 insertions(+), 335 deletions(-) create mode 100644 pysd/building/__init__.py create mode 100644 pysd/building/python/__init__.py create mode 100644 pysd/building/python/imports.py create mode 100644 pysd/building/python/namespace.py create mode 100644 pysd/building/python/python_builder.py create mode 100644 pysd/building/python/python_functions.py create mode 100644 pysd/building/python/python_utils.py create mode 100644 pysd/building/python/subscripts.py create mode 100644 pysd/building/python/visitors.py create mode 100644 pysd/py_backend/lookups.py create mode 100644 pysd/translation/structures/__init__.py create mode 100644 pysd/translation/structures/abstract_model.py create mode 100644 pysd/translation/structures/components.py create mode 100644 pysd/translation/vensim/parsing_expr/common_grammar.peg create mode 100644 pysd/translation/vensim/parsing_expr/components.peg create mode 100644 pysd/translation/vensim/parsing_expr/element_object.peg create mode 100644 pysd/translation/vensim/parsing_expr/file_sections.peg create mode 100644 pysd/translation/vensim/parsing_expr/lookups.peg create mode 100644 pysd/translation/vensim/parsing_expr/section_elements.peg create mode 100644 pysd/translation/vensim/parsing_expr/sketch.peg create mode 100644 pysd/translation/vensim/vensim_element.py create mode 100644 
pysd/translation/vensim/vensim_section.py create mode 100644 pysd/translation/vensim/vensim_structures.py create mode 100644 pysd/translation/vensim/vensim_utils.py create mode 100644 pysd/translation/vensim/vensin_file.py create mode 100644 tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py create mode 100644 tests/pytest_translation/vensim_parser/pytest_vensim_file.py diff --git a/pysd/building/__init__.py b/pysd/building/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pysd/building/python/__init__.py b/pysd/building/python/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pysd/building/python/imports.py b/pysd/building/python/imports.py new file mode 100644 index 00000000..bd69864d --- /dev/null +++ b/pysd/building/python/imports.py @@ -0,0 +1,78 @@ + +class ImportsManager(): + """ + Class to save the imported modules information for intelligent import + """ + _external_libs = {"numpy": "np", "xarray": "xr"} + _external_submodules = ["scipy"] + _internal_libs = [ + "functions", "statefuls", "external", "data", "lookups", "utils" + ] + + def __init__(self): + self._numpy, self._xarray, self._subs = False, False, False + self._functions, self._statefuls, self._external, self._data,\ + self._lookups, self._utils, self._scipy =\ + set(), set(), set(), set(), set(), set(), set() + + def add(self, module, function=None): + """ + Add a function from module. + + Parameters + ---------- + module: str + module name. + + function: str or None + function name. If None module will be set to true. + + """ + if function: + getattr(self, f"_{module}").add(function) + else: + setattr(self, f"_{module}", True) + + def get_header(self, outfile): + """ + Returns the importing information to print in the model file + + Parameters + ---------- + outfile: str + Name of the outfile to print in the header. + + Returns + ------- + text: str + Header of the translated model file. 
+ + """ + text =\ + f'"""\nPython model \'{outfile}\'\nTranslated using PySD\n"""\n\n' + + text += "from pathlib import Path\n" + + for module, shortname in self._external_libs.items(): + if getattr(self, f"_{module}"): + text += f"import {module} as {shortname}\n" + + for module in self._external_submodules: + if getattr(self, f"_{module}"): + text += "%(module)s import %(submodules)s\n" % { + "module": module, + "submodules": ", ".join(getattr(self, f"_{module}"))} + + text += "\n" + + for module in self._internal_libs: + if getattr(self, f"_{module}"): + text += "from pysd.py_backend.%(module)s import %(methods)s\n"\ + % { + "module": module, + "methods": ", ".join(getattr(self, f"_{module}"))} + + if self._subs: + text += "from pysd import subs\n" + + return text diff --git a/pysd/building/python/namespace.py b/pysd/building/python/namespace.py new file mode 100644 index 00000000..71250ec6 --- /dev/null +++ b/pysd/building/python/namespace.py @@ -0,0 +1,145 @@ +import re + +from unicodedata import normalize + +# used to create python safe names with the variable reserved_words +from keyword import kwlist +from builtins import __dir__ as bidir +from pysd.py_backend.components import __dir__ as cdir +from pysd.py_backend.data import __dir__ as ddir +from pysd.py_backend.decorators import __dir__ as dedir +from pysd.py_backend.external import __dir__ as edir +from pysd.py_backend.functions import __dir__ as fdir +from pysd.py_backend.statefuls import __dir__ as sdir +from pysd.py_backend.utils import __dir__ as udir + + +class NamespaceManager: + reserved_words = set( + dir() + bidir() + cdir() + ddir() + dedir() + edir() + fdir() + + sdir() + udir()).union(kwlist) + + def __init__(self, parameters=[]): + self.used_words = self.reserved_words.copy() + self.namespace = {"Time": "time"} + self.cleanspace = {"time": "time"} + for parameter in parameters: + self.add_to_namespace(parameter) + + def add_to_namespace(self, string): + self.make_python_identifier(string, add_to_namespace=True) + + def make_python_identifier(self, string, prefix=None, add_to_namespace=False): + """ + Takes an arbitrary string and creates a valid Python identifier. + + If the input string is in the namespace, return its value. + + If the python identifier created is already in the namespace, + but the input string is not (ie, two similar strings resolve to + the same python identifier) + + or if the identifier is a reserved word in the reserved_words + list, or is a python default reserved word, + adds _1, or if _1 is in the namespace, _2, etc. + + Parameters + ---------- + string: str + The text to be converted into a valid python identifier. + + namespace: dict + Map of existing translations into python safe identifiers. + This is to ensure that two strings are not translated into + the same python identifier. If string is already in the namespace + its value will be returned. Otherwise, namespace will be mutated + adding string as a new key and its value. + + Returns + ------- + identifier: str + A vaild python identifier based on the input string. 
+ + Examples + -------- + >>> make_python_identifier('Capital') + 'capital' + + >>> make_python_identifier('multiple words') + 'multiple_words' + + >>> make_python_identifier('multiple spaces') + 'multiple_spaces' + + When the name is a python keyword, add '_1' to differentiate it + >>> make_python_identifier('for') + 'for_1' + + Remove leading and trailing whitespace + >>> make_python_identifier(' whitespace ') + 'whitespace' + + Remove most special characters outright: + >>> make_python_identifier('H@t tr!ck') + 'ht_trck' + + add valid string to leading digits + >>> make_python_identifier('123abc') + 'nvs_123abc' + + already in namespace + >>> make_python_identifier('Var$', namespace={'Var$': 'var'}) + 'var' + + namespace conflicts + >>> make_python_identifier('Var@', namespace={'Var$': 'var'}) + 'var_1' + + >>> make_python_identifier('Var$', namespace={'Var@': 'var', + ... 'Var%':'var_1'}) + 'var_2' + + References + ---------- + Identifiers must follow the convention outlined here: + https://docs.python.org/2/reference/lexical_analysis.html#identifiers + + """ + s = string.lower() + clean_s = s.replace(" ", "_") + + if prefix is None and clean_s in self.cleanspace: + return self.cleanspace[clean_s] + + # Make spaces into underscores + s = re.sub(r"[\s\t\n_]+", "_", s) + + # remove accents, diaeresis and others ó -> o + s = normalize("NFD", s).encode("ascii", "ignore").decode("utf-8") + + # Remove invalid characters + s = re.sub(r"[^0-9a-zA-Z_]", "", s) + + # If leading character is not a letter add nvs_. + # Only letters can be leading characters. + if prefix is not None: + s = prefix + "_" + s + elif re.findall(r"^[0-9]", s): + s = "nvs_" + s + elif re.findall(r"^_", s): + s = "nvs" + s + + # Check that the string is not a python identifier + identifier = s + i = 1 + while identifier in self.used_words: + identifier = s + '_' + str(i) + i += 1 + + self.used_words.add(identifier) + + if add_to_namespace: + self.namespace[string] = identifier + self.cleanspace[clean_s] = identifier + + return identifier diff --git a/pysd/building/python/python_builder.py b/pysd/building/python/python_builder.py new file mode 100644 index 00000000..10b70aa5 --- /dev/null +++ b/pysd/building/python/python_builder.py @@ -0,0 +1,550 @@ +import textwrap +import black +import json + +from pysd.translation.structures.abstract_model import\ + AbstractComponent, AbstractElement, AbstractModel, AbstractSection + +from . 
import visitors as vs +from .namespace import NamespaceManager +from .subscripts import SubscriptManager +from .imports import ImportsManager +from pysd._version import __version__ + + +class ModelBuilder: + + def __init__(self, abstract_model: AbstractModel): + self.__dict__ = abstract_model.__dict__.copy() + self.sections = [ + SectionBuilder(section) + for section in abstract_model.sections + ] + self.macrospace = { + section.name: section for section in self.sections[1:]} + + def build_model(self): + # TODO: add special building for main + for section in self.sections: + section.macrospace = self.macrospace + section.build_section() + + return self.sections[0].path + + +class SectionBuilder: + + def __init__(self, abstract_section: AbstractSection): + self.__dict__ = abstract_section.__dict__.copy() + self.root = self.path.parent + self.model_name = self.path.with_suffix("").name + self.subscripts = SubscriptManager( + abstract_section.subscripts, self.root) + self.elements = [ + ElementBuilder(element, self) + for element in abstract_section.elements + ] + self.namespace = NamespaceManager(self.params) + self.imports = ImportsManager() + self.macrospace = {} + self.dependencies = {} + + def __str__(self): + return "SectionBuilder " + self.path.name + + def build_section(self): + # Create namespace + for element in self.elements: + self.namespace.add_to_namespace(element.name) + identifier = self.namespace.namespace[element.name] + element.identifier = identifier + self.subscripts.elements[identifier] = element.subscripts + + # TODO + # 1. split control variables, main element, elements from other modules + # 2. build elements (only build 1 time!) + # 3. write model + + for element in self.elements: + element.build_element() + self.dependencies[element.identifier] = element.dependencies + for subelement in element.objects.values(): + if "calls" in subelement: + self.dependencies[subelement["name"]] = subelement["calls"] + + if self.split: + self._build_modular(self.views_dict) + else: + self._build() + + def process_views_tree(self, view_name, view_content, wdir): + """ + Creates a directory tree based on the elements_per_view dictionary. + If it's the final view, it creates a file, if not, it creates a folder. 
+ """ + if isinstance(view_content, set): + # will become a module + + # convert subview elements names to python names + view_content = { + self.namespace.cleanspace[var] for var in view_content + } + + # get subview elements + subview_elems = [ + element for element in self.elements_remaining + if element.identifier in view_content + ] + + # remove elements from remaining ones + [ + self.elements_remaining.remove(element) + for element in subview_elems + ] + + self._build_separate_module(subview_elems, view_name, wdir) + + return sorted(view_content) + + else: + # the current view has subviews + wdir = wdir.joinpath(view_name) + wdir.mkdir(exist_ok=True) + return { + subview_name: + self.process_views_tree(subview_name, subview_content, wdir) + for subview_name, subview_content in view_content.items() + } + + def _build_modular(self, elements_per_view): + self.elements_remaining = self.elements.copy() + elements_per_view = self.process_views_tree( + "modules_" + self.model_name, elements_per_view, self.root) + # building main file using the build function + self._build_main_module(self.elements_remaining) + + for file, values in { + "modules_%s/_modules": elements_per_view, + "_namespace_%s": self.namespace.namespace, + "_subscripts_%s": self.subscripts.subscripts, + "_dependencies_%s": self.dependencies}.items(): + + with self.root.joinpath( + file % self.model_name).with_suffix( + ".json").open("w") as outfile: + json.dump(values, outfile, indent=4, sort_keys=True) + + def _build_separate_module(self, elements, module_name, module_dir): + """ + Constructs and writes the python representation of a specific model + module, when the split_views=True in the read_vensim function. + + Parameters + ---------- + elements: list + Elements belonging to the module module_name. + + module_name: str + Name of the module + + module_dir: str + Path of the directory where module files will be stored. + + Returns + ------- + None + + """ + text = textwrap.dedent(''' + """ + Module %(module_name)s + Translated using PySD version %(version)s + """ + ''' % { + "module_name": module_name, + "version": __version__, + }) + funcs = self._generate_functions(elements) + text += funcs + text = black.format_file_contents( + text, fast=True, mode=black.FileMode()) + + outfile_name = module_dir.joinpath(module_name + ".py") + + with outfile_name.open("w", encoding="UTF-8") as out: + out.write(text) + + def _build_main_module(self, elements): + """ + Constructs and writes the python representation of the main model + module, when the split_views=True in the read_vensim function. + + Parameters + ---------- + elements: list + Elements belonging to the main module. Ideally, there should only be + the initial_time, final_time, saveper and time_step, functions, though + there might be others in some situations. Each element is a + dictionary, with the various components needed to assemble a model + component in python syntax. This will contain multiple entries for + elements that have multiple definitions in the original file, and + which need to be combined. + + Returns + ------- + None or text: None or str + If file_name="return" it will return the content of the output file + instead of saving it. It is used for testing. 
+ + """ + # separating between control variables and rest of variables + control_vars, funcs = self._build_variables(elements) + + self.imports.add("utils", "load_model_data") + self.imports.add("utils", "load_modules") + + # import of needed functions and packages + text = self.imports.get_header(self.path.name) + + # import namespace from json file + text += textwrap.dedent(""" + __pysd_version__ = '%(version)s' + + __data = { + 'scope': None, + 'time': lambda: 0 + } + + _root = Path(__file__).parent + + _namespace, _subscript_dict, _dependencies, _modules = load_model_data( + _root, "%(model_name)s") + """ % { + "model_name": self.model_name, + "version": __version__ + }) + + text += self._get_control_vars(control_vars) + + text += textwrap.dedent(""" + # load modules from modules_%(model_name)s directory + exec(load_modules("modules_%(model_name)s", _modules, _root, [])) + + """ % { + "model_name": self.model_name, + }) + + text += funcs + text = black.format_file_contents(text, fast=True, mode=black.FileMode()) + + with self.path.open("w", encoding="UTF-8") as out: + out.write(text) + + def _build(self): + control_vars, funcs = self._build_variables(self.elements) + + text = self.imports.get_header(self.path.name) + text += textwrap.dedent(""" + __pysd_version__ = '%(version)s' + + __data = { + 'scope': None, + 'time': lambda: 0 + } + + _root = Path(__file__).parent + + _subscript_dict = %(subscript_dict)s + + _namespace = %(namespace)s + + _dependencies = %(dependencies)s + """ % { + "subscript_dict": repr(self.subscripts.subscripts), + "namespace": repr(self.namespace.namespace), + "dependencies": repr(self.dependencies), + "version": __version__, + }) + + text += self._get_control_vars(control_vars) + funcs + + text = black.format_file_contents( + text, fast=True, mode=black.FileMode()) + + # this is used for testing + if not self.path: + return text + + with self.path.open("w", encoding="UTF-8") as out: + out.write(text) + + def _build_variables(self, elements): + """ + Build model variables (functions) and separate then in control variables + and regular variables. + + Returns + ------- + control_vars, regular_vars: tuple, str + control_vars is a tuple of length 2. First element is the dictionary + of original control vars. Second is the string to add the control + variables' functions. regular_vars is the string to add the regular + variables' functions. 
+ + """ + # returns of the control variables + control_vars_dict = { + "initial_time": "__data['time'].initial_time()", + "final_time": "__data['time'].final_time()", + "time_step": "__data['time'].time_step()", + "saveper": "__data['time'].saveper()" + } + regular_vars = [] + control_vars = [] + + for element in elements: + if element.identifier in control_vars_dict: + # change the return expression in the element and update the dict + # with the original expression + control_vars_dict[element.identifier], element.expression =\ + element.expression, control_vars_dict[element.identifier] + control_vars.append(element) + else: + regular_vars.append(element) + + if len(control_vars) == 0: + # macro objects, no control variables + control_vars_dict = "" + else: + control_vars_dict = """ + _control_vars = { + "initial_time": lambda: %(initial_time)s, + "final_time": lambda: %(final_time)s, + "time_step": lambda: %(time_step)s, + "saveper": lambda: %(saveper)s + } + """ % control_vars_dict + + return (control_vars_dict, + self._generate_functions(control_vars)),\ + self._generate_functions(regular_vars) + + def _generate_functions(self, elements): + """ + Builds all model elements as functions in string format. + NOTE: this function calls the build_element function, which updates the + import_modules. + Therefore, it needs to be executed before the_generate_automatic_imports + function. + + Parameters + ---------- + elements: dict + Each element is a dictionary, with the various components needed to + assemble a model component in python syntax. This will contain + multiple entries for elements that have multiple definitions in the + original file, and which need to be combined. + + Returns + ------- + funcs: str + String containing all formated model functions + + """ + return "\n".join([element.build_element_out() for element in elements]) + + def _get_control_vars(self, control_vars): + """ + Create the section of control variables + + Parameters + ---------- + control_vars: str + Functions to define control variables. + + Returns + ------- + text: str + Control variables section and header of model variables section. 
+ + """ + text = textwrap.dedent(""" + ####################################################################### + # CONTROL VARIABLES # + ####################################################################### + %(control_vars_dict)s + def _init_outer_references(data): + for key in data: + __data[key] = data[key] + + + def time(): + return __data['time']() + + """ % {"control_vars_dict": control_vars[0]}) + + text += control_vars[1] + + text += textwrap.dedent(""" + ####################################################################### + # MODEL VARIABLES # + ####################################################################### + """) + + return text + + +class SubSectionBuilder(SectionBuilder): + def __init__(self, abstract_section: AbstractSection): + pass + # TODO Use an intermediate class to split model, this calls could be inexistent and point to Section + # Namespace, subscripts and imports should point to parent section, others should remain in subsection + + +class ElementBuilder: + + def __init__(self, abstract_element: AbstractElement, section: SectionBuilder): + self.__dict__ = abstract_element.__dict__.copy() + self.type = None + self.subtype = None + self.arguments = getattr(self.components[0], "arguments", "") + self.components = [ + ComponentBuilder(component, self, section) + for component in abstract_element.components + ] + self.section = section + self.subscripts = section.subscripts.make_merge_list( + [component.subscripts[0] for component in self.components]) + self.subs_dict = section.subscripts.make_coord_dict(self.subscripts) + self.dependencies = {} + self.objects = {} + + def build_element(self): + # TODO think better how to build the components at once to build + # in one declaration the external objects + # TODO include some kind of magic vectorization to identify patterns + # that can be easily vecorized (GET, expressions, Stocks...) + expressions = [] + for component in self.components: + expr, subs = component.build_component() + if expr is None: + continue + else: + subs = { + esubs: subs[csubs] + for csubs, esubs in zip(subs, self.subscripts) + } + expressions.append({"expr": expr, "subs": subs}) + + if len(expressions) > 1: + # NUMPY: xrmerge would be sustitute by a multiple line definition + # e.g.: + # value = np.empty((len(dim1), len(dim2))) + # value[:, 0] = expression1 + # value[:, 1] = expression2 + # return value + # This allows reference to the same variable + # from: VAR[A] = 5; VAR[B] = 2*VAR[A] + # to: value[0] = 5; value[1] = 2*value[0] + self.section.imports.add("numpy") + self.pre_expression =\ + "value = xr.DataArray(np.nan, {%s}, %s)\n" % ( + ", ".join("'%(dim)s': _subscript_dict['%(dim)s']" % + {"dim": subs} for subs in self.subscripts), + self.subscripts) + for expression in expressions: + if expression["expr"].subscripts: + # get the values + # NUMPY not necessary + expression["expr"].lower_order(0, force_0=True) + expression["expr"].expression += ".values" + self.pre_expression += "value.loc[%(subs)s] = %(expr)s\n" % ( + expression) + self.expression = "value" + else: + self.pre_expression = "" + self.expression = expressions[0]["expr"] + + self.type = ", ".join( + set(component.type for component in self.components) + ) + self.subtype = ", ".join( + set(component.subtype for component in self.components) + ) + + def build_element_out(self): + """ + Returns a string that has processed a single element dictionary. + + Returns + ------- + func: str + The function to write in the model file. 
+ + """ + # TODO: merge with the previous build to do all at once + contents = self.pre_expression + "return %s" % self.expression + + self.subs_dec = "" + self.subs_doc = "None" + + if self.subscripts: + # We add the list of the subs to the __doc__ of the function + # this will give more information to the user and make possible + # to rewrite subscripted values with model.run(params=X) or + # model.run(initial_condition=(n,x)) + self.subs_doc = "%s" % self.subscripts + self.subs_dec =\ + "@subs(%s, _subscript_dict)" % self.subscripts + self.section.imports.add("subs") + + objects = "\n\n".join([ + value["expression"] for value in self.objects.values() + if value["expression"] is not None + ]) + + indent = 12 + + self.contents = contents.replace("\n", "\n" + " " * (indent+4)) + self.objects = objects.replace("\n", "\n" + " " * indent) + + # convert newline indicator and add expected level of indentation + # TODO check if this is neccessary + self.documentation = self.documentation.replace( + "\\", "\n").replace("\n", "\n" + "" * indent) + + return textwrap.dedent(''' + %(subs_dec)s + def %(identifier)s(%(arguments)s): + """ + Real Name: %(name)s + Original Eqn: + Units: %(units)s + Limits: %(range)s + Type: %(type)s + Subtype: %(subtype)s + Subs: %(subscripts)s + + %(documentation)s + """ + %(contents)s + + + %(objects)s + ''' % self.__dict__) + + +class ComponentBuilder: + + def __init__(self, abstract_component: AbstractComponent, + element: ElementBuilder, section: SectionBuilder): + self.__dict__ = abstract_component.__dict__.copy() + self.element = element + self.section = section + if not hasattr(self, "keyword"): + self.keyword = None + + def build_component(self): + self.subscripts_dict = self.section.subscripts.make_coord_dict( + self.subscripts[0]) + return (vs.ASTVisitor(self).visit(), self.subscripts_dict) diff --git a/pysd/building/python/python_functions.py b/pysd/building/python/python_functions.py new file mode 100644 index 00000000..bdde7130 --- /dev/null +++ b/pysd/building/python/python_functions.py @@ -0,0 +1,88 @@ + +# functions that can be diretcly applied over an array +functionspace = { + # directly build functions without dependencies + "elmcount": ("len(_subscript_dict['%(0)s'])", None), + + # directly build numpy based functions + "abs": ("np.abs(%(0)s)", ("numpy",)), + "min": ("np.minimum(%(0)s, %(1)s)", ("numpy",)), + "max": ("np.maximum(%(0)s, %(1)s)", ("numpy",)), + "exp": ("np.exp(%(0)s)", ("numpy",)), + "sin": ("np.sin(%(0)s)", ("numpy",)), + "cos": ("np.cos(%(0)s)", ("numpy",)), + "tan": ("np.tan(%(0)s)", ("numpy",)), + "arcsin": ("np.arcsin(%(0)s)", ("numpy",)), + "arccos": ("np.arccos(%(0)s)", ("numpy",)), + "arctan": ("np.arctan(%(0)s)", ("numpy",)), + "sinh": ("np.sinh(%(0)s)", ("numpy",)), + "cosh": ("np.cosh(%(0)s)", ("numpy",)), + "tanh": ("np.tanh(%(0)s)", ("numpy",)), + "sqrt": ("np.sqrt(%(0)s)", ("numpy",)), + "ln": ("np.log(%(0)s)", ("numpy",)), + "log": ("(np.log(%(0)s)/np.log(%(1)s))", ("numpy",)), + # NUMPY: "invert_matrix": ("np.linalg.inv(%(0)s)", ("numpy",)), + + # vector functions with axis to apply over + # NUMPY: + # "prod": "np.prod(%(0)s, axis=%(axis)s)", ("numpy",)), + # "sum": "np.sum(%(0)s, axis=%(axis)s)", ("numpy",)), + # "vmax": "np.max(%(0)s, axis=%(axis)s)", ("numpy", )), + # "vmin": "np.min(%(0)s, axis=%(axis)s)", ("numpy",)) + "prod": ("prod(%(0)s, dim=%(axis)s)", ("functions", "prod")), + "sum": ("sum(%(0)s, dim=%(axis)s)", ("functions", "sum")), + "vmax": ("vmax(%(0)s, dim=%(axis)s)", ("functions", "vmax")), + "vmin": 
("vmin(%(0)s, dim=%(axis)s)", ("functions", "vmin")),
+
+    # functions defined in pysd.py_backend.functions
+    "active_initial": ( # TODO replace time by stage when doing a non compatible version
+        "active_initial(__data['time'], lambda: %(0)s, %(1)s)",
+        ("functions", "active_initial")),
+    "if_then_else": (
+        "if_then_else(%(0)s, lambda: %(1)s, lambda: %(2)s)",
+        ("functions", "if_then_else")),
+    "integer": (
+        "integer(%(0)s)",
+        ("functions", "integer")),
+    "invert_matrix": ( # NUMPY: remove
+        "invert_matrix(%(0)s)",
+        ("functions", "invert_matrix")), # NUMPY: remove
+    "modulo": (
+        "modulo(%(0)s, %(1)s)",
+        ("functions", "modulo")),
+    "pulse": (
+        "pulse(__data['time'], %(0)s, %(1)s)",
+        ("functions", "pulse")),
+    "pulse_train": (
+        "pulse_train(__data['time'], %(0)s, %(1)s, %(2)s, %(3)s)",
+        ("functions", "pulse_train")),
+    "quantum": (
+        "quantum(%(0)s, %(1)s)",
+        ("functions", "quantum")),
+    "ramp": (
+        "ramp(__data['time'], %(0)s, %(1)s, %(2)s)",
+        ("functions", "ramp")),
+    "step": (
+        "step(__data['time'], %(0)s, %(1)s)",
+        ("functions", "step")),
+    "xidz": (
+        "xidz(%(0)s, %(1)s, %(2)s)",
+        ("functions", "xidz")),
+    "zidz": (
+        "zidz(%(0)s, %(1)s)",
+        ("functions", "zidz")),
+
+    # random functions must have the shape of the component subscripts
+    # most of them are shifted, scaled and truncated
+    # TODO: it is difficult to find the same parametrization in python,
+    # maybe build a new model
+    "random_0_1": (
+        "np.random.uniform(0, 1, size=%(size)s)",
+        ("numpy",)),
+    "random_uniform": (
+        "np.random.uniform(%(0)s, %(1)s, size=%(size)s)",
+        ("numpy",)),
+    "random_normal": (
+        "stats.truncnorm.rvs(%(0)s, %(1)s, loc=%(2)s, scale=%(3)s, size=%(size)s)",
+        ("scipy", "stats")),
+}
diff --git a/pysd/building/python/python_utils.py b/pysd/building/python/python_utils.py
new file mode 100644
index 00000000..1836bdac
--- /dev/null
+++ b/pysd/building/python/python_utils.py
@@ -0,0 +1,61 @@
+import re
+import warnings
+import numpy as np
+
+# used to create python safe names with the variable reserved_words
+from keyword import kwlist
+from builtins import __dir__ as bidir
+from pysd.py_backend.components import __dir__ as cdir
+from pysd.py_backend.data import __dir__ as ddir
+from pysd.py_backend.decorators import __dir__ as dedir
+from pysd.py_backend.external import __dir__ as edir
+from pysd.py_backend.functions import __dir__ as fdir
+from pysd.py_backend.statefuls import __dir__ as sdir
+from pysd.py_backend.utils import __dir__ as udir
+
+
+reserved_words = set(
+    dir() + bidir() + cdir() + ddir() + dedir() + edir() + fdir() +
+    sdir() + udir()).union(kwlist)
+
+
+def simplify_subscript_input(coords, subscript_dict, return_full, merge_subs):
+    """
+    Parameters
+    ----------
+    coords: dict
+        Coordinates to write in the model file.
+
+    subscript_dict: dict
+        The subscript dictionary of the model file.
+
+    return_full: bool
+        If True, when coords == subscript_dict, '_subscript_dict'
+        will be returned.
+
+    merge_subs: list of strings
+        List of the final subscript range of the python array after
+        merging with other objects.
+
+    Returns
+    -------
+    coords: str
+        The equations to generate the coord dictionary in the model file.
+
+    """
+
+    if coords == subscript_dict and return_full:
+        # variable defined with all the subscripts
+        return "_subscript_dict"
+
+    coordsp = []
+    for ndim, (dim, coord) in zip(merge_subs, coords.items()):
+        # find dimensions that can be retrieved from _subscript_dict
+        if coord == subscript_dict[dim]:
+            # use _subscript_dict
+            coordsp.append(f"'{ndim}': _subscript_dict['{dim}']")
+        else:
+            # write whole dict
+            coordsp.append(f"'{ndim}': {coord}")
+
+    return "{" + ", ".join(coordsp) + "}"
diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py
new file mode 100644
index 00000000..d004f0a9
--- /dev/null
+++ b/pysd/building/python/subscripts.py
@@ -0,0 +1,350 @@
+from multiprocessing.sharedctypes import Value
+import warnings
+from pathlib import Path
+import numpy as np
+from pysd.translation.structures.abstract_model import AbstractSubscriptRange
+from pysd.py_backend.external import ExtSubscript
+from typing import List
+
+
+class SubscriptManager:
+    def __init__(self, abstract_subscripts: List[AbstractSubscriptRange],
+                 _root: Path):
+        self._root = _root
+        self._copied = []
+        self.mapping = {}
+        self.subscripts = abstract_subscripts
+        self.elements = {}
+        self.subranges = self._get_main_subscripts()
+        self.subscript2num = self._get_subscript2num()
+        # TODO: manage subscript mapping
+
+    @property
+    def subscripts(self):
+        return self._subscripts
+
+    @subscripts.setter
+    def subscripts(self, abstract_subscripts):
+        self._subscripts = {}
+        missing = []
+        for sub in abstract_subscripts:
+            self.mapping[sub.name] = sub.mapping
+            if isinstance(sub.subscripts, list):
+                # regular definition of subscripts
+                self._subscripts[sub.name] = sub.subscripts
+            elif isinstance(sub.subscripts, str):
+                # copied subscripts, these will always be a subrange,
+                # so we need to prevent them from being saved as a main range
+                self._copied.append(sub.name)
+                self.mapping[sub.name].append(sub.subscripts)
+                if sub.subscripts in self._subscripts:
+                    self._subscripts[sub.name] =\
+                        self._subscripts[sub.subscripts]
+                else:
+                    missing.append(sub)
+            elif isinstance(sub.subscripts, dict):
+                # subscript from file
+                self._subscripts[sub.name] = ExtSubscript(
+                    file_name=sub.subscripts["file"],
+                    sheet=sub.subscripts["tab"],
+                    firstcell=sub.subscripts["firstcell"],
+                    lastcell=sub.subscripts["lastcell"],
+                    prefix=sub.subscripts["prefix"],
+                    root=self._root).subscript
+            else:
+                raise ValueError(
+                    f"Invalid definition of subscript {sub.name}:\n\t"
+                    + str(sub.subscripts))
+
+        while missing:
+            # second loop for copied subscripts
+            sub = missing.pop()
+            self._subscripts[sub.name] =\
+                self._subscripts[sub.subscripts]
+
+    def _get_main_subscripts(self):
+        """
+        Returns a dictionary with the main ranges as keys and their
+        subranges as values.
+ """ + subscript_sets = { + name: set(subs) for name, subs in self.subscripts.items()} + + subranges = {} + for range, subs in subscript_sets.items(): + # current subscript range + subranges[range] = [] + for subrange, subs2 in subscript_sets.items(): + if range == subrange: + # pass current range + continue + elif subs == subs2: + # range is equal to the subrange, as Vensim does + # the main range will be the first one alphabetically + # make it case insensitive + range_l = range.replace(" ", "_").lower() + subrange_l = subrange.replace(" ", "_").lower() + if range_l < subrange_l and range not in self._copied: + subranges[range].append(subrange) + else: + # copied subscripts ranges or subscripts ranges + # that come later alphabetically + del subranges[range] + break + elif subs2.issubset(subs): + # subrange is a subset of range, append it to the list + subranges[range].append(subrange) + elif subs2.issuperset(subs): + # it exist a range that contents the elements of the range + del subranges[range] + break + + return subranges + + def _get_subscript2num(self): + """ + Build a dictionary to return the numeric value or values of a + subscript or subscript range. + """ + s2n = {} + for range, subranges in self.subranges.items(): + # a main range is direct to return + s2n[range.replace(" ", "_").lower()] = ( + f"np.arange(1, len(_subscript_dict['{range}'])+1)", + {range: self.subscripts[range]} + ) + for i, sub in enumerate(self.subscripts[range], start=1): + # a subscript must return its numeric position + # in the main range + s2n[sub.replace(" ", "_").lower()] = (str(i), {}) + for subrange in subranges: + # subranges may return the position of each subscript + # in the main range + sub_index = [ + self.subscripts[range].index(sub)+1 + for sub in self.subscripts[subrange]] + + if np.all( + sub_index + == np.arange(sub_index[0], sub_index[0]+len(sub_index))): + # subrange definition can be simplified with a range + subsarray = f"np.arange({sub_index[0]}, "\ + f"len(_subscript_dict['{subrange}'])+{sub_index[0]})" + else: + # subrange definition cannot be simplified + subsarray = f"np.array({sub_index})" + + s2n[subrange.replace(" ", "_").lower()] = ( + subsarray, + {subrange: self.subscripts[subrange]} + ) + + return s2n + + def find_subscript_name(self, element, avoid=[]): + """ + Given a subscript dictionary, and a member of a subscript family, + return the first key of which the member is within the value list. + If element is already a subscript name, return that. + + Parameters + ---------- + element: str + Subscript or subscriptrange name to find. + avoid: list (optional) + List of subscripts to avoid. Default is an empty list. + + Returns + ------- + + Examples + -------- + >>> find_subscript_name('D') + 'Dim2' + >>> find_subscript_name('B') + 'Dim1' + >>> find_subscript_name('B', avoid=['Dim1']) + 'Dim2' + + """ + if element in self.subscripts.keys(): + return element + + for name, elements in self.subscripts.items(): + if element in elements and name not in avoid: + return name + + def make_coord_dict(self, subs): + """ + This is for assisting with the lookup of a particular element. + + Parameters + ---------- + subs: list of strings + Coordinates, either as names of dimensions, or positions within + a dimension. + + Returns + ------- + coordinates: dict + Coordinates needed to access the xarray quantities we are + interested in. 
+ + Examples + -------- + >>> make_coord_dict(['Dim1', 'D']) + {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D']} + + """ + sub_elems_list = [y for x in self.subscripts.values() for y in x] + coordinates = {} + for sub in subs: + if sub in sub_elems_list: + name = self.find_subscript_name( + sub, avoid=subs + list(coordinates)) + coordinates[name] = [sub] + else: + if sub.endswith("!"): + coordinates[sub] = self.subscripts[sub[:-1]] + else: + coordinates[sub] = self.subscripts[sub] + return coordinates + + def make_merge_list(self, subs_list, element=""): + """ + This is for assisting when building xrmerge. From a list of subscript + lists returns the final subscript list after mergin. Necessary when + merging variables with subscripts comming from different definitions. + + Parameters + ---------- + subs_list: list of lists of strings + Coordinates, either as names of dimensions, or positions within + a dimension. + element: str (optional) + Element name, if given it will be printed with any error or + warning message. Default is "". + + Returns + ------- + dims: list + Final subscripts after merging. + + Examples + -------- + >>> sm = SubscriptManager() + >>> sm.subscripts = {"upper": ["A", "B"], "all": ["A", "B", "C"]} + >>> sm.make_merge_list([['upper'], ['C']]) + ['all'] + + """ + coords_set = [set() for i in range(len(subs_list[0]))] + coords_list = [ + self.make_coord_dict(subs) + for subs in subs_list + ] + + # update coords set + [[coords_set[i].update(coords[dim]) for i, dim in enumerate(coords)] + for coords in coords_list] + + dims = [None] * len(coords_set) + # create an array with the name of the subranges for all + # merging elements + dims_list = np.array([ + list(coords) for coords in coords_list]).transpose() + indexes = np.arange(len(dims)) + + for i, coord2 in enumerate(coords_set): + dims1 = [ + dim for dim in dims_list[i] + if dim is not None and set(self.subscripts[dim]) == coord2 + ] + if dims1: + # if the given coordinate already matches return it + dims[i] = dims1[0] + else: + # find a suitable coordinate + other_dims = dims_list[indexes != i] + for name, elements in self.subscripts.items(): + if coord2 == set(elements) and name not in other_dims: + dims[i] = name + break + + if not dims[i]: + # the dimension is incomplete use the smaller + # dimension that completes it + for name, elements in self.subscripts.items(): + if coord2.issubset(set(elements))\ + and name not in other_dims: + dims[i] = name + warnings.warn( + element + + "\nDimension given by subscripts:" + + "\n\t{}\nis incomplete ".format(coord2) + + "using {} instead.".format(name) + + "\nSubscript_dict:" + + "\n\t{}".format(self.subscripts) + ) + break + + if not dims[i]: + for name, elements in self.subscripts.items(): + if coord2 == set(elements): + j = 1 + while name + str(j) in self.subscripts.keys(): + j += 1 + self.subscripts[name + str(j)] = elements + dims[i] = name + str(j) + warnings.warn( + element + + "\nAdding new subscript range to" + + " subscript_dict:\n" + + name + str(j) + ": " + ', '.join(elements)) + break + + if not dims[i]: + # not able to find the correct dimension + raise ValueError( + element + + "\nImpossible to find the dimension that contains:" + + "\n\t{}\nFor subscript_dict:".format(coord2) + + "\n\t{}".format(self.subscripts) + ) + + return dims + + def simplify_subscript_input(self, coords, merge_subs): + """ + Parameters + ---------- + coords: dict + Coordinates to write in the model file. 
+
+        merge_subs: list of strings
+            List of the final subscript range of the python array after
+            merging with other objects.
+
+        Returns
+        -------
+        final_subs, coords: dict, str
+            Final subscripts and the equations to generate the coord
+            dictionary in the model file.
+
+        """
+        coordsp = []
+        final_subs = {}
+        for ndim, (dim, coord) in zip(merge_subs, coords.items()):
+            # find dimensions that can be retrieved from _subscript_dict
+            final_subs[ndim] = coord
+            if dim.endswith("!") and coord == self.subscripts[dim[:-1]]:
+                # use _subscript_dict
+                coordsp.append(f"'{ndim}': _subscript_dict['{dim[:-1]}']")
+            elif not dim.endswith("!") and coord == self.subscripts[dim]:
+                # use _subscript_dict
+                coordsp.append(f"'{ndim}': _subscript_dict['{dim}']")
+            else:
+                # write whole dict
+                coordsp.append(f"'{ndim}': {coord}")
+
+        return final_subs, "{" + ", ".join(coordsp) + "}"
\ No newline at end of file
diff --git a/pysd/building/python/visitors.py b/pysd/building/python/visitors.py
new file mode 100644
index 00000000..3034ad18
--- /dev/null
+++ b/pysd/building/python/visitors.py
@@ -0,0 +1,1257 @@
+from re import X
+import warnings
+from dataclasses import dataclass
+
+import numpy as np
+from pysd.py_backend.utils import compute_shape
+
+from pysd.translation.structures import components as ct
+from .python_functions import functionspace
+
+
+@dataclass
+class BuildAST:
+    expression: str
+    calls: dict
+    subscripts: dict
+    order: int
+
+    def __str__(self):
+        # makes building easier
+        return self.expression
+
+    def reshape(self, subscripts, final_subscripts):
+        subscripts_out = subscripts.simplify_subscript_input(
+            final_subscripts, list(final_subscripts))[1]
+        if not final_subscripts or (
+            self.subscripts == final_subscripts
+            and list(self.subscripts) == list(final_subscripts)):
+            # same dictionary in the same order, do nothing
+            pass
+        elif not self.subscripts:
+            # original expression is not an array
+            # NUMPY: object.expression = np.full(%s, %(shape)s)
+            self.expression = "xr.DataArray(%s, %s, %s)" % (
+                self.expression, subscripts_out, list(final_subscripts)
+            )
+            self.order = 0
+        else:
+            # original expression is an array
+            # NUMPY: reorder dims if necessary with np.moveaxis or similar
+            # NUMPY: add new axis with [:, None, :] or np.tile,
+            # depending on an input argument
+            # NUMPY: if order is not 0 need to lower the order to 0
+            # using force!
+ self.expression = "(xr.DataArray(0, %s, %s) + %s)" % ( + subscripts_out, list(final_subscripts), self.expression + ) + self.order = 0 + self.subscripts = final_subscripts + + def lower_order(self, new_order, force_0=False): + if self.order >= new_order and self.order != 0\ + and (new_order != 0 or force_0): + # if current operator order is 0 do not need to do anything + # if the order of operations conflicts add parenthesis + # if new order is 0 do not need to do anything, as it may be + # an argument to a function, unless force_0 is True which + # will force the parenthesis (necessary to reshape some + # numpy arrays) + self.expression = "(%s)" % self.expression + self.order = 0 + + +class StructureBuilder: + def __init__(self, value, component): + self.value = value + self.arguments = {} + self.component = component + self.element = component.element + self.section = component.section + self.def_subs = component.subscripts_dict + + def build(self, arguments): + return BuildAST( + expression=repr(self.value), + calls={}, + subscripts={}, + order=0) + + def join_calls(self, arguments): + if len(arguments) == 0: + return {} + elif len(arguments) == 1: + return arguments["0"].calls + else: + return merge_dependencies( + *[val.calls for val in arguments.values()]) + + def reorder(self, arguments, def_subs=None, force=None): + + if force == "component": + final_subscripts = def_subs or {} + else: + final_subscripts = self.get_final_subscripts( + arguments, def_subs) + + [arguments[key].reshape(self.section.subscripts, final_subscripts) + for key in arguments + if arguments[key].subscripts or force == "equal"] + + return final_subscripts + + def get_final_subscripts(self, arguments, def_subs): + if len(arguments) == 0: + return {} + elif len(arguments) == 1: + return arguments["0"].subscripts + else: + return self._compute_final_subscripts( + [arg.subscripts for arg in arguments.values()], + def_subs) + + def _compute_final_subscripts(self, subscripts_list, def_subs): + expression = {} + [expression.update(subscript) + for subscript in subscripts_list if subscript] + # TODO reorder final_subscripts taking into account def_subs + return expression + + +class OperationBuilder(StructureBuilder): + operators_build = { + "^": ("%(left)s**%(right)s", None, 1), + "*": ("%(left)s*%(right)s", None, 2), + "/": ("%(left)s/%(right)s", None, 2), + "+": ("%(left)s + %(right)s", None, 3), + "-": ("%(left)s - %(right)s", None, 3), + "=": ("%(left)s == %(right)s", None, 4), + "<>": ("%(left)s != %(right)s", None, 4), + ">=": ("%(left)s >= %(right)s", None, 4), + ">": ("%(left)s > %(right)s", None, 4), + "<=": ("%(left)s <= %(right)s", None, 4), + "<": ("%(left)s < %(right)s", None, 4), + ":NOT:": ("np.logical_not(%s)", ("numpy",), 0), + ":AND:": ("np.logical_and(%(left)s, %(right)s)", ("numpy",), 0), + ":OR:": ("np.logical_or(%(left)s, %(right)s)", ("numpy",), 0), + "negative": ("-%s", None, 3), + } + + def __init__(self, operation, component): + super().__init__(None, component) + self.operators = operation.operators.copy() + self.arguments = { + str(i): arg for i, arg in enumerate(operation.arguments)} + + def build(self, arguments): + operands = {} + calls = self.join_calls(arguments) + final_subscripts = self.reorder(arguments, def_subs=self.def_subs) + arguments = [arguments[str(i)] for i in range(len(arguments))] + dependencies, order = self.operators_build[self.operators[-1]][1:] + + if dependencies: + self.section.imports.add(*dependencies) + + if self.operators[-1] == "^": + # right side of the 
exponential can be from higher order + arguments[-1].lower_order(2) + else: + arguments[-1].lower_order(order) + + if len(arguments) == 1: + # not and negative operations (only 1 element) + if self.operators[0] == "negative": + order = 1 + expression = self.operators_build[self.operators[0]][0] + return BuildAST( + expression=expression % arguments[0], + calls=calls, + subscripts=final_subscripts, + order=order) + + operands["right"] = arguments.pop() + while arguments or self.operators: + expression = self.operators_build[self.operators.pop()][0] + operands["left"] = arguments.pop() + operands["left"].lower_order(order) + operands["right"] = expression % operands + + return BuildAST( + expression=operands["right"], + calls=calls, + subscripts=final_subscripts, + order=order) + + +class GameBuilder(StructureBuilder): + def __init__(self, game_str, component): + super().__init__(None, component) + self.arguments = {"expr": game_str.expression} + + def build(self, arguments): + return arguments["expr"] + + +class CallBuilder(StructureBuilder): + def __init__(self, call_str, component): + super().__init__(None, component) + function_name = call_str.function.reference + self.arguments = { + str(i): arg for i, arg in enumerate(call_str.arguments)} + + # move this to a setter + if function_name in self.section.macrospace: + # build macro + self.macro_name = function_name + self.build = self.build_macro_call + elif function_name in self.section.namespace.cleanspace: + # build lookupcall + self.arguments["function"] = call_str.function + self.build = self.build_lookups_call + elif function_name in functionspace: + # build direct function + self.function = function_name + self.build = self.build_function_call + elif function_name == "a_function_of": + self.build = self.build_incomplete_call + else: + # error + raise ValueError("Undefined function %s" % function_name) + + def build_macro_call(self, arguments): + self.section.imports.add("statefuls", "Macro") + macro = self.section.macrospace[self.macro_name] + + calls = self.join_calls(arguments) + final_subscripts = self.reorder(arguments, def_subs=self.def_subs) + + arguments["name"] = self.section.namespace.make_python_identifier( + self.macro_name + "_" + self.element.identifier, prefix="_macro") + arguments["file"] = macro.path.name + arguments["macro_name"] = macro.name + arguments["args"] = "{%s}" % ", ".join([ + "'%s': lambda: %s" % (key, val) + for key, val in zip(macro.params, arguments.values()) + ]) + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = Macro(_root.joinpath('%(file)s'), " + "%(args)s, '%(macro_name)s', " + "time_initialization=lambda: __data['time'], " + "py_name='%(name)s')" % arguments, + "calls": { + "initial": calls, + "step": calls + } + } + return BuildAST( + expression="%s()" % arguments["name"], + calls={arguments["name"]: 1}, + subscripts=final_subscripts, + order=0) + + def build_incomplete_call(self, arguments): + warnings.warn( + "%s has no equation specified" % self.element.name, + SyntaxWarning, stacklevel=2 + ) + self.section.imports.add("functions", "incomplete") + return BuildAST( + expression="incomplete(%s)" % ", ".join( + arg.expression for arg in arguments.values()), + calls=self.join_calls(arguments), + subscripts=self.def_subs, + order=0) + + def build_lookups_call(self, arguments): + expression = arguments["function"].expression.replace("()", "(%(0)s)") + final_subscripts = self.get_final_subscripts(arguments, self.def_subs) + # NUMPY: we need to 
manage inside lookup with subscript and later + # return the values in a correct ndarray + return BuildAST( + expression=expression % arguments, + calls=self.join_calls(arguments), + subscripts=final_subscripts, + order=0) + + def build_function_call(self, arguments): + expression, modules = functionspace[self.function] + if modules: + self.section.imports.add(*modules) + + calls = self.join_calls(arguments) + + if "__data['time']" in expression: + merge_dependencies(calls, {"time": 1}, inplace=True) + + # TODO modify dimensions of BuildAST + if "%(axis)s" in expression: + final_subscripts, arguments["axis"] = self.compute_axis(arguments) + + elif "%(size)s" in expression: + final_subscripts = self.reorder( + arguments, + def_subs=self.def_subs, + force="component" + ) + arguments["size"] = compute_shape(final_subscripts) + + elif self.function == "active_initial": + # we need to ensure that active initial outputs are always the + # same and update dependencies as stateful object + # TODO: update calls as statefull object + name = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_active_initial") + final_subscripts = self.reorder( + arguments, + def_subs=self.def_subs, + force="equal" + ) + self.element.objects[name] = { + "name": name, + "expression": None, + "calls": { + "initial": arguments["1"].calls, + "step": arguments["0"].calls + } + + } + calls = {name: 1} + else: + final_subscripts = self.reorder( + arguments, + def_subs=self.def_subs + ) + if self.function == "xidz" and final_subscripts: + if not arguments["1"].subscripts: + new_args = {"0": arguments["0"], "2": arguments["2"]} + self.reorder( + new_args, + def_subs=self.def_subs, + force="equal" + ) + arguments.update(new_args) + if self.function == "if_then_else" and final_subscripts: + if not arguments["0"].subscripts: + # NUMPY: we need to ensure that if_then_else always returs + # the same shape object + new_args = {"1": arguments["1"], "2": arguments["2"]} + self.reorder( + new_args, + def_subs=self.def_subs, + force="equal" + ) + arguments.update(new_args) + else: + self.reorder( + arguments, + def_subs=self.def_subs, + force="equal" + ) + + return BuildAST( + expression=expression % arguments, + calls=calls, + subscripts=final_subscripts, + order=0) + + def compute_axis(self, arguments): + subscripts = arguments["0"].subscripts + axis = [] + coords = {} + for subs in subscripts: + if subs.endswith("!"): + # dimensions to apply along + axis.append(subs) + else: + # dimensions remaining + coords[subs] = subscripts[subs] + return coords, axis + + +class ExtLookupBuilder(StructureBuilder): + def __init__(self, getlookup_str, component): + super().__init__(None, component) + self.file = getlookup_str.file + self.tab = getlookup_str.tab + self.x_row_or_col = getlookup_str.x_row_or_col + self.cell = getlookup_str.cell + self.arguments = {} + + def build(self, arguments): + self.component.type = "Lookup" + self.component.subtype = "External" + arguments["params"] = "'%s', '%s', '%s', '%s'" % ( + self.file, self.tab, self.x_row_or_col, self.cell + ) + final_subs, arguments["subscripts"] =\ + self.section.subscripts.simplify_subscript_input( + self.def_subs, self.element.subscripts) + + if "ext_lookups" in self.element.objects: + # object already exists + self.element.objects["ext_lookups"]["expression"] += "\n\n"\ + + self.element.objects["ext_lookups"]["name"]\ + + ".add(%(params)s, %(subscripts)s)" % arguments + + return None + else: + # create a new object + self.section.imports.add("external", 
"ExtLookup") + + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_ext_lookup") + + self.element.objects["ext_lookups"] = { + "name": arguments["name"], + "expression": "%(name)s = ExtLookup(%(params)s, " + "%(subscripts)s, " + "_root, '%(name)s')" % arguments + } + + return BuildAST( + expression=arguments["name"] + "(x)", + calls={"__external__": None, "__lookup__": None}, + subscripts=final_subs, + order=0) + +class ExtDataBuilder(StructureBuilder): + def __init__(self, getdata_str, component): + super().__init__(None, component) + self.file = getdata_str.file + self.tab = getdata_str.tab + self.time_row_or_col = getdata_str.time_row_or_col + self.cell = getdata_str.cell + self.keyword = component.keyword + self.arguments = {} + + def build(self, arguments): + self.component.type = "Data" + self.component.subtype = "External" + arguments["params"] = "'%s', '%s', '%s', '%s'" % ( + self.file, self.tab, self.time_row_or_col, self.cell + ) + final_subs, arguments["subscripts"] =\ + self.section.subscripts.simplify_subscript_input( + self.def_subs, self.element.subscripts) + arguments["method"] = "'%s'" % self.keyword if self.keyword else None + + if "ext_data" in self.element.objects: + # object already exists + self.element.objects["ext_data"]["expression"] += "\n\n"\ + + self.element.objects["ext_data"]["name"]\ + + ".add(%(params)s, %(method)s, %(subscripts)s)" % arguments + + return None + else: + # create a new object + self.section.imports.add("external", "ExtData") + + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_ext_data") + + self.element.objects["ext_data"] = { + "name": arguments["name"], + "expression": "%(name)s = ExtData(%(params)s, " + " %(method)s, %(subscripts)s, " + "_root, '%(name)s')" % arguments + } + + return BuildAST( + expression=arguments["name"] + "(time())", + calls={"__external__": None, "time": 1}, + subscripts=final_subs, + order=0) + + +class ExtConstantBuilder(StructureBuilder): + def __init__(self, getlookup_str, component): + super().__init__(None, component) + self.file = getlookup_str.file + self.tab = getlookup_str.tab + self.cell = getlookup_str.cell + self.arguments = {} + + def build(self, arguments): + self.component.type = "Constant" + self.component.subtype = "External" + arguments["params"] = "'%s', '%s', '%s'" % ( + self.file, self.tab, self.cell + ) + final_subs, arguments["subscripts"] =\ + self.section.subscripts.simplify_subscript_input( + self.def_subs, self.element.subscripts) + + if "constants" in self.element.objects: + # object already exists + self.element.objects["constants"]["expression"] += "\n\n"\ + + self.element.objects["constants"]["name"]\ + + ".add(%(params)s, %(subscripts)s)" % arguments + + return None + else: + # create a new object + self.section.imports.add("external", "ExtConstant") + + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_ext_constant") + + self.element.objects["constants"] = { + "name": arguments["name"], + "expression": "%(name)s = ExtConstant(%(params)s, " + "%(subscripts)s, _root, '%(name)s')" % arguments + } + + return BuildAST( + expression=arguments["name"] + "()", + calls={"__external__": None}, + subscripts=final_subs, + order=0) + + +class TabDataBuilder(StructureBuilder): + def __init__(self, data_str, component): + super().__init__(None, component) + self.keyword = component.keyword + self.arguments = {} + + def build(self, arguments): 
+ self.section.imports.add("data", "TabData") + + final_subs, arguments["subscripts"] =\ + self.section.subscripts.simplify_subscript_input( + self.def_subs, self.element.subscripts) + + arguments["real_name"] = self.element.name + arguments["py_name"] =\ + self.section.namespace.namespace[self.element.name] + arguments["subscripts"] = self.def_subs + arguments["method"] = "'%s'" % self.keyword if self.keyword else None + + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_data") + + self.element.objects["tab_data"] = { + "name": arguments["name"], + "expression": "%(name)s = TabData('%(real_name)s', '%(py_name)s', " + "%(subscripts)s, %(method)s)" % arguments + } + + return BuildAST( + expression=arguments["name"] + "(time())", + calls={"time": 1, "__data__": None}, + subscripts=final_subs, + order=0) + + +class InitialBuilder(StructureBuilder): + def __init__(self, initial_str, component): + super().__init__(None, component) + self.arguments = { + "initial": initial_str.initial + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "Initial" + self.section.imports.add("statefuls", "Initial") + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_initial") + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = Initial(lambda: %(initial)s, " + "'%(name)s')" % arguments, + "calls": { + "initial": arguments["initial"].calls, + "step": {} + } + + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class IntegBuilder(StructureBuilder): + def __init__(self, integ_str, component): + super().__init__(None, component) + self.arguments = { + "flow": integ_str.flow, + "initial": integ_str.initial + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "Integ" + self.section.imports.add("statefuls", "Integ") + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["flow"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_integ") + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = Integ(lambda: %(flow)s, " + "lambda: %(initial)s, '%(name)s')" % arguments, + "calls": { + "initial": arguments["initial"].calls, + "step": arguments["flow"].calls + } + + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class DelayBuilder(StructureBuilder): + def __init__(self, dtype, delay_str, component): + super().__init__(None, component) + self.arguments = { + "input": delay_str.input, + "delay_time": delay_str.delay_time, + "initial": delay_str.initial, + "order": delay_str.order + } + self.dtype = dtype + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "Delay" + self.section.imports.add("statefuls", self.dtype) + arguments["input"].reshape(self.section.subscripts, self.def_subs) + arguments["delay_time"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, 
prefix=f"_{self.dtype.lower()}") + arguments["dtype"] = self.dtype + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = %(dtype)s(lambda: %(input)s, " + "lambda: %(delay_time)s, lambda: %(initial)s, " + "lambda: %(order)s, " + "time_step, '%(name)s')" % arguments, + "calls": { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["delay_time"].calls, + arguments["order"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["delay_time"].calls) + + } + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class DelayFixedBuilder(StructureBuilder): + def __init__(self, delay_str, component): + super().__init__(None, component) + self.arguments = { + "input": delay_str.input, + "delay_time": delay_str.delay_time, + "initial": delay_str.initial, + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "DelayFixed" + self.section.imports.add("statefuls", "DelayFixed") + arguments["input"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_delayfixed") + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = DelayFixed(lambda: %(input)s, " + "lambda: %(delay_time)s, lambda: %(initial)s, " + "time_step, '%(name)s')" % arguments, + "calls": { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["delay_time"].calls), + "step": arguments["input"].calls + } + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class SmoothBuilder(StructureBuilder): + def __init__(self, smooth_str, component): + super().__init__(None, component) + self.arguments = { + "input": smooth_str.input, + "smooth_time": smooth_str.smooth_time, + "initial": smooth_str.initial, + "order": smooth_str.order + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "Smooth" + self.section.imports.add("statefuls", "Smooth") + arguments["input"].reshape(self.section.subscripts, self.def_subs) + arguments["smooth_time"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_smooth") + + # TODO in the future we need to ad timestep to show warnings about + # the smooth time as its done with delays (see vensim help for smooth) + # TODO in the future we may want to have 2 py_backend classes for + # smooth as the behaviour is different for SMOOTH and SMOOTH N when + # using RingeKutta scheme + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = Smooth(lambda: %(input)s, " + "lambda: %(smooth_time)s, lambda: %(initial)s, " + "lambda: %(order)s, '%(name)s')" % arguments, + "calls": { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["smooth_time"].calls, + arguments["order"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["smooth_time"].calls) + } + + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class 
TrendBuilder(StructureBuilder): + def __init__(self, trend_str, component): + super().__init__(None, component) + self.arguments = { + "input": trend_str.input, + "average_time": trend_str.average_time, + "initial": trend_str.initial, + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "Trend" + self.section.imports.add("statefuls", "Trend") + arguments["input"].reshape(self.section.subscripts, self.def_subs) + arguments["average_time"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_trend") + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = Trend(lambda: %(input)s, " + "lambda: %(average_time)s, lambda: %(initial)s, " + "'%(name)s')" % arguments, + "calls": { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["input"].calls, + arguments["average_time"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["average_time"].calls) + } + + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class ForecastBuilder(StructureBuilder): + def __init__(self, forecast_str, component): + super().__init__(None, component) + self.arguments = { + "input": forecast_str.input, + "average_time": forecast_str.average_time, + "horizon": forecast_str.horizon, + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "Forecast" + self.section.imports.add("statefuls", "Forecast") + arguments["input"].reshape(self.section.subscripts, self.def_subs) + arguments["average_time"].reshape(self.section.subscripts, self.def_subs) + arguments["horizon"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_forecast") + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = Forecast(lambda: %(input)s, " + "lambda: %(average_time)s, lambda: %(horizon)s, " + "'%(name)s')" % arguments, + "calls": { + "initial": + arguments["input"].calls, + "step": merge_dependencies( + arguments["input"].calls, + arguments["average_time"].calls, + arguments["horizon"].calls) + } + + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class SampleIfTrueBuilder(StructureBuilder): + def __init__(self, sampleiftrue_str, component): + super().__init__(None, component) + self.arguments = { + "condition": sampleiftrue_str.condition, + "input": sampleiftrue_str.input, + "initial": sampleiftrue_str.initial, + } + + def build(self, arguments): + self.component.type = "Stateful" + self.component.subtype = "SampleIfTrue" + self.section.imports.add("statefuls", "SampleIfTrue") + arguments["condition"].reshape(self.section.subscripts, self.def_subs) + arguments["input"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_sampleiftrue") + + self.element.objects[arguments["name"]] = { + "name": arguments["name"], + "expression": "%(name)s = SampleIfTrue(lambda: %(condition)s, " + "lambda: %(input)s, lambda: %(initial)s, " 
+ "'%(name)s')" % arguments, + "calls": { + "initial": + arguments["initial"].calls, + "step": merge_dependencies( + arguments["condition"].calls, + arguments["input"].calls) + } + + } + return BuildAST( + expression=arguments["name"] + "()", + calls={arguments["name"]: 1}, + subscripts=self.def_subs, + order=0) + + +class LookupsBuilder(StructureBuilder): + def __init__(self, lookups_str, component): + super().__init__(None, component) + self.arguments = {} + self.x = lookups_str.x + self.y = lookups_str.y + + def build(self, arguments): + self.component.type = "Lookup" + self.component.subtype = "Normal" + arguments["x"] = np.array2string( + np.array(self.x), + separator=",", + threshold=len(self.x) + ) + arguments["y"] = np.array2string( + np.array(self.y), + separator=",", + threshold=len(self.y) + ) + arguments["subscripts"] = self.def_subs + + if "hardcoded_lookups" in self.element.objects: + # object already exists + self.element.objects["hardcoded_lookups"]["expression"] += "\n\n"\ + + self.element.objects["hardcoded_lookups"]["name"]\ + + ".add(%(x)s, %(y)s, %(subscripts)s)" % arguments + + return None + else: + # create a new object + self.section.imports.add("lookups", "HardcodedLookups") + + arguments["name"] = self.section.namespace.make_python_identifier( + self.element.identifier, prefix="_hardcodedlookup") + + self.element.objects["hardcoded_lookups"] = { + "name": arguments["name"], + "expression": "%(name)s = HardcodedLookups(%(x)s, %(y)s, " + "%(subscripts)s, '%(name)s')" % arguments + } + + return BuildAST( + expression=arguments["name"] + "(x)", + calls={"__lookup__": None}, + subscripts=self.def_subs, + order=0) + + +class InlineLookupsBuilder(StructureBuilder): + def __init__(self, inlinelookups_str, component): + super().__init__(None, component) + self.arguments = { + "value": inlinelookups_str.argument + } + self.lookups = inlinelookups_str.lookups + + def build(self, arguments): + self.component.type = "Auxiliary" + self.component.subtype = "with Lookup" + self.section.imports.add("numpy") + arguments["x"] = np.array2string( + np.array(self.lookups.x), + separator=",", + threshold=len(self.lookups.x) + ) + arguments["y"] = np.array2string( + np.array(self.lookups.y), + separator=",", + threshold=len(self.lookups.y) + ) + return BuildAST( + expression="np.interp(%(value)s, %(x)s, %(y)s)" % arguments, + calls=arguments["value"].calls, + subscripts=arguments["value"].subscripts, + order=0) + + +class ReferenceBuilder(StructureBuilder): + def __init__(self, reference_str, component): + super().__init__(None, component) + self.mapping_subscripts = {} + self.reference = reference_str.reference + self.subscripts = reference_str.subscripts + self.arguments = {} + self.section.imports.add("xarray") + + @property + def subscripts(self): + return self._subscripts + + @subscripts.setter + def subscripts(self, subscripts): + """Get subscript dictionary from reference""" + self._subscripts = self.section.subscripts.make_coord_dict( + getattr(subscripts, "subscripts", {})) + + # get the subscripts after applying the mapping if necessary + for dim, coordinates in self._subscripts.items(): + if len(coordinates) > 1: + # we create the mapping only with those subscripts that are + # ranges as we need to ignore singular subscripts because + # that dimension is removed from final element + if dim not in self.def_subs and not dim.endswith("!"): + # the reference has a subscripts which is it not + # applied (!) 
and does not appear in the definition + # of the variable + for mapped in self.section.subscripts.mapping[dim]: + # check the mapped subscripts + # TODO update this and the parser to make it + # compatible with more complex mappings + if mapped in self.def_subs\ + and mapped not in self._subscripts: + # the mapped subscript appears in the definition + # and it is not already in the variable + self.mapping_subscripts[mapped] =\ + self.section.subscripts.subscripts[mapped] + break + else: + # the subscript is in the variable definition, + # do not change it + self.mapping_subscripts[dim] = coordinates + + def build(self, arguments): + if self.reference not in self.section.namespace.cleanspace: + # Manage references to subscripts (subscripts used as variables) + expression, subscripts =\ + self.section.subscripts.subscript2num[self.reference] + subscripts_out = self.section.subscripts.simplify_subscript_input( + subscripts, list(subscripts))[1] + if subscripts: + self.section.imports.add("numpy") + # NUMPY: not need this if + expression = "xr.DataArray(%s, %s, %s)" % ( + expression, subscripts_out, list(subscripts)) + return BuildAST( + expression=expression, + calls={}, + subscripts=subscripts, + order=0) + + reference = self.section.namespace.cleanspace[self.reference] + + # TODO lookups are passed as a reference first, in that case we will + # need to replace () in the lookup call + expression = reference + "()" + + if not self.subscripts: + return BuildAST( + expression=expression, + calls={reference: 1}, + subscripts={}, + order=0) + + original_subs = self.section.subscripts.make_coord_dict( + self.section.subscripts.elements[reference]) + + expression, final_subs = self.visit_subscripts(expression, original_subs) + + return BuildAST( + expression=expression, + calls={reference: 1}, + subscripts=final_subs, + order=0) + + def visit_subscripts(self, expression, original_subs): + final_subs, rename, loc, reset_coords, float = {}, {}, [], False, True + for (dim, coord), (orig_dim, orig_coord)\ + in zip(self.subscripts.items(), original_subs.items()): + if len(coord) == 1: + # subset a 1 dimension value + # NUMPY: subset value [:, N, :, :] + loc.append(repr(coord[0])) + reset_coords = True + elif len(coord) < len(orig_coord): + # subset a subrange + # NUMPY: subset value [:, :, np.array([1, 0]), :] + # NUMPY: as order may change we need to check if dim != orig_dim + # NUMPY: use also ranges [:, :, 2:5, :] when possible + loc.append("_subscript_dict['%s']" % dim) + final_subs[dim] = coord + float = False + else: + # do nothing + # NUMPY: same, we can remove float = False + loc.append(":") + final_subs[dim] = coord + float = False + + if dim != orig_dim and len(coord) != 1: + # NUMPY: check order of dimensions, make all subranges work with the same dimensions? 
+ # NUMPY: this could be solved in the previous if/then/else + rename[orig_dim] = dim + + if any(dim != ":" for dim in loc): + # NUMPY: expression += "[%s]" % ", ".join(loc) + expression += ".loc[%s]" % ", ".join(loc) + if reset_coords and float: + # NUMPY: Not necessary + expression = "float(" + expression + ")" + elif reset_coords: + # NUMPY: Not necessary + expression += ".reset_coords(drop=True)" + if rename: + # NUMPY: Not necessary + expression += ".rename(%s)" % rename + + # NUMPY: This will not be necessary, we only need to return + # self.mapping_subscripts + if self.mapping_subscripts != final_subs: + subscripts_out = self.section.subscripts.simplify_subscript_input( + self.mapping_subscripts, list(self.mapping_subscripts))[1] + expression = "xr.DataArray(%s.values, %s, %s)" % ( + expression, subscripts_out, list(self.mapping_subscripts) + ) + + return expression, self.mapping_subscripts + + +class NumericBuilder(StructureBuilder): + # Standard class, inherit all from StructureBuilder + pass + + +class ArrayBuilder(StructureBuilder): + # Constant arrays, override the build method from StructureBuilder + def build(self, arguments): + self.value = np.array2string( + self.value.reshape(compute_shape(self.def_subs)), + separator=",", + threshold=np.prod(self.value.shape) + ) + self.component.type = "Constant" + self.component.subtype = "Normal" + + final_subs, subscripts_out =\ + self.section.subscripts.simplify_subscript_input( + self.def_subs, self.element.subscripts) + + return BuildAST( + expression="xr.DataArray(%s, %s, %s)" % ( + self.value, subscripts_out, list(final_subs)), + calls={}, + subscripts=final_subs, + order=0) + + +def merge_dependencies(*dependencies, inplace=False): + # TODO improve dependencies in the next major release, include info + # about external objects and simplify the stateful objects, think about + # how to include data/lookups objects + current = dependencies[0] + if inplace: + current = dependencies[0] + else: + current = dependencies[0].copy() + for new in dependencies[1:]: + if not current: + current.update(new) + elif new: + # regular element + _merge_dependencies(current, new) + + return current + + +def _merge_dependencies(current, new): + """ + Merge two dependencies dicts of an element. + + Parameters + ---------- + current: dict + Current dependencies of the element. It will be mutated. + + new: dict + New dependencies to add.
+ + Returns + ------- + None + + """ + current_set, new_set = set(current), set(new) + for dep in current_set.intersection(new_set): + if dep.startswith("__"): + # if it is special (__lookup__, __external__) continue + continue + # if dependency is in both sum the number of calls + if dep in ["initial", "step"]: + _merge_dependencies(current[dep], new[dep]) + else: + current[dep] += new[dep] + for dep in new_set.difference(current_set): + # if dependency is only in new copy it + current[dep] = new[dep] + + +class ASTVisitor: + builders = { + ct.InitialStructure: InitialBuilder, + ct.IntegStructure: IntegBuilder, + ct.DelayStructure: lambda x, y: DelayBuilder("Delay", x, y), + ct.DelayNStructure: lambda x, y: DelayBuilder("DelayN", x, y), + ct.DelayFixedStructure: DelayFixedBuilder, + ct.SmoothStructure: SmoothBuilder, + ct.SmoothNStructure: SmoothBuilder, + ct.TrendStructure: TrendBuilder, + ct.ForecastStructure: ForecastBuilder, + ct.SampleIfTrueStructure: SampleIfTrueBuilder, + ct.GetConstantsStructure: ExtConstantBuilder, + ct.GetDataStructure: ExtDataBuilder, + ct.GetLookupsStructure: ExtLookupBuilder, + ct.LookupsStructure: LookupsBuilder, + ct.InlineLookupsStructure: InlineLookupsBuilder, + ct.DataStructure: TabDataBuilder, + ct.ReferenceStructure: ReferenceBuilder, + ct.CallStructure: CallBuilder, + ct.GameStructure: GameBuilder, + ct.LogicStructure: OperationBuilder, + ct.ArithmeticStructure: OperationBuilder, + int: NumericBuilder, + float: NumericBuilder, + np.ndarray: ArrayBuilder + } + + def __init__(self, component): + self.ast = component.ast + self.subscripts = component.subscripts_dict + self.component = component + # TODO add a attribute for "new structures" + + def visit(self): + # TODO: if final_subscripts == self.subscripts OK, else -> redimension + visit_out = self._visit(self.ast) + + if not visit_out: + # external objects that are declared with other expression + return None + + if not visit_out.calls and self.component.type == "Auxiliary": + self.component.type = "Constant" + self.component.subtype = "Normal" + + # include dependencies of the current component in the element + merge_dependencies( + self.component.element.dependencies, + visit_out.calls, + inplace=True) + + if not visit_out.subscripts\ + and self.subscripts != self.component.element.subs_dict: + return visit_out + + # NUMPY not needed + # get subscript in elements as name of the ranges may change + subscripts_in_element = { + dim: coords + for dim, coords + in zip(self.component.element.subscripts, self.subscripts.values()) + } + + reshape = ( + (visit_out.subscripts != self.subscripts + or list(visit_out.subscripts) != list(self.subscripts)) + and + (visit_out.subscripts != subscripts_in_element + or list(visit_out.subscripts) != list(subscripts_in_element)) + ) + + if reshape: + # We are only comparing the dictionaries (set of dimensions) + # and not the list (order). 
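+ # e.g. {'dim1': [...], 'dim2': [...]} and {'dim2': [...], 'dim1': [...]} + # hold the same dimensions, only listed in a different order.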
+ # With xarray we don't need to compare the order because the + # decorator @subs will reorder the objects + # NUMPY: in this case we need to tile along dims if neccessary + # or reorder the dimensions + # NUMPY: if the output is a float or int and they are several + # definitions we can return float or int as we can + # safely do "var[:, 1, :] = 3" + visit_out.reshape( + self.component.section.subscripts, self.subscripts) + + return visit_out + + def _visit(self, ast_object): + builder = self.builders[type(ast_object)](ast_object, self.component) + arguments = {name: self._visit(value) for name, value in builder.arguments.items()} + return builder.build(arguments) diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py index 5eccf370..6d4c41f0 100644 --- a/pysd/py_backend/components.py +++ b/pysd/py_backend/components.py @@ -4,6 +4,7 @@ import os import random +import numpy as np from importlib.machinery import SourceFileLoader from pysd._version import __version__ @@ -84,6 +85,8 @@ def _set_component(self, name, value): class Time(object): + rprec = 1e-10 # relative precission for final time and saving time + def __init__(self): self._time = None self.stage = None @@ -135,7 +138,7 @@ def in_bounds(self): True if time is smaller than final time. Otherwise, returns Fase. """ - return self._time < self.final_time() + return self._time + self.time_step()*self.rprec < self.final_time() def in_return(self): """ Check if current time should be returned """ @@ -144,9 +147,15 @@ def in_return(self): time_delay = self._time - self._initial_time save_per = self.saveper() - prec = self.time_step() * 1e-10 + prec = self.time_step() * self.rprec return time_delay % save_per < prec or -time_delay % save_per < prec + def round(self): + """ Return rounded time to outputs to avoid float precission error""" + return np.round( + self._time, + -int(np.log10(self.time_step()*self.rprec))) + def add_return_timestamps(self, return_timestamps): """ Add return timestamps """ if return_timestamps is None or hasattr(return_timestamps, '__len__'): diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index 4a69d6fa..a46af99f 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -1,5 +1,6 @@ import warnings import re +import random from pathlib import Path import numpy as np @@ -50,6 +51,8 @@ def read_file(cls, file_name, encoding=None): indicate if the output file is transposed. """ + # in the most cases variables will be split per columns, then + # read the first row to have all the column names out = cls.read_line(file_name, encoding) if out is None: raise ValueError( @@ -59,10 +62,16 @@ def read_file(cls, file_name, encoding=None): transpose = False try: - [float(col) for col in out] - out = cls.read_row(file_name, encoding) + # if we fail converting columns to float then they are + # not numeric values, so current direction is okay + [float(col) for col in random.sample(out, 3)] + # we did not fail, read the first column to see if variables + # are split per rows + out = cls.read_col(file_name, encoding) transpose = True - [float(col) for col in out] + # if we still are able to transform values to float the + # file is not valid + [float(col) for col in random.sample(out, 3)] except ValueError: return out, transpose else: @@ -91,7 +100,7 @@ def read_line(cls, file_name, encoding=None): return None @classmethod - def read_row(cls, file_name, encoding=None): + def read_col(cls, file_name, encoding=None): """ Read the firts column and return a set of it. 
""" @@ -190,9 +199,9 @@ def __call__(self, time): outdata = self.data[0] elif self.interp == "interpolate": outdata = self.data.interp(time=time) - elif self.interp == 'look forward': + elif self.interp == 'look_forward': outdata = self.data.sel(time=time, method="backfill") - elif self.interp == 'hold backward': + elif self.interp == 'hold_backward': outdata = self.data.sel(time=time, method="pad") if self.is_float: @@ -214,16 +223,23 @@ def __call__(self, time): class TabData(Data): """ - Data from tabular file tab/cls, it could be from Vensim output. + Data from tabular file tab/csv, it could be from Vensim output. """ def __init__(self, real_name, py_name, coords, interp="interpolate"): self.real_name = real_name self.py_name = py_name self.coords = coords - self.interp = interp + self.interp = interp.replace(" ", "_") if interp else None self.is_float = not bool(coords) self.data = None + if self.interp not in ["interpolate", "raw", + "look_forward", "hold_backward"]: + raise ValueError(self.py_name + "\n" + + " The interpolation method (interp) must be " + + "'raw', 'interpolate', " + + "'look_forward' or 'hold_backward") + def load_data(self, file_names): """ Load data values from files. diff --git a/pysd/py_backend/decorators.py b/pysd/py_backend/decorators.py index f796dee1..195a5c77 100644 --- a/pysd/py_backend/decorators.py +++ b/pysd/py_backend/decorators.py @@ -14,6 +14,7 @@ def subs(dims, subcoords): """ def decorator(function): function.dims = dims + function.args = inspect.getfullargspec(function)[0] @wraps(function) def wrapper(*args): diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index c54a815a..9da02cb9 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -13,6 +13,7 @@ from openpyxl import load_workbook from . 
import utils from .data import Data +from .lookups import Lookups class Excels(): @@ -551,9 +552,9 @@ def _interpolate_missing(self, x, xr, yr): y[i] = yr[-1] elif value <= xr[0]: y[i] = yr[0] - elif self.interp == 'look forward': + elif self.interp == 'look_forward': y[i] = yr[xr >= value][0] - elif self.interp == 'hold backward': + elif self.interp == 'hold_backward': y[i] = yr[xr <= value][-1] else: y[i] = np.interp(value, xr, yr) @@ -705,7 +706,8 @@ def __init__(self, file_name, sheet, time_row_or_col, cell, self.cells = [cell] self.coordss = [coords] self.root = root - self.interp = interp + # TODO remove in 3.0.0 (self.interp = interp) + self.interp = interp.replace(" ", "_") if interp else None self.is_float = not bool(coords) # check if the interpolation method is valid @@ -713,11 +715,11 @@ def __init__(self, file_name, sheet, time_row_or_col, cell, self.interp = "interpolate" if self.interp not in ["interpolate", "raw", - "look forward", "hold backward"]: + "look_forward", "hold_backward"]: raise ValueError(self.py_name + "\n" + " The interpolation method (interp) must be " + "'raw', 'interpolate', " - + "'look forward' or 'hold backward") + + "'look_forward' or 'hold_backward") def add(self, file_name, sheet, time_row_or_col, cell, interp, coords): @@ -732,7 +734,7 @@ def add(self, file_name, sheet, time_row_or_col, cell, if not interp: interp = "interpolate" - if interp != self.interp: + if interp.replace(" ", "_") != self.interp: raise ValueError(self.py_name + "\n" + "Error matching interpolation method with " + "previously defined one") @@ -753,7 +755,7 @@ def initialize(self): self.cells, self.coordss)]) -class ExtLookup(External): +class ExtLookup(External, Lookups): """ Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS """ @@ -768,6 +770,7 @@ def __init__(self, file_name, sheet, x_row_or_col, cell, self.root = root self.coordss = [coords] self.interp = "interpolate" + self.is_float = not bool(coords) def add(self, file_name, sheet, x_row_or_col, cell, coords): """ @@ -794,58 +797,6 @@ def initialize(self): in zip(self.files, self.sheets, self.x_row_or_cols, self.cells, self.coordss)]) - def __call__(self, x): - return self._call(self.data, x) - - def _call(self, data, x): - if isinstance(x, xr.DataArray): - if not x.dims: - # shape 0 xarrays - return self._call(data, float(x)) - if np.all(x > data['lookup_dim'].values[-1]): - outdata, _ = xr.broadcast(data[-1], x) - warnings.warn( - self.py_name + "\n" - + "extrapolating data above the maximum value of the series") - elif np.all(x < data['lookup_dim'].values[0]): - outdata, _ = xr.broadcast(data[0], x) - warnings.warn( - self.py_name + "\n" - + "extrapolating data below the minimum value of the series") - else: - data, _ = xr.broadcast(data, x) - outdata = data[0].copy() - for a in utils.xrsplit(x): - outdata.loc[a.coords] = self._call( - data.loc[a.coords], - float(a)) - # the output will be always an xarray - return outdata.reset_coords('lookup_dim', drop=True) - - else: - if x in data['lookup_dim'].values: - outdata = data.sel(lookup_dim=x) - elif x > data['lookup_dim'].values[-1]: - outdata = data[-1] - warnings.warn( - self.py_name + "\n" - + "extrapolating data above the maximum value of the series") - elif x < data['lookup_dim'].values[0]: - outdata = data[0] - warnings.warn( - self.py_name + "\n" - + "extrapolating data below the minimum value of the series") - else: - outdata = data.interp(lookup_dim=x) - - # the output could be a float or an xarray - if self.coordss[0]: - # Remove lookup dimension coord from 
the DataArray - return outdata.reset_coords('lookup_dim', drop=True) - else: - # if lookup has no-coords return a float - return float(outdata) - class ExtConstant(External): """ diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index b609e48c..ea751e69 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -240,7 +240,19 @@ def if_then_else(condition, val_if_true, val_if_false): The value depending on the condition. """ + # NUMPY: replace xr by np if isinstance(condition, xr.DataArray): + # NUMPY: neccessarry for keep the same shape always + # if condition.all(): + # value = val_if_true() + # elif not condition.any(): + # value = val_if_false() + # else: + # return np.where(condition, val_if_true(), val_if_false()) + # + # if isinstance(value, np.ndarray): + # return value + # return np.full_like(condition, value) if condition.all(): return val_if_true() elif not condition.any(): @@ -293,7 +305,7 @@ def logical_or(*args): return current -def xidz(numerator, denominator, value_if_denom_is_zero): +def xidz(numerator, denominator, x): """ Implements Vensim's XIDZ function. https://www.vensim.com/documentation/fn_xidz.htm @@ -304,26 +316,34 @@ def xidz(numerator, denominator, value_if_denom_is_zero): Parameters ---------- numerator: float or xarray.DataArray + Numerator of the operation. denominator: float or xarray.DataArray - Components of the division operation - value_if_denom_is_zero: float or xarray.DataArray - The value to return if the denominator is zero + Denominator of the operation. + x: float or xarray.DataArray + The value to return if the denominator is zero. Returns ------- - numerator / denominator if denominator > 1e-6 + numerator/denominator if denominator > small_vensim otherwise, returns value_if_denom_is_zero """ + # NUMPY: replace DataArray by np.ndarray, xr.where -> np.where if isinstance(denominator, xr.DataArray): return xr.where(np.abs(denominator) < small_vensim, - value_if_denom_is_zero, - numerator * 1.0 / denominator) + x, + numerator/denominator) if abs(denominator) < small_vensim: - return value_if_denom_is_zero + # NUMPY: neccessarry for keep the same shape always + # if isinstance(numerator, np.ndarray): + # return np.full_like(numerator, x) + return x else: - return numerator * 1.0 / denominator + # NUMPY: neccessarry for keep the same shape always + # if isinstance(x, np.ndarray): + # return np.full_like(x, numerator/denominator) + return numerator/denominator def zidz(numerator, denominator): @@ -345,15 +365,21 @@ def zidz(numerator, denominator): otherwise zero. """ + # NUMPY: replace DataArray by np.ndarray, xr.where -> np.where if isinstance(denominator, xr.DataArray): return xr.where(np.abs(denominator) < small_vensim, 0, - numerator * 1.0 / denominator) + numerator/denominator) if abs(denominator) < small_vensim: + # NUMPY: neccessarry for keep the same shape always + # if isinstance(denominator, np.ndarray): + # return np.zeros_like(denominator) + if isinstance(numerator, xr.DataArray): + return xr.DataArray(0, numerator.coords, numerator.dims) return 0 else: - return numerator * 1.0 / denominator + return numerator/denominator def active_initial(time, expr, init_val): @@ -361,15 +387,19 @@ def active_initial(time, expr, init_val): Implements vensim's ACTIVE INITIAL function Parameters ---------- - time: function - The current time function - expr - init_val + stage: str + The stage of the model. + expr: function + Running stage value + init_val: float or xarray.DataArray + Initialization stage value. 
Returns ------- """ + # TODO replace time by stage when doing a non compatible version + # NUMPY: both must have same dimensions in inputs, remove time.stage if time.stage == 'Initialization': return init_val else: @@ -414,6 +444,7 @@ def log(x, base): float The log of 'x' in base 'base'. """ + # TODO remove with PySD 3.0.0, log could be directly created in the file return np.log(x) / np.log(base) @@ -431,6 +462,7 @@ def integer(x): Returns integer part of x. """ + # NUMPY: replace xr by np if isinstance(x, xr.DataArray): return x.astype(int) else: @@ -454,6 +486,7 @@ def quantum(a, b): If b > 0 returns b * integer(a/b). Otherwise, returns a. """ + # NUMPY: replace xr by np if isinstance(b, xr.DataArray): return xr.where(b < small_vensim, a, b*integer(a/b)) if b < small_vensim: @@ -500,6 +533,7 @@ def sum(x, dim=None): The result of the sum operation in the given dimensions. """ + # NUMPY: replace by np.sum(x, axis=axis) put directly in the file # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.sum()) @@ -525,6 +559,7 @@ def prod(x, dim=None): The result of the product operation in the given dimensions. """ + # NUMPY: replace by np.prod(x, axis=axis) put directly in the file # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.prod()) @@ -550,6 +585,7 @@ def vmin(x, dim=None): The result of the minimum value over the given dimensions. """ + # NUMPY: replace by np.min(x, axis=axis) put directly in the file # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.min()) @@ -575,6 +611,7 @@ def vmax(x, dim=None): The result of the maximum value over the dimensions. """ + # NUMPY: replace by np.max(x, axis=axis) put directly in the file # float returned if the function is applied over all the dimensions if dim is None or set(x.dims) == set(dim): return float(x.max()) @@ -599,4 +636,6 @@ def invert_matrix(mat): Inverted matrix. """ + # NUMPY: avoid converting to xarray, put directly the expression + # in the model return xr.DataArray(np.linalg.inv(mat.values), mat.coords, mat.dims) diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py new file mode 100644 index 00000000..98ef2132 --- /dev/null +++ b/pysd/py_backend/lookups.py @@ -0,0 +1,115 @@ +import warnings + +import numpy as np +import xarray as xr + +from . 
import utils + + +class Lookups(object): + # TODO add __init__ and use this class for used input pandas.Series + # as Lookups + # def __init__(self, data, coords, interp="interpolate"): + + def __call__(self, x): + return self._call(self.data, x) + + def _call(self, data, x): + if isinstance(x, xr.DataArray): + if not x.dims: + # shape 0 xarrays + return self._call(data, float(x)) + if np.all(x > data['lookup_dim'].values[-1]): + outdata, _ = xr.broadcast(data[-1], x) + warnings.warn( + self.py_name + "\n" + + "extrapolating data above the maximum value of the series") + elif np.all(x < data['lookup_dim'].values[0]): + outdata, _ = xr.broadcast(data[0], x) + warnings.warn( + self.py_name + "\n" + + "extrapolating data below the minimum value of the series") + else: + data, _ = xr.broadcast(data, x) + outdata = data[0].copy() + for a in utils.xrsplit(x): + outdata.loc[a.coords] = self._call( + data.loc[a.coords], + float(a)) + # the output will be always an xarray + return outdata.reset_coords('lookup_dim', drop=True) + + else: + if x in data['lookup_dim'].values: + outdata = data.sel(lookup_dim=x) + elif x > data['lookup_dim'].values[-1]: + outdata = data[-1] + warnings.warn( + self.py_name + "\n" + + "extrapolating data above the maximum value of the series") + elif x < data['lookup_dim'].values[0]: + outdata = data[0] + warnings.warn( + self.py_name + "\n" + + "extrapolating data below the minimum value of the series") + else: + outdata = data.interp(lookup_dim=x) + + # the output could be a float or an xarray + if self.is_float: + # if lookup has no-coords return a float + return float(outdata) + else: + # Remove lookup dimension coord from the DataArray + return outdata.reset_coords('lookup_dim', drop=True) + + +class HardcodedLookups(Lookups): + """Class for lookups defined in the file""" + + def __init__(self, x, y, coords, py_name): + # TODO: avoid add and merge all declarations in one definition + self.is_float = not bool(coords) + self.py_name = py_name + self.data = xr.DataArray( + np.array(y).reshape(tuple([len(x)] + utils.compute_shape(coords))), + {"lookup_dim": x, **coords}, + ["lookup_dim"] + list(coords) + ) + self.x = set(x) + + def add(self, x, y, coords): + self.data = self.data.combine_first( + xr.DataArray( + np.array(y).reshape(tuple([len(x)] + utils.compute_shape(coords))), + {"lookup_dim": x, **coords}, + ["lookup_dim"] + list(coords) + )) + + if np.any(np.isnan(self.data)): + # fill missing values of different input lookup_dim values + values = self.data.values + self._fill_missing(self.data.lookup_dim.values, values) + self.data = xr.DataArray(values, self.data.coords, self.data.dims) + + def _fill_missing(self, series, data): + """ + Fills missing values in lookups to have a common series. + Mutates the values in data. 
+ + Returns + ------- + None + + """ + if len(data.shape) > 1: + # break the data array until arrive to a vector + for i in range(data.shape[1]): + if np.any(np.isnan(data[:, i])): + self._fill_missing(series, data[:, i]) + elif not np.all(np.isnan(data)): + # interpolate missing values + data[np.isnan(data)] = np.interp( + series[np.isnan(data)], + series[~np.isnan(data)], + data[~np.isnan(data)]) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 6567224e..8d36b743 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -889,7 +889,6 @@ def _isdynamic(self, dependencies): return True for dep in dependencies: if dep.startswith("_") and not dep.startswith("_initial_")\ - and not dep.startswith("_active_initial_")\ and not dep.startswith("__"): return True return False @@ -1148,6 +1147,7 @@ def get_series_data(self, param): func_name = utils.get_key_and_value_by_insensitive_key_or_value( param, self.components._namespace)[1] or param + print(func_name, self.get_args(getattr(self.components, func_name))) try: if func_name.startswith("_ext_"): @@ -1427,6 +1427,26 @@ def doc(self): variable names, and understand how they are translated into python safe names. + Returns + ------- + docs_df: pandas dataframe + Dataframe with columns for the model components: + - Real names + - Python safe identifiers (as used in model.components) + - Units string + - Documentation strings from the original model file + """ + warnings.warn( + "doc method will become an attribute in version 3.0.0...", + FutureWarning) + return self._doc + + def _build_doc(self): + """ + Formats a table of documentation strings to help users remember + variable names, and understand how they are translated into + python safe names. + Returns ------- docs_df: pandas dataframe @@ -1454,25 +1474,43 @@ def doc(self): eqn = '; '.join( [line.strip() for line in lines[3:unit_line]]) - collector.append( - {'Real Name': name, - 'Py Name': varname, - 'Eqn': eqn, - 'Unit': lines[unit_line].replace("Units:", "").strip(), - 'Lims': lines[unit_line+1].replace("Limits:", "").strip(), - 'Type': lines[unit_line+2].replace("Type:", "").strip(), - 'Subs': lines[unit_line+3].replace("Subs:", "").strip(), - 'Comment': '\n'.join(lines[(unit_line+4):]).strip()}) + vardoc = { + 'Real Name': name, + 'Py Name': varname, + 'Eqn': eqn, + 'Unit': lines[unit_line].replace("Units:", "").strip(), + 'Lims': lines[unit_line+1].replace("Limits:", "").strip(), + 'Type': lines[unit_line+2].replace("Type:", "").strip() + } + + if "Subtype:" in lines[unit_line+3]: + vardoc["Subtype"] =\ + lines[unit_line+3].replace("Subtype:", "").strip() + vardoc["Subs"] =\ + lines[unit_line+4].replace("Subs:", "").strip() + vardoc["Comment"] =\ + '\n'.join(lines[(unit_line+5):]).strip() + else: + vardoc["Subtype"] = None + vardoc["Subs"] =\ + lines[unit_line+3].replace("Subs:", "").strip() + vardoc["Comment"] =\ + '\n'.join(lines[(unit_line+4):]).strip() + + collector.append(vardoc) except Exception: pass - docs_df = pd.DataFrame(collector) - docs_df.fillna('None', inplace=True) - - order = ['Real Name', 'Py Name', 'Unit', 'Lims', - 'Type', 'Subs', 'Eqn', 'Comment'] - return docs_df[order].sort_values( - by='Real Name').reset_index(drop=True) + if collector: + docs_df = pd.DataFrame(collector) + docs_df.fillna("None", inplace=True) + order = ["Real Name", "Py Name", "Unit", "Lims", + "Type", "Subtype", "Subs", "Eqn", "Comment"] + return docs_df[order].sort_values( + by="Real Name").reset_index(drop=True) + else: + # manage models 
with no documentation (mainly test models) + return None def __str__(self): """ Return model source files """ @@ -1496,6 +1534,7 @@ def __init__(self, py_model_file, data_files, initialize, missing_values): self.time.set_control_vars(**self.components._control_vars) self.data_files = data_files self.missing_values = missing_values + self._doc = self._build_doc() if initialize: self.initialize() @@ -2125,8 +2164,9 @@ def _integrate(self, capture_elements): while self.time.in_bounds(): if self.time.in_return(): - outputs.at[self.time()] = [getattr(self.components, key)() - for key in capture_elements] + outputs.at[self.time.round()] = [ + getattr(self.components, key)() + for key in capture_elements] self._euler_step(self.time.time_step()) self.time.update(self.time()+self.time.time_step()) self.clean_caches() @@ -2135,8 +2175,8 @@ def _integrate(self, capture_elements): # need to add one more time step, because we run only the state # updates in the previous loop and thus may be one short. if self.time.in_return(): - outputs.at[self.time()] = [getattr(self.components, key)() - for key in capture_elements] + outputs.at[self.time.round()] = [getattr(self.components, key)() + for key in capture_elements] progressbar.finish() diff --git a/pysd/pysd.py b/pysd/pysd.py index 69eaccf9..1de73930 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -8,6 +8,7 @@ import sys from .py_backend.statefuls import Model + if sys.version_info[:2] < (3, 7): # pragma: no cover raise RuntimeError( "\n\n" @@ -69,7 +70,7 @@ def read_xmile(xmile_file, data_files=None, initialize=True, def read_vensim(mdl_file, data_files=None, initialize=True, missing_values="warning", split_views=False, - encoding=None, **kwargs): + encoding=None, old=False, **kwargs): """ Construct a model from Vensim `.mdl` file. @@ -124,9 +125,21 @@ def read_vensim(mdl_file, data_files=None, initialize=True, >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ - from .translation.vensim.vensim2py import translate_vensim + if old: + from .translation.vensim.vensim2py import translate_vensim + py_model_file = translate_vensim(mdl_file, split_views, encoding, **kwargs) + else: + from pysd.translation.vensim.vensin_file import VensimFile + from pysd.building.python.python_builder import ModelBuilder + ven_file = VensimFile(mdl_file) + ven_file.parse() + if split_views: + subview_sep = kwargs.get("subview_sep", "") + ven_file.parse_sketch(subview_sep) + + abs_model = ven_file.get_abstract_model() + py_model_file = ModelBuilder(abs_model).build_model() - py_model_file = translate_vensim(mdl_file, split_views, encoding, **kwargs) model = load(py_model_file, data_files, initialize, missing_values) model.mdl_file = str(mdl_file) return model diff --git a/pysd/tools/benchmarking.py b/pysd/tools/benchmarking.py index 8012e35a..16754ac0 100644 --- a/pysd/tools/benchmarking.py +++ b/pysd/tools/benchmarking.py @@ -2,18 +2,18 @@ Benchmarking tools for testing and comparing outputs between different files. Some of these functions are also used for testing. 
""" - -import os.path import warnings +from pathlib import Path import numpy as np import pandas as pd -from pysd import read_vensim, read_xmile +from pysd import read_vensim, read_xmile, load from ..py_backend.utils import load_outputs, detect_encoding -def runner(model_file, canonical_file=None, transpose=False, data_files=None): +def runner(model_file, canonical_file=None, transpose=False, data_files=None, + old=False): """ Translates and runs a model and returns its output and the canonical output. @@ -34,34 +34,42 @@ def runner(model_file, canonical_file=None, transpose=False, data_files=None): data_files: list (optional) List of the data files needed to run the model. + old: bool(optional) + If True use old translation method, used for testing backward compatibility. + Returns ------- output, canon: (pandas.DataFrame, pandas.DataFrame) pandas.DataFrame of the model output and the canonical output. """ - directory = os.path.dirname(model_file) + if isinstance(model_file, str): + model_file = Path(model_file) + + directory = model_file.parent # load canonical output if not canonical_file: - if os.path.isfile(os.path.join(directory, 'output.csv')): - canonical_file = os.path.join(directory, 'output.csv') - elif os.path.isfile(os.path.join(directory, 'output.tab')): - canonical_file = os.path.join(directory, 'output.tab') + if directory.joinpath('output.csv').is_file(): + canonical_file = directory.joinpath('output.csv') + elif directory.joinpath('output.tab').is_file(): + canonical_file = directory.joinpath('output.tab') else: - raise FileNotFoundError('\nCanonical output file not found.') + raise FileNotFoundError("\nCanonical output file not found.") canon = load_outputs(canonical_file, transpose=transpose, encoding=detect_encoding(canonical_file)) # load model - if model_file.lower().endswith('.mdl'): - model = read_vensim(model_file, data_files) - elif model_file.lower().endswith(".xmile"): + if model_file.suffix.lower() == ".mdl": + model = read_vensim(model_file, data_files, old=old) + elif model_file.suffix.lower() == ".xmile": model = read_xmile(model_file, data_files) + elif model_file.suffix.lower() == ".py": + model = load(model_file, data_files) else: - raise ValueError('\nModelfile should be *.mdl or *.xmile') + raise ValueError("\nModelfile should be *.mdl, *.xmile, or *.py") # run model and return the result @@ -87,8 +95,8 @@ def assert_frames_close(actual, expected, assertion="raise", assertion: str (optional) "raise" if an error should be raised when not able to assert - that two frames are close. Otherwise, it will show a warning - message. Default is "raise". + that two frames are close. If "warning", it will show a warning + message. If "return" it will return information. Default is "raise". verbose: bool (optional) If True, if any column is not close the actual and expected values @@ -166,15 +174,17 @@ def assert_frames_close(actual, expected, assertion="raise", message = "" if actual_cols.difference(expected_cols): - columns = ["'" + col + "'" for col - in actual_cols.difference(expected_cols)] + columns = sorted([ + "'" + col + "'" for col + in actual_cols.difference(expected_cols)]) columns = ", ".join(columns) message += '\nColumns ' + columns\ + ' from actual values not found in expected values.' 
if expected_cols.difference(actual_cols): - columns = ["'" + col + "'" for col - in expected_cols.difference(actual_cols)] + columns = sorted([ + "'" + col + "'" for col + in expected_cols.difference(actual_cols)]) columns = ", ".join(columns) message += '\nColumns ' + columns\ + ' from expected values not found in actual values.' @@ -190,8 +200,8 @@ def assert_frames_close(actual, expected, assertion="raise", # TODO let compare dataframes with different timestamps if "warn" assert np.all(np.equal(expected.index.values, actual.index.values)), \ - 'test set and actual set must share a common index' \ - 'instead found' + expected.index.values + 'vs' + actual.index.values + "test set and actual set must share a common index, "\ + "instead found %s vs %s" % (expected.index.values, actual.index.values) # if for Vensim outputs where constant values are only in the first row _remove_constant_nan(expected) @@ -201,13 +211,25 @@ def assert_frames_close(actual, expected, assertion="raise", actual[columns], **kwargs) - if c.all(): - return + if c.all().all(): + return (set(), np.nan, set()) if assertion == "return" else None - columns = np.array(columns, dtype=str)[~c.values] + # Get the columns that have the first different value, useful for + # debugging + false_index = c.apply( + lambda x: np.where(~x)[0][0] if not x.all() else np.nan) + index_first_false = int(np.nanmin(false_index)) + time_first_false = c.index[index_first_false] + variable_first_false = sorted( + false_index.index[false_index == index_first_false]) + + columns = sorted(np.array(columns, dtype=str)[~c.all().values]) assertion_details = "\nFollowing columns are not close:\n\t"\ - + ", ".join(columns) + + ", ".join(columns) + "\n\n"\ + + f"First false values ({time_first_false}):\n\t"\ + + ", ".join(variable_first_false) + if verbose: for col in columns: assertion_details += '\n\n'\ @@ -229,13 +251,15 @@ def assert_frames_close(actual, expected, assertion="raise", if assertion == "raise": raise AssertionError(assertion_details) + elif assertion == "return": + return (set(columns), time_first_false, set(variable_first_false)) else: warnings.warn(assertion_details) def assert_allclose(x, y, rtol=1.e-5, atol=1.e-5): """ - Asserts if all numeric values from two arrays are close. + Asserts if numeric values from two arrays are close. 
Parameters ---------- @@ -253,7 +277,7 @@ None """ - return ((abs(x - y) <= atol + rtol * abs(y)) + x.isna()*y.isna()).all() + return ((abs(x - y) <= atol + rtol * abs(y)) + x.isna()*y.isna()) def _remove_constant_nan(df): diff --git a/pysd/translation/structures/__init__.py b/pysd/translation/structures/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pysd/translation/structures/abstract_model.py b/pysd/translation/structures/abstract_model.py new file mode 100644 index 00000000..b295eba9 --- /dev/null +++ b/pysd/translation/structures/abstract_model.py @@ -0,0 +1,168 @@ +from dataclasses import dataclass +from typing import Tuple, List, Union +from pathlib import Path + + +@dataclass +class AbstractComponent: + subscripts: Tuple[str] + ast: object + type: str = "Auxiliary" + subtype: str = "Normal" + + def __str__(self) -> str: + return "AbstractComponent %s\n" % ( + "%s" % repr(list(self.subscripts)) if self.subscripts else "") + + def dump(self, depth=None, indent="") -> str: + if depth == 0: + return self.__str__() + + return self.__str__() + "\n" + self._str_child(depth, indent) + + def _str_child(self, depth, indent) -> str: + return str(self.ast).replace("\t", indent).replace("\n", "\n" + indent) + + +@dataclass +class AbstractUnchangeableConstant(AbstractComponent): + subscripts: Tuple[str] + ast: object + type: str = "Constant" + subtype: str = "Unchangeable" + + def __str__(self) -> str: + return "AbstractUnchangeableConstant %s\n" % ( + "%s" % repr(list(self.subscripts)) if self.subscripts else "") + + +@dataclass +class AbstractLookup(AbstractComponent): + subscripts: Tuple[str] + ast: object + arguments: str = "x" + type: str = "Lookup" + subtype: str = "Hardcoded" + + def __str__(self) -> str: + return "AbstractLookup %s\n" % ( + "%s" % repr(list(self.subscripts)) if self.subscripts else "") + + +@dataclass +class AbstractData(AbstractComponent): + subscripts: Tuple[str] + ast: object + keyword: Union[str, None] = None + type: str = "Data" + subtype: str = "Normal" + + def __str__(self) -> str: + return "AbstractData (%s) %s\n" % ( + self.keyword, + "%s" % repr(list(self.subscripts)) if self.subscripts else "") + + def dump(self, depth=None, indent="") -> str: + if depth == 0: + return self.__str__() + + return self.__str__() + "\n" + self._str_child(depth, indent) + + def _str_child(self, depth, indent) -> str: + return str(self.ast).replace("\n", "\n" + indent) + + +@dataclass +class AbstractElement: + name: str + components: List[AbstractComponent] + units: str = "" + range: tuple = (None, None) + documentation: str = "" + + def __str__(self) -> str: + return "AbstractElement:\t%s (%s, %s)\n%s\n" % ( + self.name, self.units, self.range, self.documentation) + + def dump(self, depth=None, indent="") -> str: + if depth == 0: + return self.__str__() + elif depth is not None: + depth -= 1 + + return self.__str__() + "\n" + self._str_child(depth, indent) + + def _str_child(self, depth, indent) -> str: + return "\n".join([ + component.dump(depth, indent) for component in self.components + ]).replace("\n", "\n" + indent) + + +@dataclass +class AbstractSubscriptRange: + name: str + subscripts: Tuple[str] + mapping: Tuple[str] + + def __str__(self) -> str: + return "AbstractSubscriptRange:\t%s\n\t%s\n" % ( + self.name, + "%s <- %s" % (self.subscripts, self.mapping) + if self.mapping else self.subscripts) + + def dump(self, depth=None, indent="") -> str: + return self.__str__() + + +@dataclass +class AbstractSection: + name:
str + path: Path + type: str # main, macro or module + params: List[str] + returns: List[str] + subscripts: Tuple[AbstractSubscriptRange] + elements: Tuple[AbstractElement] + split: bool + views_dict: Union[dict, None] + + def __str__(self) -> str: + return "AbstractSection (%s):\t%s (%s)\n" % ( + self.type, self.name, self.path) + + def dump(self, depth=None, indent="") -> str: + if depth == 0: + return self.__str__() + elif depth is not None: + depth -= 1 + + return self.__str__() + "\n" + self._str_child(depth, indent) + + def _str_child(self, depth, indent) -> str: + return "\n".join([ + element.dump(depth, indent) for element in self.subscripts + ] + [ + element.dump(depth, indent) for element in self.elements + ]).replace("\n", "\n" + indent) + + +@dataclass +class AbstractModel: + original_path: Path + sections: Tuple[AbstractSection] + + def __str__(self) -> str: + return "AbstractModel:\t%s\n" % self.original_path + + def dump(self, depth=None, indent="") -> str: + if depth == 0: + return self.__str__() + elif depth is not None: + depth -= 1 + + return self.__str__() + "\n" + self._str_child(depth, indent) + + def _str_child(self, depth, indent) -> str: + return "\n".join([ + section.dump(depth, indent) for section in self.sections + ]).replace("\n", "\n" + indent) diff --git a/pysd/translation/structures/components.py b/pysd/translation/structures/components.py new file mode 100644 index 00000000..7996269a --- /dev/null +++ b/pysd/translation/structures/components.py @@ -0,0 +1,272 @@ +from dataclasses import dataclass +from typing import Union + + +@dataclass +class ArithmeticStructure: + operators: str + arguments: tuple + + def __str__(self) -> str: + return "ArithmeticStructure:\n\t %s %s" % ( + self.operators, self.arguments) + + +@dataclass +class LogicStructure: + operators: str + arguments: tuple + + def __str__(self) -> str: + return "LogicStructure:\n\t %s %s" % ( + self.operators, self.arguments) + + +@dataclass +class SubscriptsReferenceStructure: + subscripts: tuple + + def __str__(self) -> str: + return "SubscriptReferenceStructure:\n\t %s" % self.subscripts + + +@dataclass +class ReferenceStructure: + reference: str + subscripts: Union[SubscriptsReferenceStructure, None] = None + + def __str__(self) -> str: + return "ReferenceStructure:\n\t %s%s" % ( + self.reference, + "\n\t" + str(self.subscripts or "").replace("\n", "\n\t")) + + +@dataclass +class CallStructure: + function: Union[str, object] + arguments: tuple + + def __str__(self) -> str: + return "CallStructure:\n\t%s(%s)" % ( + self.function, + "\n\t\t,".join([ + "\n\t\t" + str(arg).replace("\n", "\n\t\t") + for arg in self.arguments + ])) + + +@dataclass +class GameStructure: + expression: object + + def __str__(self) -> str: + return "GameStructure:\n\t%s" % self.expression + + +@dataclass +class InitialStructure: + initial: object + + def __str__(self) -> str: + return "InitialStructure:\n\t%s" % ( + self.initial) + + +@dataclass +class IntegStructure: + flow: object + initial: object + + def __str__(self) -> str: + return "IntegStructure:\n\t%s,\n\t%s" % ( + self.flow, + self.initial) + + +@dataclass +class DelayStructure: + input: object + delay_time: object + initial: object + order: float + + def __str__(self) -> str: + return "DelayStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( + self.order, + self.input, + self.delay_time, + self.initial) + + +@dataclass +class DelayNStructure: + input: object + delay_time: object + initial: object + order: object + + # DELAY N may behave different than other delays 
when the delay time + # changes during integration + + def __str__(self) -> str: + return "DelayNStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( + self.order, + self.input, + self.delay_time, + self.initial) + + +@dataclass +class DelayFixedStructure: + input: object + delay_time: object + initial: object + + def __str__(self) -> str: + return "DelayFixedStructure:\n\t%s,\n\t%s,\n\t%s" % ( + self.input, + self.delay_time, + self.initial) + + +@dataclass +class SmoothStructure: + input: object + smooth_time: object + initial: object + order: float + + def __str__(self) -> str: + return "SmoothStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( + self.order, + self.input, + self.smooth_time, + self.initial) + + +@dataclass +class SmoothNStructure: + input: object + smooth_time: object + initial: object + order: object + + # SMOOTH N may behave different than other smooths with RungeKutta + # integration + + def __str__(self) -> str: + return "SmoothNStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( + self.order, + self.input, + self.smooth_time, + self.initial) + + +@dataclass +class TrendStructure: + input: object + average_time: object + initial: object + + def __str__(self) -> str: + return "TrendStructure:\n\t%s,\n\t%s,\n\t%s" % ( + self.input, + self.average_time, + self.initial) + + +@dataclass +class ForecastStructure: + input: object + average_time: object + horizon: object + + def __str__(self) -> str: + return "ForecastStructure:\n\t%s,\n\t%s,\n\t%s" % ( + self.input, + self.average_time, + self.horizon) + + +@dataclass +class SampleIfTrueStructure: + condition: object + input: object + initial: object + + def __str__(self) -> str: + return "SampleIfTrueStructure:\n\t%s,\n\t%s,\n\t%s" % ( + self.condition, + self.input, + self.initial) + + +@dataclass +class LookupsStructure: + x: tuple + y: tuple + x_range: tuple + y_range: tuple + + def __str__(self) -> str: + return "LookupStructure:\n\tx %s = %s\n\ty %s = %s\n" % ( + self.x_range, self.x, self.y_range, self.y + ) + + +@dataclass +class InlineLookupsStructure: + argument: None + lookups: LookupsStructure + + def __str__(self) -> str: + return "InlineLookupsStructure:\n\t%s\n\t%s" % ( + str(self.argument).replace("\n", "\n\t"), + str(self.lookups).replace("\n", "\n\t") + ) + + +@dataclass +class DataStructure: + pass + + def __str__(self) -> str: + return "DataStructure" + + +@dataclass +class GetLookupsStructure: + file: str + tab: str + x_row_or_col: str + cell: str + + def __str__(self) -> str: + return "GetLookupStructure:\n\t'%s', '%s', '%s', '%s'\n" % ( + self.file, self.tab, self.x_row_or_col, self.cell + ) + + +@dataclass +class GetDataStructure: + file: str + tab: str + time_row_or_col: str + cell: str + + def __str__(self) -> str: + return "GetDataStructure:\n\t'%s', '%s', '%s', '%s'\n" % ( + self.file, self.tab, self.time_row_or_col, self.cell + ) + + +@dataclass +class GetConstantsStructure: + file: str + tab: str + cell: str + + def __str__(self) -> str: + return "GetConstantsStructure:\n\t'%s', '%s', '%s'\n" % ( + self.file, self.tab, self.cell + ) diff --git a/pysd/translation/utils.py b/pysd/translation/utils.py index 601926f6..d967c14d 100644 --- a/pysd/translation/utils.py +++ b/pysd/translation/utils.py @@ -487,9 +487,9 @@ def merge_nested_dicts(original_dict, dict_to_merge): Returns ------- - None - """ + None + """ for k, v in dict_to_merge.items(): if (k in original_dict and isinstance(original_dict[k], dict) and isinstance(dict_to_merge[k], Mapping)): diff --git 
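These structure dataclasses are deliberately thin containers: the parser emits a nested tree of them and the Python builder later walks that tree. As a minimal sketch (import path as introduced in this patch; the exact nesting for a given equation is decided by the parsers further below), the Vensim expression "a + b * c" would be held as:

from pysd.translation.structures.components import (
    ArithmeticStructure, ReferenceStructure)

# Nested tree for "a + b * c"; the product binds tighter, so it becomes the
# second argument of the addition.
expr = ArithmeticStructure(
    operators=["+"],
    arguments=(
        ReferenceStructure("a"),
        ArithmeticStructure(
            operators=["*"],
            arguments=(ReferenceStructure("b"), ReferenceStructure("c"))),
    ))
print(expr)  # the __str__ methods above dump the tree for debugging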
a/pysd/translation/vensim/parsing_expr/common_grammar.peg b/pysd/translation/vensim/parsing_expr/common_grammar.peg new file mode 100644 index 00000000..880f52bd --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/common_grammar.peg @@ -0,0 +1,18 @@ +# Parsing Expression Grammar: common_grammar + +name = basic_id / escape_group + +# This takes care of models with Unicode variable names +basic_id = id_start id_continue* + +id_start = ~r"[\w]"IU +id_continue = id_start / ~r"[0-9\'\$\s\_]" + +# between quotes, either escaped quote or character that is not a quote +escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" + +number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?" +string = "\'" (~r"[^\']"IU)* "\'" +range = _ "[" ~r"[^\]]*" "]" _ "," + +_ = ~r"[\s\\]*" \ No newline at end of file diff --git a/pysd/translation/vensim/parsing_expr/components.peg b/pysd/translation/vensim/parsing_expr/components.peg new file mode 100644 index 00000000..263c0f7c --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/components.peg @@ -0,0 +1,36 @@ +# Parsing Expression Grammar: components + +expr_type = array / final_expr / empty + +final_expr = logic_expr _ (logic_oper _ logic_expr)* # logic operators (:and:, :or:) +logic_expr = not_oper? _ comp_expr # :not: operator +comp_expr = add_expr _ (comp_oper _ add_expr)? # comparison (e.g. '<', '=>') +add_expr = prod_expr _ (add_oper _ prod_expr)* # addition and substraction +prod_expr = exp_expr _ (prod_oper _ exp_expr)* # product and division +exp_expr = neg_expr _ (exp_oper _ neg_expr)* # exponential +neg_expr = pre_oper? _ expr # pre operators (-, +) +expr = lookup_with_def / call / parens / number / reference / nan + +lookup_with_def = ~r"(WITH\ LOOKUP)"I _ "(" _ final_expr _ "," _ "(" _ range? ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ _ ")" _ ")" + +nan = ":NA:" + +arguments = ((string / final_expr) _ ","? _)* +parens = "(" _ final_expr _ ")" + +call = reference _ "(" _ arguments _ ")" + +reference = (name _ subscript_list) / name # check first for subscript +subscript_list = "[" _ (name _ "!"? _ ","? _)+ _ "]" + +array = (number _ ("," / ";")? _)+ !~r"." # negative lookahead for + +logic_oper = ~r"(%(logic_ops)s)"IU +not_oper = ~r"(%(not_ops)s)"IU +comp_oper = ~r"(%(comp_ops)s)"IU +add_oper = ~r"(%(add_ops)s)"IU +prod_oper = ~r"(%(prod_ops)s)"IU +exp_oper = ~r"(%(exp_ops)s)"IU +pre_oper = ~r"(%(pre_ops)s)"IU + +empty = "" # empty string \ No newline at end of file diff --git a/pysd/translation/vensim/parsing_expr/element_object.peg b/pysd/translation/vensim/parsing_expr/element_object.peg new file mode 100644 index 00000000..58fb8310 --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/element_object.peg @@ -0,0 +1,47 @@ +# Parsing Expression Grammar: element_object + +entry = unchangeable_constant / component / data_definition / subscript_definition / lookup_definition / subscript_copy + +# Regular component definition "=" +component = name _ subscript_component? _ "=" _ expression + +# Unchangeable constant definition "==" +unchangeable_constant = name _ subscript_component? _ "==" _ expression + +# Lookup definition "()", uses lookahead assertion to capture whole group +lookup_definition = name _ subscript_component? &"(" _ expression + + +# Data type definition ":=" or empty with keyword +data_definition = component_data_definition / empty_data_definition +component_data_definition = name _ subscript_component? _ keyword? _ ":=" _ expression +empty_data_definition = name _ subscript_component? 
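Note that the operator rules in components.peg (add_oper, prod_oper, ...) are not complete regular expressions: they contain %(...)s placeholders that are filled in when the grammar is loaded. A small sketch of that substitution, using the parsing_ops dictionary built later in this patch (vensim_structures.py) and the plain "%" formatting applied by Grammar.get in vensim_utils.py:

import re

# same construction as parsing_ops in vensim_structures.py, reduced to two keys
operators = {"add_ops": ["+", "-"], "prod_ops": ["*", "/"]}
parsing_ops = {
    key: "|".join(re.escape(x) for x in values)
    for key, values in operators.items()
}

rule = 'add_oper = ~r"(%(add_ops)s)"IU'
print(rule % parsing_ops)   # -> add_oper = ~r"(\+|\-)"IU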
_ keyword + +# Subscript ranges +# Subcript range regular definition ":" +subscript_definition = name _ ":" _ (imported_subscript / literal_subscript) _ subscript_mapping_list? +imported_subscript = basic_id _ "(" _ (string _ ","? _)* ")" +literal_subscript = (subscript_range / subscript) _ ("," _ (subscript_range / subscript) _)* +subscript_range = "(" _ basic_id _ "-" _ basic_id _ ")" + +# Subcript range definition by copy "<->" +subscript_copy = name _ "<->" _ name_mapping + +# Subscript mapping +subscript_mapping_list = "->" _ subscript_mapping _ ("," _ subscript_mapping _)* +subscript_mapping = (_ name_mapping _) / (_ "(" _ name_mapping _ ":" _ index_list _")" ) +name_mapping = basic_id / escape_group + +# Subscript except match +subscript_list_except = ":EXCEPT:" _ '[' _ subscript_except _ ("," _ subscript_except _)* _ ']' +subscript_except = basic_id / escape_group + +# Subscript match +subscript_list = "[" _ index_list _ "]" +index_list = subscript _ ("," _ subscript _)* +subscript = basic_id / escape_group + +# Other definitions +subscript_component = subscript_list _ subscript_list_except? +expression = ~r".*" # expression could be anything, at this point. +keyword = ":" _ basic_id _ ":" \ No newline at end of file diff --git a/pysd/translation/vensim/parsing_expr/file_sections.peg b/pysd/translation/vensim/parsing_expr/file_sections.peg new file mode 100644 index 00000000..2dd9c463 --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/file_sections.peg @@ -0,0 +1,15 @@ +# Parsing Expression Grammar: file_sections + +# full file +file = encoding? _ ((macro / main) _)+ + +# macro definition +macro = ":MACRO:" _ name _ "(" _ (name _ ","? _)+ _ ":"? _ (name _ ","? _)* _ ")" ~r".+?(?=:END OF MACRO:)" ":END OF MACRO:" + +# regular expressions +main = main_part / main_end +main_part = !":MACRO:" ~r".+(?=:MACRO:)" +main_end = !":MACRO:" ~r".+" + +# encoding +encoding = ~r"\{[^\}]*\}" diff --git a/pysd/translation/vensim/parsing_expr/lookups.peg b/pysd/translation/vensim/parsing_expr/lookups.peg new file mode 100644 index 00000000..ef088c9e --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/lookups.peg @@ -0,0 +1,7 @@ +# Parsing Expression Grammar: lookups + +lookup = _ "(" _ (regularLookup / excelLookup) _ ")" +regularLookup = range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ +excelLookup = ~"GET( |_)(XLS|DIRECT)( |_)LOOKUPS"I _ "(" (args _ ","? _)+ ")" + +args = ~r"[^,()]*" diff --git a/pysd/translation/vensim/parsing_expr/section_elements.peg b/pysd/translation/vensim/parsing_expr/section_elements.peg new file mode 100644 index 00000000..40250fa5 --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/section_elements.peg @@ -0,0 +1,12 @@ +# Parsing Expression Grammar: section_elements + +model = (entry / section)+ sketch? +entry = element "~" element "~" doc ("~" element)? 
"|" +section = element "~" element "|" +sketch = ~r".*" #anything + +# Either an escape group, or a character that is not tilde or pipe +element = ( escape_group / ~r"[^~|]")* + +# Anything other that is not a tilde or pipe +doc = (~r"[^~|]")* diff --git a/pysd/translation/vensim/parsing_expr/sketch.peg b/pysd/translation/vensim/parsing_expr/sketch.peg new file mode 100644 index 00000000..b4dd0546 --- /dev/null +++ b/pysd/translation/vensim/parsing_expr/sketch.peg @@ -0,0 +1,58 @@ +# Parsing Expression Grammar: sketch + +line = var_definition / view_intro / view_title / view_definition / arrow / flow / other_objects / anything +view_intro = ~r"\s*Sketch.*?names$" / ~r"^V300.*?ignored$" +view_title = "*" view_name +view_name = ~r"(?<=\*)[^\n]+$" +view_definition = "$" color "," digit "," font_properties "|" ( ( color / ones_and_dashes ) "|")* view_code +var_definition = var_code "," var_number "," var_name "," position "," var_box_type "," arrows_in_allowed "," hide_level "," var_face "," var_word_position "," var_thickness "," var_rest_conf ","? ( ( ones_and_dashes / color) ",")* font_properties? ","? extra_bytes? + +# elements used in a line defining the properties of a variable or stock +var_name = element +var_name = ~r"(?<=,)[^,]+(?=,)" +var_number = digit +var_box_type = ~r"(?<=,)\d+,\d+,\d+(?=,)" # improve this regex +arrows_in_allowed = ~r"(?<=,)\d+(?=,)" # if this is an even number it's a shadow variable +hide_level = digit +var_face = digit +var_word_position = ~r"(?<=,)\-*\d+(?=,)" +var_thickness = digit +var_rest_conf = digit "," ~r"\d+" +extra_bytes = ~r"\d+,\d+,\d+,\d+,\d+,\d+" # required since Vensim 8.2.1 +arrow = arrow_code "," digit "," origin_var "," destination_var "," (digit ",")+ (ones_and_dashes ",")? ((color ",") / ("," ~r"\d+") / (font_properties "," ~r"\d+"))* "|(" position ")|" + +# arrow origin and destination (this may be useful if further parsing is required) +origin_var = digit +destination_var = digit + +# flow arrows +flow = source_or_sink_or_plot / flow_arrow + +# if you want to extend the parsing, these three would be a good starting point (they are followed by "anything") +source_or_sink_or_plot = multipurpose_code "," anything +flow_arrow = flow_arrow_code "," anything +other_objects = other_objects_code "," anything + +# fonts +font_properties = font_name? "|" font_size "|" font_style? "|" color +font_style = "B" / "I" / "U" / "S" / "V" # italics, bold, underline, etc +font_size = ~r"\d+" # this needs to be made a regex to match any font +font_name = ~r"(?<=,)[^\|\d]+(?=\|)" + +# x and y within the view layout. This may be useful if further parsing is required +position = ~r"-*\d+,-*\d+" + +# rgb color (e.g. 
255-255-255) +color = ~r"((?= num_end: + raise ValueError( + "\nThe number of the first subscript value must be " + "lower than the second subscript value in a " + "subscript numeric range.") + elif prefix_start != prefix_end: + raise ValueError( + "\nOnly matching names ending in numbers are valid.") + + self.subscripts += [ + prefix_start + str(i) for i in range(num_start, num_end + 1) + ] + + def visit_name(self, n, vc): + self.name = vc[0].strip() + + def visit_subscript(self, n, vc): + self.subscripts.append(n.text.strip()) + + def visit_subscript_except(self, n, vc): + self.subscripts_except.append(n.text.strip()) + + def visit_expression(self, n, vc): + self.expression = n.text.strip() + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text + + def visit__(self, n, vc): + # TODO check if necessary when finished + return " " + + +class SubscriptRange(): + """Subscript range definition, defined by ":" or "<->" in Vensim.""" + + def __init__(self, name, definition, mapping=[]): + self.name = name + self.definition = definition + self.mapping = mapping + + def __str__(self): + return "\nSubscript range definition: %s\n\t%s\n" % ( + self.name, + "%s <- %s" % (self.definition, self.mapping) + if self.mapping else self.definition) + + @property + def _verbose(self): + return self.__str__() + + @property + def verbose(self): + print(self._verbose) + + +class Component(): + """Model component defined by "name = expr" in Vensim.""" + kind = "Model component" + + def __init__(self, name, subscripts, expression): + self.name = name + self.subscripts = subscripts + self.expression = expression + + def __str__(self): + text = "\n%s definition: %s" % (self.kind, self.name) + text += "\nSubscrips: %s" % repr(self.subscripts[0])\ + if self.subscripts[0] else "" + text += " EXCEPT %s" % repr(self.subscripts[1])\ + if self.subscripts[1] else "" + text += "\n\t%s" % self._expression + return text + + @property + def _expression(self): + if hasattr(self, "ast"): + return str(self.ast).replace("\n", "\n\t") + + else: + return self.expression.replace("\n", "\n\t") + + @property + def _verbose(self): + return self.__str__() + + @property + def verbose(self): + print(self._verbose) + + def _parse(self): + tree = vu.Grammar.get("components", parsing_ops).parse(self.expression) + self.ast = ComponentsParser(tree).translation + if isinstance(self.ast, structures["get_xls_lookups"]): + self.lookup = True + else: + self.lookup = False + + def get_abstract_component(self): + if self.lookup: + return AbstractLookup(subscripts=self.subscripts, ast=self.ast) + else: + return AbstractComponent(subscripts=self.subscripts, ast=self.ast) + + +class UnchangeableConstant(Component): + """Unchangeable constant defined by "name == expr" in Vensim.""" + kind = "Unchangeable constant component" + + def __init__(self, name, subscripts, expression): + super().__init__(name, subscripts, expression) + + def get_abstract_component(self): + return AbstractUnchangeableConstant( + subscripts=self.subscripts, ast=self.ast) + + +class Lookup(Component): + """Lookup variable, defined by "name(expr)" in Vensim.""" + kind = "Lookup component" + + def __init__(self, name, subscripts, expression): + super().__init__(name, subscripts, expression) + + def _parse(self): + tree = vu.Grammar.get("lookups").parse(self.expression) + self.ast = LookupsParser(tree).translation + + def get_abstract_component(self): + return AbstractLookup(subscripts=self.subscripts, ast=self.ast) + + +class Data(Component): + """Data 
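The numeric-range branch above expands subscript definitions such as "(s1-s3)" into the explicit subscript names, after checking that both ends share a prefix and that the numbers are increasing. A standalone sketch of that expansion; the regex used here to split the prefix from the trailing number is an assumption, not necessarily the one used by the visitor:

import re

def expand_range(start, end):
    # split "s1" -> ("s", "1"); assumed helper for illustration only
    prefix_start, num_start = re.match(r"(.*?)(\d+)$", start).groups()
    prefix_end, num_end = re.match(r"(.*?)(\d+)$", end).groups()
    if prefix_start != prefix_end:
        raise ValueError("Only matching names ending in numbers are valid.")
    if int(num_start) >= int(num_end):
        raise ValueError("The first number must be lower than the second.")
    return [prefix_start + str(i)
            for i in range(int(num_start), int(num_end) + 1)]

print(expand_range("s1", "s3"))   # ['s1', 's2', 's3']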
variable, defined by "name := expr" in Vensim.""" + kind = "Data component" + + def __init__(self, name, subscripts, keyword, expression): + super().__init__(name, subscripts, expression) + self.keyword = keyword + + def __str__(self): + text = "\n%s definition: %s" % (self.kind, self.name) + text += "\nSubscrips: %s" % repr(self.subscripts[0])\ + if self.subscripts[0] else "" + text += " EXCEPT %s" % repr(self.subscripts[1])\ + if self.subscripts[1] else "" + text += "\nKeyword: %s" % self.keyword if self.keyword else "" + text += "\n\t%s" % self._expression + return text + + def _parse(self): + if not self.expression: + # empty data vars, read from vdf file + self.ast = structures["data"]() + else: + super()._parse() + + def get_abstract_component(self): + return AbstractData( + subscripts=self.subscripts, ast=self.ast, keyword=self.keyword) + + +class LookupsParser(parsimonious.NodeVisitor): + def __init__(self, ast): + self.translation = None + self.visit(ast) + + def visit_range(self, n, vc): + return n.text.strip()[:-1].replace(")-(", "),(") + + def visit_regularLookup(self, n, vc): + if vc[0]: + xy_range = np.array(eval(vc[0])) + else: + xy_range = np.full((2, 2), np.nan) + + values = np.array((eval(vc[2]))) + values = values[np.argsort(values[:, 0])] + + self.translation = structures["lookup"]( + x=tuple(values[:, 0]), + y=tuple(values[:, 1]), + x_range=tuple(xy_range[:, 0]), + y_range=tuple(xy_range[:, 1]) + ) + + def visit_excelLookup(self, n, vc): + arglist = vc[3].split(",") + + self.translation = structures["get_xls_lookups"]( + file=eval(arglist[0]), + tab=eval(arglist[1]), + x_row_or_col=eval(arglist[2]), + cell=eval(arglist[3]) + ) + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text + + +class ComponentsParser(parsimonious.NodeVisitor): + def __init__(self, ast): + self.translation = None + self.elements = {} + self.subs = None # the subscripts if given + self.negatives = set() + self.visit(ast) + + def visit_expr_type(self, n, vc): + self.translation = self.elements[vc[0]] + + def visit_final_expr(self, n, vc): + return vu.split_arithmetic( + structures["logic"], parsing_ops["logic_ops"], + "".join(vc).strip(), self.elements) + + def visit_logic_expr(self, n, vc): + id = vc[2] + if vc[0].lower() == ":not:": + id = self.add_element(structures["logic"]( + [":NOT:"], + (self.elements[id] if id in self.elements else eval(id),) + )) + return id + + def visit_comp_expr(self, n, vc): + return vu.split_arithmetic( + structures["logic"], parsing_ops["comp_ops"], + "".join(vc).strip(), self.elements) + + def visit_add_expr(self, n, vc): + return vu.split_arithmetic( + structures["arithmetic"], parsing_ops["add_ops"], + "".join(vc).strip(), self.elements) + + def visit_prod_expr(self, n, vc): + return vu.split_arithmetic( + structures["arithmetic"], parsing_ops["prod_ops"], + "".join(vc).strip(), self.elements) + + def visit_exp_expr(self, n, vc): + return vu.split_arithmetic( + structures["arithmetic"], parsing_ops["exp_ops"], + "".join(vc).strip(), self.elements, self.negatives) + + def visit_neg_expr(self, n, vc): + id = vc[2] + if vc[0] == "-": + if id in self.elements: + self.negatives.add(id) + else: + return self.add_element(eval(n.text)) + return id + + def visit_call(self, n, vc): + func = self.elements[vc[0]] + args = self.elements[vc[4]] + if func.reference in structures: + return self.add_element(structures[func.reference](*args)) + else: + return self.add_element(structures["call"](func, args)) + + def visit_reference(self, n, vc): + id = 
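visit_regularLookup above relies on the grammar having captured the (x, y) pairs as plain text, which is then eval'ed, sorted by x and split into the tuples of a LookupsStructure. The core of that step in isolation, with invented values:

import numpy as np

pairs_text = "(2, 4), (0, 0), (1, 1)"      # as captured by the lookups grammar
values = np.array(eval(pairs_text))
values = values[np.argsort(values[:, 0])]   # lookups are stored sorted by x

x, y = tuple(values[:, 0]), tuple(values[:, 1])
print(x, y)   # x ascending, y reordered to match; these feed LookupsStructure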
self.add_element(structures["reference"]( + vc[0].lower().replace(" ", "_"), self.subs)) + self.subs = None + return id + + def visit_range(self, n, vc): + return self.add_element(n.text.strip()[:-1].replace(")-(", "),(")) + + def visit_lookup_with_def(self, n, vc): + if vc[10]: + xy_range = np.array(eval(self.elements[vc[10]])) + else: + xy_range = np.full((2, 2), np.nan) + + values = np.array((eval(vc[11]))) + values = values[np.argsort(values[:, 0])] + + lookup = structures["lookup"]( + x=tuple(values[:, 0]), + y=tuple(values[:, 1]), + x_range=tuple(xy_range[:, 0]), + y_range=tuple(xy_range[:, 1]) + ) + + return self.add_element(structures["with_lookup"]( + self.elements[vc[4]], lookup)) + + def visit_array(self, n, vc): + if ";" in n.text or "," in n.text: + return self.add_element(np.squeeze(np.array( + [row.split(",") for row in n.text.strip(";").split(";")], + dtype=float))) + else: + return self.add_element(eval(n.text)) + + def visit_subscript_list(self, n, vc): + subs = [x.strip() for x in vc[2].split(",")] + self.subs = structures["subscripts_ref"](subs) + return "" + + def visit_name(self, n, vc): + return n.text.strip() + + def visit_string(self, n, vc): + return self.add_element(eval(n.text)) + + def visit_arguments(self, n, vc): + arglist = tuple(x.strip(",") for x in vc) + return self.add_element(tuple( + self.elements[arg] if arg in self.elements + else eval(arg) for arg in arglist)) + + def visit_parens(self, n, vc): + return vc[2] + + def visit__(self, n, vc): + """Handles whitespace characters""" + return "" + + def visit_nan(self, n, vc): + return "np.nan" + + def visit_empty(self, n, vc): + #warnings.warn(f"Empty expression for '{element['real_name']}''.") + return self.add_element(None) + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text + + def add_element(self, element): + return vu.add_element(self.elements, element) diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translation/vensim/vensim_section.py new file mode 100644 index 00000000..30293662 --- /dev/null +++ b/pysd/translation/vensim/vensim_section.py @@ -0,0 +1,124 @@ +from typing import List, Union +from pathlib import Path +import parsimonious + +from ..structures.abstract_model import\ + AbstractElement, AbstractSubscriptRange, AbstractSection + +from . 
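For constant arrays, visit_array above turns the Vensim notation (";" between rows, "," between columns) into a float ndarray. The same conversion in isolation:

import numpy as np

text = "1, 2, 3; 4, 5, 6;"                  # a 2x3 Vensim constant array
arr = np.squeeze(np.array(
    [row.split(",") for row in text.strip(";").split(";")], dtype=float))
print(arr.shape)   # (2, 3)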
import vensim_utils as vu +from .vensim_element import Element, SubscriptRange, Component + + +class FileSection(): # File section dataclass + + def __init__(self, name: str, path: Path, type: str, + params: List[str], returns: List[str], + content: str, split: bool, views_dict: Union[dict, None] + ) -> object: + self.name = name + self.path = path + self.type = type + self.params = params + self.returns = returns + self.content = content + self.split = split + self.views_dict = views_dict + self.elements = None + + def __str__(self): + return "\nFile section: %s\n" % self.name + + @property + def _verbose(self): + text = self.__str__() + if self.elements: + for element in self.elements: + text += element._verbose + else: + text += self.content + + return text + + @property + def verbose(self): + print(self._verbose) + + def _parse(self): + tree = vu.Grammar.get("section_elements").parse(self.content) + self.elements = SectionElementsParser(tree).entries + self.elements = [element._parse() for element in self.elements] + # split subscript from other components + self.subscripts = [ + element for element in self.elements + if isinstance(element, SubscriptRange) + ] + self.components = [ + element for element in self.elements + if isinstance(element, Component) + ] + # reorder element list for better printing + self.elements = self.subscripts + self.components + + [component._parse() for component in self.components] + + def get_abstract_section(self): + return AbstractSection( + name=self.name, + path=self.path, + type=self.type, + params=self.params, + returns=self.returns, + subscripts=self.solve_subscripts(), + elements=self.merge_components(), + split=self.split, + views_dict=self.views_dict + ) + + def solve_subscripts(self): + return [AbstractSubscriptRange( + name=subs_range.name, + subscripts=subs_range.definition, + mapping=subs_range.mapping + ) for subs_range in self.subscripts] + + def merge_components(self): + merged = {} + for component in self.components: + name = component.name.lower().replace(" ", "_") + if name not in merged: + merged[name] = AbstractElement( + name=component.name, + components=[]) + + if component.units: + merged[name].units = component.units + if component.limits[0] is not None\ + or component.limits[1] is not None: + merged[name].range = component.limits + if component.documentation: + merged[name].documentation = component.documentation + + merged[name].components.append(component.get_abstract_component()) + + + + return list(merged.values()) + + +class SectionElementsParser(parsimonious.NodeVisitor): + # TODO include units parsing + def __init__(self, ast): + self.entries = [] + self.visit(ast) + + def visit_entry(self, n, vc): + self.entries.append( + Element( + equation=vc[0].strip(), + units=vc[2].strip(), + documentation=vc[4].strip(), + ) + ) + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text or "" diff --git a/pysd/translation/vensim/vensim_structures.py b/pysd/translation/vensim/vensim_structures.py new file mode 100644 index 00000000..5341358c --- /dev/null +++ b/pysd/translation/vensim/vensim_structures.py @@ -0,0 +1,54 @@ +import re +from ..structures import components as cs + + +structures = { + "reference": cs.ReferenceStructure, + "subscripts_ref": cs.SubscriptsReferenceStructure, + "arithmetic": cs.ArithmeticStructure, + "logic": cs.LogicStructure, + "with_lookup": cs.InlineLookupsStructure, + "call": cs.CallStructure, + "game": cs.GameStructure, + "get_xls_lookups": cs.GetLookupsStructure, + 
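SectionElementsParser receives each model entry already split by the section_elements grammar into equation, units and documentation (the "~"-separated fields ended by "|"). A rough illustration of that split using a plain string split instead of the PEG grammar, with an invented entry:

entry_text = "Population = births - deaths ~ people ~ Total population |"
equation, units, documentation = [
    part.strip(" |") for part in entry_text.split("~")
]
print(equation)        # Population = births - deaths
print(units)           # people
print(documentation)   # Total population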
"get_direct_lookups": cs.GetLookupsStructure, + "get_xls_data": cs.GetDataStructure, + "get_direct_data": cs.GetDataStructure, + "get_xls_constants": cs.GetConstantsStructure, + "get_direct_constants": cs.GetConstantsStructure, + "initial": cs.InitialStructure, + "integ": cs.IntegStructure, + "delay1": lambda x, y: cs.DelayStructure(x, y, x, 1), + "delay1i": lambda x, y, z: cs.DelayStructure(x, y, z, 1), + "delay3": lambda x, y: cs.DelayStructure(x, y, x, 3), + "delay3i": lambda x, y, z: cs.DelayStructure(x, y, z, 3), + "delay_n": cs.DelayNStructure, + "delay_fixed": cs.DelayFixedStructure, + "smooth": lambda x, y: cs.SmoothStructure(x, y, x, 1), + "smoothi": lambda x, y, z: cs.SmoothStructure(x, y, z, 1), + "smooth3": lambda x, y: cs.SmoothStructure(x, y, x, 3), + "smooth3i": lambda x, y, z: cs.SmoothStructure(x, y, z, 3), + "smooth_n": cs.SmoothNStructure, + "trend": cs.TrendStructure, + "forecast": cs.ForecastStructure, + "sample_if_true": cs.SampleIfTrueStructure, + "lookup": cs.LookupsStructure, + "data": cs.DataStructure +} + + +operators = { + "logic_ops": [":AND:", ":OR:"], + "not_ops": [":NOT:"], + "comp_ops": ["=", "<>", "<=", "<", ">=", ">"], + "add_ops": ["+", "-"], + "prod_ops": ["*", "/"], + "exp_ops": ["^"], + "pre_ops": ["+", "-"] +} + + +parsing_ops = { + key: "|".join(re.escape(x) for x in values) + for key, values in operators.items() +} diff --git a/pysd/translation/vensim/vensim_utils.py b/pysd/translation/vensim/vensim_utils.py new file mode 100644 index 00000000..db790c8f --- /dev/null +++ b/pysd/translation/vensim/vensim_utils.py @@ -0,0 +1,116 @@ +import re +import warnings +import uuid + +import parsimonious +from typing import Dict +from pathlib import Path +from chardet import detect + + +class Grammar(): + _common_grammar = None + _grammar_path: Path = Path(__file__).parent.joinpath("parsing_expr") + _grammar: Dict = {} + + @classmethod + def get(cls, grammar: str, subs: dict = {}) -> parsimonious.Grammar: + """Get parsimonious grammar for parsing""" + if grammar not in cls._grammar: + # include grammar in the class singleton + cls._grammar[grammar] = parsimonious.Grammar( + cls._read_grammar(grammar) % subs + ) + + return cls._grammar[grammar] + + @classmethod + def _read_grammar(cls, grammar: str) -> str: + """Read grammar from a file and include common grammar""" + with cls._gpath(grammar).open(encoding="ascii") as gfile: + source_grammar: str = gfile.read() + + return cls._include_common_grammar(source_grammar) + + @classmethod + def _include_common_grammar(cls, source_grammar: str) -> str: + """Include common grammar""" + if not cls._common_grammar: + with cls._gpath("common_grammar").open(encoding="ascii") as gfile: + cls._common_grammar: str = gfile.read() + + return r"{source_grammar}{common_grammar}".format( + source_grammar=source_grammar, common_grammar=cls._common_grammar + ) + + @classmethod + def _gpath(cls, grammar: str) -> Path: + """Get the grammar file path""" + return cls._grammar_path.joinpath(grammar).with_suffix(".peg") + + @classmethod + def clean(cls) -> None: + """Clean the saved grammars (used for debugging)""" + cls._common_grammar = None + cls._grammar: Dict = {} + + +def _detect_encoding_from_file(mdl_file: Path) -> str: + """Detect and return the encoding from a Vensim file""" + try: + with mdl_file.open("rb") as in_file: + f_line: bytes = in_file.readline() + f_line: str = f_line.decode(detect(f_line)['encoding']) + return re.search(r"(?<={)(.*)(?=})", f_line).group() + except (AttributeError, UnicodeDecodeError): + warnings.warn( 
+ "No encoding specified or detected to translate the model " + "file. 'UTF-8' encoding will be used.") + return "UTF-8" + + +def split_arithmetic(structure: object, parsing_ops: dict, + expression: str, elements: dict, + negatives: set = set()) -> object: + pattern = re.compile(parsing_ops) + parts = pattern.split(expression) + ops = pattern.findall(expression) + if not ops: + if parts[0] in negatives: + negatives.remove(parts[0]) + return add_element( + elements, + structure(["negative"], (elements[parts[0]],))) + else: + return expression + else: + if not negatives: + return add_element( + elements, + structure( + ops, + tuple([elements[id] if id in elements + else eval(id) for id in parts]))) + else: + # manage negative expressions + current_id = parts.pop() + current = elements[current_id] + if current_id in negatives: + negatives.remove(current_id) + current = structure(["negative"], (current,)) + while ops: + current_id = parts.pop() + current = structure( + [ops.pop()], + (elements[current_id], current)) + if current_id in negatives: + negatives.remove(current_id) + current = structure(["negative"], (current,)) + + return add_element(elements, current) + + +def add_element(elements: dict, element: object) -> str: + id = uuid.uuid4().hex + elements[id] = element + return id diff --git a/pysd/translation/vensim/vensin_file.py b/pysd/translation/vensim/vensin_file.py new file mode 100644 index 00000000..fbe63f31 --- /dev/null +++ b/pysd/translation/vensim/vensin_file.py @@ -0,0 +1,276 @@ +import re +from pathlib import Path +import warnings +import parsimonious +from collections.abc import Mapping + +from ..structures.abstract_model import AbstractModel + +from . import vensim_utils as vu +from .vensim_section import FileSection + + +class VensimFile(): + """ + Create a VensimFile object which allows parsing a mdl file. + + Parameters + ---------- + mdl_path: str or pathlib.Path + Path to the Vensim model. + + encoding: str or None (optional) + Encoding of the source model file. If None, the encoding will be + read from the model, if the encoding is not defined in the model + file it will be set to 'UTF-8'. Default is None. + + """ + def __init__(self, mdl_path, encoding=None): + self.mdl_path = Path(mdl_path) + self.root_path = self.mdl_path.parent + self.model_text = self._read(encoding) + self.sketch = "" + self.view_elements = None + self._split_sketch() + + def __str__(self): + return "\nVensim model file, loaded from:\n\t%s\n" % self.mdl_path + + @property + def _verbose(self): + text = self.__str__() + for section in self.sections: + text += section._verbose + + return text + + @property + def verbose(self): + print(self._verbose) + + def _read(self, encoding): + """Read a Vensim file and assign its content to self.model_text""" + # check for model extension + if self.mdl_path.suffix.lower() != ".mdl": + raise ValueError( + "The file to translate, '%s' " % self.mdl_path + + "is not a vensim model. It must end with mdl extension." 
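split_arithmetic is called on text in which sub-expressions have already been replaced by uuid keys into the elements dict, so splitting on the operator regex is safe. A reduced sketch of the splitting step; the ids and values are invented:

import re

parsing_ops = r"\+|\-"                      # add_ops, as built in vensim_structures
expression = "3f2a + 7c1b - 5"              # two (shortened) element ids and a literal
pattern = re.compile(parsing_ops)

parts = [p.strip() for p in pattern.split(expression)]
ops = pattern.findall(expression)
print(parts, ops)   # ['3f2a', '7c1b', '5'] ['+', '-']
# which would become, roughly:
# ArithmeticStructure(operators=['+', '-'],
#                     arguments=(elements['3f2a'], elements['7c1b'], 5))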
+ ) + + if encoding is None: + encoding = vu._detect_encoding_from_file(self.mdl_path) + + with self.mdl_path.open("r", encoding=encoding, + errors="ignore") as in_file: + model_text = in_file.read() + + return model_text + + def _split_sketch(self): + """Split model from the sketch""" + try: + split_model = self.model_text.split("\\\\\\---///", 1) + self.model_text = self._clean(split_model[0]) + # remove plots section, if it exists + self.sketch = split_model[1].split("///---\\\\\\")[0] + except LookupError: + pass + + def _clean(self, text): + return re.sub(r"[\n\t\s]+", " ", re.sub(r"\\\n\t", " ", text)) + + def parse(self): + tree = vu.Grammar.get("file_sections").parse(self.model_text) + self.sections = FileSectionsParser(tree).entries + self.sections[0].path = self.mdl_path.with_suffix(".py") + for section in self.sections[1:]: + section.path = self.mdl_path.parent.joinpath( + self.clean_file_names(section.name)[0] + ).with_suffix(".py") + # TODO modify names and paths of macros + for section in self.sections: + section._parse() + + def parse_sketch(self, subview_sep): + if self.sketch: + sketch = list(map( + lambda x: x.strip(), + self.sketch.split("\\\\\\---/// ") + )) + else: + warnings.warn( + "No sketch detected. The model will be built in a " + "single file.") + return None + + grammar = vu.Grammar.get("sketch") + view_elements = {} + for module in sketch: + for sketch_line in module.split("\n"): + # parsed line could have information about new view name + # or of a variable inside a view + parsed = SketchParser(grammar.parse(sketch_line)) + + if parsed.view_name: + view_name = parsed.view_name + view_elements[view_name] = set() + + elif parsed.variable_name: + view_elements[view_name].add(parsed.variable_name) + + # removes views that do not include any variable in them + non_empty_views = { + key: value for key, value in view_elements.items() if value + } + + # split into subviews, if subview_sep is provided + views_dict = {} + + if len(non_empty_views) == 1: + warnings.warn( + "Only a single view with no subviews was detected. The model" + " will be built in a single file.") + return + elif subview_sep and any( + sep in view for sep in subview_sep for view in non_empty_views): + escaped_separators = list(map(lambda x: re.escape(x), subview_sep)) + for full_name, values in non_empty_views.items(): + # split the full view name using the separator and make the + # individual parts safe file or directory names + clean_view_parts = self.clean_file_names( + *re.split("|".join(escaped_separators), full_name)) + # creating a nested dict for each view.subview + # (e.g. 
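_clean above removes Vensim line continuations (a backslash followed by newline and tab) and collapses runs of whitespace before the equations are parsed. The same two substitutions applied to a made-up equation:

import re

raw = "Population = births\\\n\t- deaths ~ people ~ |"
clean = re.sub(r"[\n\t\s]+", " ", re.sub(r"\\\n\t", " ", raw))
print(clean)   # Population = births - deaths ~ people ~ |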
{view_name: {subview_name: [values]}}) + nested_dict = values + + for item in reversed(clean_view_parts): + nested_dict = {item: nested_dict} + # merging the new nested_dict into the views_dict, preserving + # repeated keys + self.merge_nested_dicts(views_dict, nested_dict) + else: + # view names do not have separators or separator characters + # not provided + + if subview_sep and not any( + sep in view for sep in subview_sep for view in non_empty_views): + warnings.warn( + "The given subview separators were not matched in " + "any view name.") + + for view_name, elements in non_empty_views.items(): + views_dict[self.clean_file_names(view_name)[0]] = elements + + self.sections[0].split = True + self.sections[0].views_dict = views_dict + + def get_abstract_model(self): + return AbstractModel( + original_path=self.mdl_path, + sections=tuple(section.get_abstract_section() + for section in self.sections)) + + @staticmethod + def clean_file_names(*args): + """ + Removes special characters and makes clean file names. + + Parameters + ---------- + *args: tuple + Any number of strings to to clean. + + Returns + ------- + clean: list + List containing the clean strings. + + """ + return [ + re.sub( + r"[\W]+", "", + name.replace(" ", "_") + ).lstrip("0123456789") + for name in args] + + def merge_nested_dicts(self, original_dict, dict_to_merge): + """ + Merge dictionaries recursively, preserving common keys. + + Parameters + ---------- + original_dict: dict + Dictionary onto which the merge is executed. + + dict_to_merge: dict + Dictionary to be merged to the original_dict. + + Returns + ------- + None + + """ + for key, value in dict_to_merge.items(): + if (key in original_dict and isinstance(original_dict[key], dict) + and isinstance(value, Mapping)): + self.merge_nested_dicts(original_dict[key], value) + else: + original_dict[key] = value + + +class FileSectionsParser(parsimonious.NodeVisitor): + """Parse file sections""" + def __init__(self, ast): + self.entries = [None] + self.visit(ast) + + def visit_main(self, n, vc): + # main will be always stored as the first entry + if self.entries[0] is None: + self.entries[0] = FileSection( + name="__main__", + path=Path("."), + type="main", + params=[], + returns=[], + content=n.text.strip(), + split=False, + views_dict=None + ) + else: + # this is needed when macro parts are in the middle of the file + self.entries[0].content += n.text.strip() + + def visit_macro(self, n, vc): + self.entries.append( + FileSection( + name=vc[2].strip().lower().replace(" ", "_"), + path=Path("."), + type="macro", + params=[x.strip() for x in vc[6].split(",")] if vc[6] else [], + returns=[x.strip() for x in vc[10].split(",")] if vc[10] else [], + content=vc[13].strip(), + split=False, + views_dict=None + ) + ) + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text or "" + + +class SketchParser(parsimonious.NodeVisitor): + def __init__(self, ast): + self.variable_name = None + self.view_name = None + self.visit(ast) + + def visit_view_name(self, n, vc): + self.view_name = n.text.lower() + + def visit_var_definition(self, n, vc): + if int(vc[10]) % 2 != 0: # not a shadow variable + self.variable_name = vc[4].replace(" ", "_").lower() + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text.strip() or "" diff --git a/pysd/translation/xmile/xmile2py.py b/pysd/translation/xmile/xmile2py.py index e714d4ed..e2b973fd 100644 --- a/pysd/translation/xmile/xmile2py.py +++ b/pysd/translation/xmile/xmile2py.py @@ -21,6 +21,8 @@ def 
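Taken together, the intended translation workflow with these classes is: read the file, parse sections and elements, optionally parse the sketch to group variables by view, and hand an AbstractModel to the builder. A sketch of that flow, assuming an existing model file; the file name and subview separator are invented:

from pysd.translation.vensim.vensin_file import VensimFile  # module name as in this patch

ven_file = VensimFile("teacup.mdl")
ven_file.parse()                           # split sections and parse elements
ven_file.parse_sketch(subview_sep=["."])   # optional: group variables by view
abs_model = ven_file.get_abstract_model()
print(abs_model.dump(depth=1))             # AbstractModel -> AbstractSections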
translate_xmile(xmile_file): Functionality is currently limited. """ + if not isinstance(xmile_file, str): + xmile_file = str(xmile_file) # process xml file xml_parser = etree.XMLParser(encoding="utf-8", recover=True) root = etree.parse(xmile_file, parser=xml_parser).getroot() diff --git a/tests/conftest.py b/tests/conftest.py index a6be8df1..b4fd902b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,6 +8,12 @@ def _root(): return Path(__file__).parent.resolve() +@pytest.fixture(scope="session") +def _test_models(_root): + # test-models directory + return _root.joinpath("test-models/tests") + + @pytest.fixture(scope="class") def shared_tmpdir(tmpdir_factory): # shared temporary directory for each class diff --git a/tests/integration_test_factory.py b/tests/integration_test_factory.py index ea85aeab..7ad08458 100644 --- a/tests/integration_test_factory.py +++ b/tests/integration_test_factory.py @@ -1,53 +1,32 @@ -from __future__ import print_function +import os.path +import glob -run = False - -if run: - - - import os.path - import textwrap - import glob - from pysd import utils - - test_dir = 'test-models/' - vensim_test_files = glob.glob(test_dir+'tests/*/*.mdl') +if False: + vensim_test_files = glob.glob("test-models/tests/*/*.mdl") + vensim_test_files.sort() tests = [] for file_path in vensim_test_files: - (path, file_name) = os.path.split(file_path) - (name, ext) = os.path.splitext(file_name) - - test_name = utils.make_python_identifier(path.split('/')[-1])[0] + path, file_name = os.path.split(file_path) + folder = path.split("/")[-1] test_func_string = """ - def test_%(test_name)s(self): - output, canon = runner('%(file_path)s') - assert_frames_close(output, canon, rtol=rtol) - """ % { - 'file_path': file_path, - 'test_name': test_name, + "%(test_name)s": { + "folder": "%(folder)s", + "file": "%(file_name)s" + },""" % { + "folder": folder, + "test_name": folder, + "file_name": file_name, } tests.append(test_func_string) - file_string = textwrap.dedent(''' - """ - Note that this file is autogenerated by `integration_test_factory.py` - and changes are likely to be overwritten. 
- """ - - import unittest - from pysd.tools.benchmarking import runner, assert_frames_close - - rtol = .05 - - - class TestIntegrationExamples(unittest.TestCase): - %(tests)s - - ''' % {'tests': ''.join(tests)}) + file_string = """ + vensim_test = {%(tests)s + } + """ % {"tests": "".join(tests)} - with open('integration_test_pysd.py', 'w', encoding='UTF-8') as ofile: + with open("test_factory_result.py", "w", encoding="UTF-8") as ofile: ofile.write(file_string) - print('generated %i integration tests' % len(tests)) + print("Generated %i integration tests" % len(tests)) diff --git a/tests/integration_test_vensim_pathway.py b/tests/integration_test_vensim_pathway.py index e2efaff2..fb564e1c 100644 --- a/tests/integration_test_vensim_pathway.py +++ b/tests/integration_test_vensim_pathway.py @@ -17,119 +17,119 @@ class TestIntegrationExamples(unittest.TestCase): def test_abs(self): - output, canon = runner(test_models + '/abs/test_abs.mdl') + output, canon = runner(test_models + '/abs/test_abs.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_active_initial(self): - output, canon = runner(test_models + '/active_initial/test_active_initial.mdl') + output, canon = runner(test_models + '/active_initial/test_active_initial.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_active_initial_circular(self): - output, canon = runner(test_models + '/active_initial_circular/test_active_initial_circular.mdl') + output, canon = runner(test_models + '/active_initial_circular/test_active_initial_circular.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_arguments(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") - output, canon = runner(test_models + '/arguments/test_arguments.mdl') + output, canon = runner(test_models + '/arguments/test_arguments.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_array_with_line_break(self): - output, canon = runner(test_models + '/array_with_line_break/test_array_with_line_break.mdl') + output, canon = runner(test_models + '/array_with_line_break/test_array_with_line_break.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_builtin_max(self): - output, canon = runner(test_models + '/builtin_max/builtin_max.mdl') + output, canon = runner(test_models + '/builtin_max/builtin_max.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_builtin_min(self): - output, canon = runner(test_models + '/builtin_min/builtin_min.mdl') + output, canon = runner(test_models + '/builtin_min/builtin_min.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_chained_initialization(self): - output, canon = runner(test_models + '/chained_initialization/test_chained_initialization.mdl') + output, canon = runner(test_models + '/chained_initialization/test_chained_initialization.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) @unittest.skip("Working on it") def test_conditional_subscripts(self): - output, canon = runner(test_models + '/conditional_subscripts/test_conditional_subscripts.mdl') + output, canon = runner(test_models + '/conditional_subscripts/test_conditional_subscripts.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_control_vars(self): - output, canon = runner(test_models + '/control_vars/test_control_vars.mdl') + output, canon = runner(test_models + '/control_vars/test_control_vars.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_constant_expressions(self): - output, canon = 
runner(test_models + '/constant_expressions/test_constant_expressions.mdl') + output, canon = runner(test_models + '/constant_expressions/test_constant_expressions.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_data_from_other_model(self): output, canon = runner( test_models + '/data_from_other_model/test_data_from_other_model.mdl', - data_files=test_models + '/data_from_other_model/data.tab') + data_files=test_models + '/data_from_other_model/data.tab', old=True) assert_frames_close(output, canon, rtol=rtol) def test_delay_fixed(self): # issue https://github.com/JamesPHoughton/pysd/issues/147 with warnings.catch_warnings(): warnings.simplefilter("ignore") - output, canon = runner(test_models + '/delay_fixed/test_delay_fixed.mdl') + output, canon = runner(test_models + '/delay_fixed/test_delay_fixed.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_delay_numeric_error(self): # issue https://github.com/JamesPHoughton/pysd/issues/225 - output, canon = runner(test_models + '/delay_numeric_error/test_delay_numeric_error.mdl') + output, canon = runner(test_models + '/delay_numeric_error/test_delay_numeric_error.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_delay_parentheses(self): - output, canon = runner(test_models + '/delay_parentheses/test_delay_parentheses.mdl') + output, canon = runner(test_models + '/delay_parentheses/test_delay_parentheses.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_delay_pipeline(self): # issue https://github.com/JamesPHoughton/pysd/issues/147 with warnings.catch_warnings(): warnings.simplefilter("ignore") - output, canon = runner(test_models + '/delay_pipeline/test_pipeline_delays.mdl') + output, canon = runner(test_models + '/delay_pipeline/test_pipeline_delays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_delays(self): # issue https://github.com/JamesPHoughton/pysd/issues/147 - output, canon = runner(test_models + '/delays/test_delays.mdl') + output, canon = runner(test_models + '/delays/test_delays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_dynamic_final_time(self): # issue https://github.com/JamesPHoughton/pysd/issues/278 - output, canon = runner(test_models + '/dynamic_final_time/test_dynamic_final_time.mdl') + output, canon = runner(test_models + '/dynamic_final_time/test_dynamic_final_time.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_euler_step_vs_saveper(self): - output, canon = runner(test_models + '/euler_step_vs_saveper/test_euler_step_vs_saveper.mdl') + output, canon = runner(test_models + '/euler_step_vs_saveper/test_euler_step_vs_saveper.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_exp(self): - output, canon = runner(test_models + '/exp/test_exp.mdl') + output, canon = runner(test_models + '/exp/test_exp.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_exponentiation(self): - output, canon = runner(test_models + '/exponentiation/exponentiation.mdl') + output, canon = runner(test_models + '/exponentiation/exponentiation.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_forecast(self): - output, canon = runner(test_models + '/forecast/test_forecast.mdl') + output, canon = runner(test_models + '/forecast/test_forecast.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_function_capitalization(self): - output, canon = runner(test_models + '/function_capitalization/test_function_capitalization.mdl') 
+ output, canon = runner(test_models + '/function_capitalization/test_function_capitalization.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_game(self): - output, canon = runner(test_models + '/game/test_game.mdl') + output, canon = runner(test_models + '/game/test_game.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_get_constants_subrange(self): output, canon = runner( test_models + '/get_constants_subranges/' - + 'test_get_constants_subranges.mdl' + + 'test_get_constants_subranges.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) @@ -142,7 +142,7 @@ def test_get_data_args_3d_xls(self): """ output, canon = runner( test_models + '/get_data_args_3d_xls/' - + 'test_get_data_args_3d_xls.mdl' + + 'test_get_data_args_3d_xls.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) @@ -155,7 +155,7 @@ def test_get_lookups_data_3d_xls(self): """ output, canon = runner( test_models + '/get_lookups_data_3d_xls/' - + 'test_get_lookups_data_3d_xls.mdl' + + 'test_get_lookups_data_3d_xls.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) @@ -164,14 +164,14 @@ def test_get_lookups_subscripted_args(self): warnings.simplefilter("ignore") output, canon = runner( test_models + '/get_lookups_subscripted_args/' - + 'test_get_lookups_subscripted_args.mdl' + + 'test_get_lookups_subscripted_args.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) def test_get_lookups_subset(self): output, canon = runner( test_models + '/get_lookups_subset/' - + 'test_get_lookups_subset.mdl' + + 'test_get_lookups_subset.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) @@ -180,7 +180,7 @@ def test_get_with_missing_values_xlsx(self): warnings.simplefilter("ignore") output, canon = runner( test_models + '/get_with_missing_values_xlsx/' - + 'test_get_with_missing_values_xlsx.mdl' + + 'test_get_with_missing_values_xlsx.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) @@ -188,7 +188,7 @@ def test_get_with_missing_values_xlsx(self): def test_get_mixed_definitions(self): output, canon = runner( test_models + '/get_mixed_definitions/' - + 'test_get_mixed_definitions.mdl' + + 'test_get_mixed_definitions.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) @@ -201,343 +201,343 @@ def test_get_subscript_3d_arrays_xls(self): """ output, canon = runner( test_models + '/get_subscript_3d_arrays_xls/' - + 'test_get_subscript_3d_arrays_xls.mdl' + + 'test_get_subscript_3d_arrays_xls.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) def test_get_xls_cellrange(self): output, canon = runner( test_models + '/get_xls_cellrange/' - + 'test_get_xls_cellrange.mdl' + + 'test_get_xls_cellrange.mdl', old=True ) assert_frames_close(output, canon, rtol=rtol) def test_if_stmt(self): - output, canon = runner(test_models + '/if_stmt/if_stmt.mdl') + output, canon = runner(test_models + '/if_stmt/if_stmt.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_initial_function(self): - output, canon = runner(test_models + '/initial_function/test_initial.mdl') + output, canon = runner(test_models + '/initial_function/test_initial.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_input_functions(self): - output, canon = runner(test_models + '/input_functions/test_inputs.mdl') + output, canon = runner(test_models + '/input_functions/test_inputs.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_round(self): - output, canon = runner(test_models + 
'/subscripted_round/test_subscripted_round.mdl') + output, canon = runner(test_models + '/subscripted_round/test_subscripted_round.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_invert_matrix(self): - output, canon = runner(test_models + '/invert_matrix/test_invert_matrix.mdl') + output, canon = runner(test_models + '/invert_matrix/test_invert_matrix.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_limits(self): - output, canon = runner(test_models + '/limits/test_limits.mdl') + output, canon = runner(test_models + '/limits/test_limits.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_line_breaks(self): - output, canon = runner(test_models + '/line_breaks/test_line_breaks.mdl') + output, canon = runner(test_models + '/line_breaks/test_line_breaks.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_line_continuation(self): - output, canon = runner(test_models + '/line_continuation/test_line_continuation.mdl') + output, canon = runner(test_models + '/line_continuation/test_line_continuation.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_ln(self): - output, canon = runner(test_models + '/ln/test_ln.mdl') + output, canon = runner(test_models + '/ln/test_ln.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_log(self): - output, canon = runner(test_models + '/log/test_log.mdl') + output, canon = runner(test_models + '/log/test_log.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_logicals(self): - output, canon = runner(test_models + '/logicals/test_logicals.mdl') + output, canon = runner(test_models + '/logicals/test_logicals.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups(self): - output, canon = runner(test_models + '/lookups/test_lookups.mdl') + output, canon = runner(test_models + '/lookups/test_lookups.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_without_range(self): - output, canon = runner(test_models + '/lookups_without_range/test_lookups_without_range.mdl') + output, canon = runner(test_models + '/lookups_without_range/test_lookups_without_range.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_funcnames(self): - output, canon = runner(test_models + '/lookups_funcnames/test_lookups_funcnames.mdl') + output, canon = runner(test_models + '/lookups_funcnames/test_lookups_funcnames.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_inline(self): - output, canon = runner(test_models + '/lookups_inline/test_lookups_inline.mdl') + output, canon = runner(test_models + '/lookups_inline/test_lookups_inline.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_inline_bounded(self): - output, canon = runner(test_models + '/lookups_inline_bounded/test_lookups_inline_bounded.mdl') + output, canon = runner(test_models + '/lookups_inline_bounded/test_lookups_inline_bounded.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_with_expr(self): - output, canon = runner(test_models + '/lookups_with_expr/test_lookups_with_expr.mdl') + output, canon = runner(test_models + '/lookups_with_expr/test_lookups_with_expr.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_macro_cross_reference(self): - output, canon = runner(test_models + '/macro_cross_reference/test_macro_cross_reference.mdl') + output, canon = runner(test_models + 
'/macro_cross_reference/test_macro_cross_reference.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_macro_expression(self): - output, canon = runner(test_models + '/macro_expression/test_macro_expression.mdl') + output, canon = runner(test_models + '/macro_expression/test_macro_expression.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_macro_multi_expression(self): - output, canon = runner(test_models + '/macro_multi_expression/test_macro_multi_expression.mdl') + output, canon = runner(test_models + '/macro_multi_expression/test_macro_multi_expression.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_macro_multi_macros(self): - output, canon = runner(test_models + '/macro_multi_macros/test_macro_multi_macros.mdl') + output, canon = runner(test_models + '/macro_multi_macros/test_macro_multi_macros.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) @unittest.skip('working') def test_macro_output(self): - output, canon = runner(test_models + '/macro_output/test_macro_output.mdl') + output, canon = runner(test_models + '/macro_output/test_macro_output.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_macro_stock(self): - output, canon = runner(test_models + '/macro_stock/test_macro_stock.mdl') + output, canon = runner(test_models + '/macro_stock/test_macro_stock.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) - @unittest.skip('do we need this?') + @unittest.skip("Working on it") def test_macro_trailing_definition(self): - output, canon = runner(test_models + '/macro_trailing_definition/test_macro_trailing_definition.mdl') + output, canon = runner(test_models + '/macro_trailing_definition/test_macro_trailing_definition.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_model_doc(self): - output, canon = runner(test_models + '/model_doc/model_doc.mdl') + output, canon = runner(test_models + '/model_doc/model_doc.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_nested_functions(self): - output, canon = runner(test_models + '/nested_functions/test_nested_functions.mdl') + output, canon = runner(test_models + '/nested_functions/test_nested_functions.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_number_handling(self): - output, canon = runner(test_models + '/number_handling/test_number_handling.mdl') + output, canon = runner(test_models + '/number_handling/test_number_handling.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_parentheses(self): - output, canon = runner(test_models + '/parentheses/test_parens.mdl') + output, canon = runner(test_models + '/parentheses/test_parens.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) @unittest.skip('low priority') def test_reference_capitalization(self): """A properly formatted Vensim model should never create this failure""" - output, canon = runner(test_models + '/reference_capitalization/test_reference_capitalization.mdl') + output, canon = runner(test_models + '/reference_capitalization/test_reference_capitalization.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_repeated_subscript(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") - output, canon = runner(test_models + '/repeated_subscript/test_repeated_subscript.mdl') + output, canon = runner(test_models + '/repeated_subscript/test_repeated_subscript.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_rounding(self): - output, 
canon = runner(test_models + '/rounding/test_rounding.mdl') + output, canon = runner(test_models + '/rounding/test_rounding.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_sample_if_true(self): - output, canon = runner(test_models + '/sample_if_true/test_sample_if_true.mdl') + output, canon = runner(test_models + '/sample_if_true/test_sample_if_true.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_smooth(self): - output, canon = runner(test_models + '/smooth/test_smooth.mdl') + output, canon = runner(test_models + '/smooth/test_smooth.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_smooth_and_stock(self): - output, canon = runner(test_models + '/smooth_and_stock/test_smooth_and_stock.mdl') + output, canon = runner(test_models + '/smooth_and_stock/test_smooth_and_stock.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_special_characters(self): - output, canon = runner(test_models + '/special_characters/test_special_variable_names.mdl') + output, canon = runner(test_models + '/special_characters/test_special_variable_names.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_sqrt(self): - output, canon = runner(test_models + '/sqrt/test_sqrt.mdl') + output, canon = runner(test_models + '/sqrt/test_sqrt.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subrange_merge(self): - output, canon = runner(test_models + '/subrange_merge/test_subrange_merge.mdl') + output, canon = runner(test_models + '/subrange_merge/test_subrange_merge.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_logicals(self): - output, canon = runner(test_models + '/subscript_logicals/test_subscript_logicals.mdl') + output, canon = runner(test_models + '/subscript_logicals/test_subscript_logicals.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_multiples(self): - output, canon = runner(test_models + '/subscript_multiples/test_multiple_subscripts.mdl') + output, canon = runner(test_models + '/subscript_multiples/test_multiple_subscripts.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_1d_arrays(self): - output, canon = runner(test_models + '/subscript_1d_arrays/test_subscript_1d_arrays.mdl') + output, canon = runner(test_models + '/subscript_1d_arrays/test_subscript_1d_arrays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_2d_arrays(self): - output, canon = runner(test_models + '/subscript_2d_arrays/test_subscript_2d_arrays.mdl') + output, canon = runner(test_models + '/subscript_2d_arrays/test_subscript_2d_arrays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_3d_arrays(self): - output, canon = runner(test_models + '/subscript_3d_arrays/test_subscript_3d_arrays.mdl') + output, canon = runner(test_models + '/subscript_3d_arrays/test_subscript_3d_arrays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_3d_arrays_lengthwise(self): - output, canon = runner(test_models + '/subscript_3d_arrays_lengthwise/test_subscript_3d_arrays_lengthwise.mdl') + output, canon = runner(test_models + '/subscript_3d_arrays_lengthwise/test_subscript_3d_arrays_lengthwise.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_3d_arrays_widthwise(self): - output, canon = runner(test_models + '/subscript_3d_arrays_widthwise/test_subscript_3d_arrays_widthwise.mdl') + output, canon = runner(test_models + 
'/subscript_3d_arrays_widthwise/test_subscript_3d_arrays_widthwise.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_aggregation(self): - output, canon = runner(test_models + '/subscript_aggregation/test_subscript_aggregation.mdl') + output, canon = runner(test_models + '/subscript_aggregation/test_subscript_aggregation.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_constant_call(self): - output, canon = runner(test_models + '/subscript_constant_call/test_subscript_constant_call.mdl') + output, canon = runner(test_models + '/subscript_constant_call/test_subscript_constant_call.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_copy(self): - output, canon = runner(test_models + '/subscript_copy/test_subscript_copy.mdl') + output, canon = runner(test_models + '/subscript_copy/test_subscript_copy.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_docs(self): - output, canon = runner(test_models + '/subscript_docs/subscript_docs.mdl') + output, canon = runner(test_models + '/subscript_docs/subscript_docs.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_element_name(self): # issue https://github.com/JamesPHoughton/pysd/issues/216 - output, canon = runner(test_models + '/subscript_element_name/test_subscript_element_name.mdl') + output, canon = runner(test_models + '/subscript_element_name/test_subscript_element_name.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_individually_defined_1_of_2d_arrays(self): - output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays/subscript_individually_defined_1_of_2d_arrays.mdl') + output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays/subscript_individually_defined_1_of_2d_arrays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_individually_defined_1_of_2d_arrays_from_floats(self): - output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays_from_floats/subscript_individually_defined_1_of_2d_arrays_from_floats.mdl') + output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays_from_floats/subscript_individually_defined_1_of_2d_arrays_from_floats.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_individually_defined_1d_arrays(self): - output, canon = runner(test_models + '/subscript_individually_defined_1d_arrays/subscript_individually_defined_1d_arrays.mdl') + output, canon = runner(test_models + '/subscript_individually_defined_1d_arrays/subscript_individually_defined_1d_arrays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_individually_defined_stocks(self): - output, canon = runner(test_models + '/subscript_individually_defined_stocks/test_subscript_individually_defined_stocks.mdl') + output, canon = runner(test_models + '/subscript_individually_defined_stocks/test_subscript_individually_defined_stocks.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_mapping_simple(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") - output, canon = runner(test_models + '/subscript_mapping_simple/test_subscript_mapping_simple.mdl') + output, canon = runner(test_models + '/subscript_mapping_simple/test_subscript_mapping_simple.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_mapping_vensim(self): with 
warnings.catch_warnings(): warnings.simplefilter("ignore") - output, canon = runner(test_models + '/subscript_mapping_vensim/test_subscript_mapping_vensim.mdl') + output, canon = runner(test_models + '/subscript_mapping_vensim/test_subscript_mapping_vensim.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_mixed_assembly(self): - output, canon = runner(test_models + '/subscript_mixed_assembly/test_subscript_mixed_assembly.mdl') + output, canon = runner(test_models + '/subscript_mixed_assembly/test_subscript_mixed_assembly.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_selection(self): - output, canon = runner(test_models + '/subscript_selection/subscript_selection.mdl') + output, canon = runner(test_models + '/subscript_selection/subscript_selection.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_numeric_range(self): - output, canon = runner(test_models + '/subscript_numeric_range/test_subscript_numeric_range.mdl') + output, canon = runner(test_models + '/subscript_numeric_range/test_subscript_numeric_range.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_subranges(self): - output, canon = runner(test_models + '/subscript_subranges/test_subscript_subrange.mdl') + output, canon = runner(test_models + '/subscript_subranges/test_subscript_subrange.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_subranges_equal(self): - output, canon = runner(test_models + '/subscript_subranges_equal/test_subscript_subrange_equal.mdl') + output, canon = runner(test_models + '/subscript_subranges_equal/test_subscript_subrange_equal.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_switching(self): - output, canon = runner(test_models + '/subscript_switching/subscript_switching.mdl') + output, canon = runner(test_models + '/subscript_switching/subscript_switching.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_transposition(self): - output, canon = runner(test_models + '/subscript_transposition/test_subscript_transposition.mdl') + output, canon = runner(test_models + '/subscript_transposition/test_subscript_transposition.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscript_updimensioning(self): - output, canon = runner(test_models + '/subscript_updimensioning/test_subscript_updimensioning.mdl') + output, canon = runner(test_models + '/subscript_updimensioning/test_subscript_updimensioning.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_delays(self): - output, canon = runner(test_models + '/subscripted_delays/test_subscripted_delays.mdl') + output, canon = runner(test_models + '/subscripted_delays/test_subscripted_delays.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_flows(self): - output, canon = runner(test_models + '/subscripted_flows/test_subscripted_flows.mdl') + output, canon = runner(test_models + '/subscripted_flows/test_subscripted_flows.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_if_then_else(self): - output, canon = runner(test_models + '/subscripted_if_then_else/test_subscripted_if_then_else.mdl') + output, canon = runner(test_models + '/subscripted_if_then_else/test_subscripted_if_then_else.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_logicals(self): - output, canon = runner(test_models + 
'/subscripted_logicals/test_subscripted_logicals.mdl') + output, canon = runner(test_models + '/subscripted_logicals/test_subscripted_logicals.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_smooth(self): # issue https://github.com/JamesPHoughton/pysd/issues/226 - output, canon = runner(test_models + '/subscripted_smooth/test_subscripted_smooth.mdl') + output, canon = runner(test_models + '/subscripted_smooth/test_subscripted_smooth.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_trend(self): # issue https://github.com/JamesPHoughton/pysd/issues/226 - output, canon = runner(test_models + '/subscripted_trend/test_subscripted_trend.mdl') + output, canon = runner(test_models + '/subscripted_trend/test_subscripted_trend.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subscripted_xidz(self): - output, canon = runner(test_models + '/subscripted_xidz/test_subscripted_xidz.mdl') + output, canon = runner(test_models + '/subscripted_xidz/test_subscripted_xidz.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_subset_duplicated_coord(self): output, canon = runner(test_models + '/subset_duplicated_coord/' - + 'test_subset_duplicated_coord.mdl') + + 'test_subset_duplicated_coord.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_time(self): - output, canon = runner(test_models + '/time/test_time.mdl') + output, canon = runner(test_models + '/time/test_time.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_trend(self): - output, canon = runner(test_models + '/trend/test_trend.mdl') + output, canon = runner(test_models + '/trend/test_trend.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_trig(self): - output, canon = runner(test_models + '/trig/test_trig.mdl') + output, canon = runner(test_models + '/trig/test_trig.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_variable_ranges(self): - output, canon = runner(test_models + '/variable_ranges/test_variable_ranges.mdl') + output, canon = runner(test_models + '/variable_ranges/test_variable_ranges.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_unicode_characters(self): - output, canon = runner(test_models + '/unicode_characters/unicode_test_model.mdl') + output, canon = runner(test_models + '/unicode_characters/unicode_test_model.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_xidz_zidz(self): - output, canon = runner(test_models + '/xidz_zidz/xidz_zidz.mdl') + output, canon = runner(test_models + '/xidz_zidz/xidz_zidz.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) def test_run_uppercase(self): - output, canon = runner(test_models + '/case_sensitive_extension/teacup-upper.MDL') + output, canon = runner(test_models + '/case_sensitive_extension/teacup-upper.MDL', old=True) assert_frames_close(output, canon, rtol=rtol) def test_odd_number_quotes(self): - output, canon = runner(test_models + '/odd_number_quotes/teacup_3quotes.mdl') + output, canon = runner(test_models + '/odd_number_quotes/teacup_3quotes.mdl', old=True) assert_frames_close(output, canon, rtol=rtol) diff --git a/tests/more-tests/subscript_individually_defined_stocks2/test_subscript_individually_defined_stocks2.mdl b/tests/more-tests/subscript_individually_defined_stocks2/test_subscript_individually_defined_stocks2.mdl index aa30749e..e9470cf1 100644 --- 
a/tests/more-tests/subscript_individually_defined_stocks2/test_subscript_individually_defined_stocks2.mdl +++ b/tests/more-tests/subscript_individually_defined_stocks2/test_subscript_individually_defined_stocks2.mdl @@ -23,9 +23,9 @@ Third Dimension Subscript: ~ | Initial Values[One Dimensional Subscript,Second Dimension Subscript,Depth 1]= - Initial Values A ~~| + Initial Values A[One Dimensional Subscript,Second Dimension Subscript] ~~| Initial Values[One Dimensional Subscript,Second Dimension Subscript,Depth 2]= - Initial Values B + Initial Values B[One Dimensional Subscript,Second Dimension Subscript] ~ ~ | diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py new file mode 100644 index 00000000..22876b26 --- /dev/null +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -0,0 +1,523 @@ +import pytest +from pysd.tools.benchmarking import runner, assert_frames_close + +# TODO add warnings catcher per test + +vensim_test = { + "abs": { + "folder": "abs", + "file": "test_abs.mdl" + }, + "active_initial": { + "folder": "active_initial", + "file": "test_active_initial.mdl" + }, + "active_initial_circular": { + "folder": "active_initial_circular", + "file": "test_active_initial_circular.mdl" + }, + "arithmetics": { + "folder": "arithmetics", + "file": "test_arithmetics.mdl" + }, + "arguments": { + "folder": "arguments", + "file": "test_arguments.mdl", + "rtol": 1e-2 # TODO test why it is failing with smaller tolerance + }, + "array_with_line_break": { + "folder": "array_with_line_break", + "file": "test_array_with_line_break.mdl" + }, + "builtin_max": { + "folder": "builtin_max", + "file": "builtin_max.mdl" + }, + "builtin_min": { + "folder": "builtin_min", + "file": "builtin_min.mdl" + }, + "chained_initialization": { + "folder": "chained_initialization", + "file": "test_chained_initialization.mdl" + }, + "conditional_subscripts": { + "folder": "conditional_subscripts", + "file": "test_conditional_subscripts.mdl" + }, + "constant_expressions": { + "folder": "constant_expressions", + "file": "test_constant_expressions.mdl" + }, + "control_vars": { + "folder": "control_vars", + "file": "test_control_vars.mdl" + }, + "data_from_other_model": { + "folder": "data_from_other_model", + "file": "test_data_from_other_model.mdl", + "data_files": "data.tab" + }, + "delay_fixed": { + "folder": "delay_fixed", + "file": "test_delay_fixed.mdl" + }, + "delay_numeric_error": { + "folder": "delay_numeric_error", + "file": "test_delay_numeric_error.mdl" + }, + "delay_parentheses": { + "folder": "delay_parentheses", + "file": "test_delay_parentheses.mdl" + }, + "delay_pipeline": { + "folder": "delay_pipeline", + "file": "test_pipeline_delays.mdl" + }, + "delays": { + "folder": "delays", + "file": "test_delays.mdl" + }, + "dynamic_final_time": { + "folder": "dynamic_final_time", + "file": "test_dynamic_final_time.mdl" + }, + "euler_step_vs_saveper": { + "folder": "euler_step_vs_saveper", + "file": "test_euler_step_vs_saveper.mdl" + }, + "except": { + "folder": "except", + "file": "test_except.mdl" + }, + "exp": { + "folder": "exp", + "file": "test_exp.mdl" + }, + "exponentiation": { + "folder": "exponentiation", + "file": "exponentiation.mdl" + }, + "forecast": { + "folder": "forecast", + "file": "test_forecast.mdl" + }, + "function_capitalization": { + "folder": "function_capitalization", + "file": "test_function_capitalization.mdl" + }, + "game": { + "folder": 
"game", + "file": "test_game.mdl" + }, + "get_constants": pytest.param({ + "folder": "get_constants", + "file": "test_get_constants.mdl" + }, marks=pytest.mark.xfail(reason="csv files not implemented")), + "get_constants_subranges": { + "folder": "get_constants_subranges", + "file": "test_get_constants_subranges.mdl" + }, + "get_data": pytest.param({ + "folder": "get_data", + "file": "test_get_data.mdl" + }, marks=pytest.mark.xfail(reason="csv files not implemented")), + "get_data_args_3d_xls": { + "folder": "get_data_args_3d_xls", + "file": "test_get_data_args_3d_xls.mdl" + }, + "get_lookups_data_3d_xls": { + "folder": "get_lookups_data_3d_xls", + "file": "test_get_lookups_data_3d_xls.mdl" + }, + "get_lookups_subscripted_args": { + "folder": "get_lookups_subscripted_args", + "file": "test_get_lookups_subscripted_args.mdl" + }, + "get_lookups_subset": { + "folder": "get_lookups_subset", + "file": "test_get_lookups_subset.mdl" + }, + "get_mixed_definitions": { + "folder": "get_mixed_definitions", + "file": "test_get_mixed_definitions.mdl" + }, + "get_subscript_3d_arrays_xls": { + "folder": "get_subscript_3d_arrays_xls", + "file": "test_get_subscript_3d_arrays_xls.mdl" + }, + "get_with_missing_values_xlsx": { + "folder": "get_with_missing_values_xlsx", + "file": "test_get_with_missing_values_xlsx.mdl" + }, + "get_xls_cellrange": { + "folder": "get_xls_cellrange", + "file": "test_get_xls_cellrange.mdl" + }, + "if_stmt": { + "folder": "if_stmt", + "file": "if_stmt.mdl" + }, + "initial_function": { + "folder": "initial_function", + "file": "test_initial.mdl" + }, + "input_functions": { + "folder": "input_functions", + "file": "test_inputs.mdl" + }, + "invert_matrix": { + "folder": "invert_matrix", + "file": "test_invert_matrix.mdl" + }, + "limits": { + "folder": "limits", + "file": "test_limits.mdl" + }, + "line_breaks": { + "folder": "line_breaks", + "file": "test_line_breaks.mdl" + }, + "line_continuation": { + "folder": "line_continuation", + "file": "test_line_continuation.mdl" + }, + "ln": { + "folder": "ln", + "file": "test_ln.mdl" + }, + "log": { + "folder": "log", + "file": "test_log.mdl" + }, + "logicals": { + "folder": "logicals", + "file": "test_logicals.mdl" + }, + "lookups": { + "folder": "lookups", + "file": "test_lookups.mdl" + }, + "lookups_funcnames": { + "folder": "lookups_funcnames", + "file": "test_lookups_funcnames.mdl" + }, + "lookups_inline": { + "folder": "lookups_inline", + "file": "test_lookups_inline.mdl" + }, + "lookups_inline_bounded": { + "folder": "lookups_inline_bounded", + "file": "test_lookups_inline_bounded.mdl" + }, + "lookups_with_expr": { + "folder": "lookups_with_expr", + "file": "test_lookups_with_expr.mdl" + }, + "lookups_without_range": { + "folder": "lookups_without_range", + "file": "test_lookups_without_range.mdl" + }, + "macro_cross_reference": { + "folder": "macro_cross_reference", + "file": "test_macro_cross_reference.mdl" + }, + "macro_expression": { + "folder": "macro_expression", + "file": "test_macro_expression.mdl" + }, + "macro_multi_expression": { + "folder": "macro_multi_expression", + "file": "test_macro_multi_expression.mdl" + }, + "macro_multi_macros": { + "folder": "macro_multi_macros", + "file": "test_macro_multi_macros.mdl" + }, + "macro_stock": { + "folder": "macro_stock", + "file": "test_macro_stock.mdl" + }, + "macro_trailing_definition": { + "folder": "macro_trailing_definition", + "file": "test_macro_trailing_definition.mdl" + }, + "model_doc": { + "folder": "model_doc", + "file": "model_doc.mdl" + }, + "multiple_lines_def": { + 
"folder": "multiple_lines_def", + "file": "test_multiple_lines_def.mdl" + }, + "nested_functions": { + "folder": "nested_functions", + "file": "test_nested_functions.mdl" + }, + "number_handling": { + "folder": "number_handling", + "file": "test_number_handling.mdl" + }, + "odd_number_quotes": { + "folder": "odd_number_quotes", + "file": "teacup_3quotes.mdl" + }, + "parentheses": { + "folder": "parentheses", + "file": "test_parens.mdl" + }, + "reference_capitalization": { + "folder": "reference_capitalization", + "file": "test_reference_capitalization.mdl" + }, + "repeated_subscript": { + "folder": "repeated_subscript", + "file": "test_repeated_subscript.mdl" + }, + "rounding": { + "folder": "rounding", + "file": "test_rounding.mdl" + }, + "sample_if_true": { + "folder": "sample_if_true", + "file": "test_sample_if_true.mdl" + }, + "smooth": { + "folder": "smooth", + "file": "test_smooth.mdl" + }, + "smooth_and_stock": { + "folder": "smooth_and_stock", + "file": "test_smooth_and_stock.mdl" + }, + "special_characters": { + "folder": "special_characters", + "file": "test_special_variable_names.mdl" + }, + "sqrt": { + "folder": "sqrt", + "file": "test_sqrt.mdl" + }, + "subrange_merge": { + "folder": "subrange_merge", + "file": "test_subrange_merge.mdl" + }, + "subscript_1d_arrays": { + "folder": "subscript_1d_arrays", + "file": "test_subscript_1d_arrays.mdl" + }, + "subscript_2d_arrays": { + "folder": "subscript_2d_arrays", + "file": "test_subscript_2d_arrays.mdl" + }, + "subscript_3d_arrays": { + "folder": "subscript_3d_arrays", + "file": "test_subscript_3d_arrays.mdl" + }, + "subscript_3d_arrays_lengthwise": { + "folder": "subscript_3d_arrays_lengthwise", + "file": "test_subscript_3d_arrays_lengthwise.mdl" + }, + "subscript_3d_arrays_widthwise": { + "folder": "subscript_3d_arrays_widthwise", + "file": "test_subscript_3d_arrays_widthwise.mdl" + }, + "subscript_aggregation": { + "folder": "subscript_aggregation", + "file": "test_subscript_aggregation.mdl" + }, + "subscript_constant_call": { + "folder": "subscript_constant_call", + "file": "test_subscript_constant_call.mdl" + }, + "subscript_copy": { + "folder": "subscript_copy", + "file": "test_subscript_copy.mdl" + }, + "subscript_docs": { + "folder": "subscript_docs", + "file": "subscript_docs.mdl" + }, + "subscript_element_name": { + "folder": "subscript_element_name", + "file": "test_subscript_element_name.mdl" + }, + "subscript_individually_defined_1_of_2d_arrays": { + "folder": "subscript_individually_defined_1_of_2d_arrays", + "file": "subscript_individually_defined_1_of_2d_arrays.mdl" + }, + "subscript_individually_defined_1_of_2d_arrays_from_floats": { + "folder": "subscript_individually_defined_1_of_2d_arrays_from_floats", + "file": "subscript_individually_defined_1_of_2d_arrays_from_floats.mdl" + }, + "subscript_individually_defined_1d_arrays": { + "folder": "subscript_individually_defined_1d_arrays", + "file": "subscript_individually_defined_1d_arrays.mdl" + }, + "subscript_individually_defined_stocks": { + "folder": "subscript_individually_defined_stocks", + "file": "test_subscript_individually_defined_stocks.mdl" + }, + "subscript_logicals": { + "folder": "subscript_logicals", + "file": "test_subscript_logicals.mdl" + }, + "subscript_mapping_simple": { + "folder": "subscript_mapping_simple", + "file": "test_subscript_mapping_simple.mdl" + }, + "subscript_mapping_vensim": { + "folder": "subscript_mapping_vensim", + "file": "test_subscript_mapping_vensim.mdl" + }, + "subscript_mixed_assembly": { + "folder": 
"subscript_mixed_assembly", + "file": "test_subscript_mixed_assembly.mdl" + }, + "subscript_multiples": { + "folder": "subscript_multiples", + "file": "test_multiple_subscripts.mdl" + }, + "subscript_numeric_range": { + "folder": "subscript_numeric_range", + "file": "test_subscript_numeric_range.mdl" + }, + "subscript_selection": { + "folder": "subscript_selection", + "file": "subscript_selection.mdl" + }, + "subscript_subranges": { + "folder": "subscript_subranges", + "file": "test_subscript_subrange.mdl" + }, + "subscript_subranges_equal": { + "folder": "subscript_subranges_equal", + "file": "test_subscript_subrange_equal.mdl" + }, + "subscript_switching": { + "folder": "subscript_switching", + "file": "subscript_switching.mdl" + }, + "subscript_transposition": { + "folder": "subscript_transposition", + "file": "test_subscript_transposition.mdl" + }, + "subscript_updimensioning": { + "folder": "subscript_updimensioning", + "file": "test_subscript_updimensioning.mdl" + }, + "subscripted_delays": { + "folder": "subscripted_delays", + "file": "test_subscripted_delays.mdl" + }, + "subscripted_flows": { + "folder": "subscripted_flows", + "file": "test_subscripted_flows.mdl" + }, + "subscripted_if_then_else": { + "folder": "subscripted_if_then_else", + "file": "test_subscripted_if_then_else.mdl" + }, + "subscripted_logicals": { + "folder": "subscripted_logicals", + "file": "test_subscripted_logicals.mdl" + }, + "subscripted_lookups": { + "folder": "subscripted_lookups", + "file": "test_subscripted_lookups.mdl" + }, + "subscripted_round": { + "folder": "subscripted_round", + "file": "test_subscripted_round.mdl" + }, + "subscripted_smooth": { + "folder": "subscripted_smooth", + "file": "test_subscripted_smooth.mdl" + }, + "subscripted_trend": { + "folder": "subscripted_trend", + "file": "test_subscripted_trend.mdl" + }, + "subscripted_xidz": { + "folder": "subscripted_xidz", + "file": "test_subscripted_xidz.mdl" + }, + "subset_duplicated_coord": { + "folder": "subset_duplicated_coord", + "file": "test_subset_duplicated_coord.mdl" + }, + "time": { + "folder": "time", + "file": "test_time.mdl" + }, + "trend": { + "folder": "trend", + "file": "test_trend.mdl" + }, + "trig": { + "folder": "trig", + "file": "test_trig.mdl" + }, + "unicode_characters": { + "folder": "unicode_characters", + "file": "unicode_test_model.mdl" + }, + "variable_ranges": { + "folder": "variable_ranges", + "file": "test_variable_ranges.mdl" + }, + "xidz_zidz": { + "folder": "xidz_zidz", + "file": "xidz_zidz.mdl" + } +} + + +@pytest.mark.parametrize( + "test_data", + [item for item in vensim_test.values()], + ids=list(vensim_test) +) +class TestIntegrateVensim: + """ + Test for splitting Vensim views in modules and submodules + """ + + @pytest.fixture + def model_path(self, _test_models, test_data): + return _test_models.joinpath( + test_data["folder"]).joinpath(test_data["file"]) + + @pytest.fixture + def data_path(self, _test_models, test_data): + """Fixture for models with data_path""" + if "data_files" in test_data: + if isinstance(test_data["data_files"], str): + return _test_models.joinpath( + test_data["folder"]).joinpath(test_data["data_files"]) + elif isinstance(test_data["data_files"], list): + return [ + _test_models.joinpath(test_data["folder"]).joinpath(file) + for file in test_data["data_files"] + ] + else: + return { + _test_models.joinpath(test_data["folder"]).joinpath(file): + values for file, values in test_data["data_files"].items() + } + else: + return None + + @pytest.fixture + def kwargs(self, test_data): 
+ """Fixture for atol and rtol""" + kwargs = {} + if "atol" in test_data: + kwargs["atol"] = test_data["atol"] + if "rtol" in test_data: + kwargs["rtol"] = test_data["rtol"] + return kwargs + + def test_read_vensim_file(self, model_path, data_path, kwargs): + output, canon = runner(model_path, data_files=data_path) + assert_frames_close(output, canon, **kwargs) diff --git a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py index 0e101fe0..e868e06d 100644 --- a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py +++ b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py @@ -146,18 +146,16 @@ def test_select_submodel(self, model, variables, modules, if not dep_vars: # totally independent submodels can run without producing # nan values - assert len(record) == 1 assert not np.any(np.isnan(model.run())) else: # running the model without redefining dependencies will # produce nan values - assert len(record) == 2 assert "Exogenous components for the following variables are"\ - + " necessary but not given:" in str(record[1].message) + + " necessary but not given:" in str(record[-1].message) assert "Please, set them before running the model using "\ - + "set_components method..." in str(record[1].message) + + "set_components method..." in str(record[-1].message) for var in dep_vars: - assert var in str(record[1].message) + assert var in str(record[-1].message) assert np.any(np.isnan(model.run())) # redefine dependencies assert not np.any(np.isnan(model.run(params=dep_vars))) @@ -168,7 +166,6 @@ def test_select_submodel(self, model, variables, modules, model.select_submodel(vars=variables, modules=modules, exogenous_components=dep_vars) - assert len(record) == 1 assert not np.any(np.isnan(model.run())) diff --git a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py new file mode 100644 index 00000000..135dd7d8 --- /dev/null +++ b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py @@ -0,0 +1,54 @@ + +import pytest +from pathlib import Path + +from pysd.translation.vensim.vensin_file import VensimFile + + +@pytest.mark.parametrize( + "path", + [ + ( # teacup + "test-models/samples/teacup/teacup.mdl" + ), + ( # macros + "test-models/tests/macro_multi_expression/test_macro_multi_expression.mdl" + ), + ( # mapping + "test-models/tests/subscript_mapping_vensim/test_subscript_mapping_vensim.mdl" + ), + ( # data + "test-models/tests/data_from_other_model/test_data_from_other_model.mdl" + ), + ( # except + "test-models/tests/except/test_except.mdl" + ) + ], + ids=["teacup", "macros", "mapping", "data", "except"] +) +class TestVensimFile: + """ + Tests for reading and parsing Vensim model files + """ + @pytest.fixture + def model_path(self, _root, path): + return _root.joinpath(path) + + @pytest.mark.dependency(name="read_vensim_file") + def test_read_vensim_file(self, request, path, model_path): + # read the model file and check that the expected attributes are set + ven_file = VensimFile(model_path) + + assert hasattr(ven_file, "mdl_path") + assert hasattr(ven_file, "root_path") + assert hasattr(ven_file, "model_text") + + assert isinstance(getattr(ven_file, "mdl_path"), Path) + assert isinstance(getattr(ven_file, "root_path"), Path) + assert isinstance(getattr(ven_file, "model_text"), str) + + @pytest.mark.dependency(depends=["read_vensim_file"]) + def test_file_split_file_sections(self, request, path, model_path): + ven_file = 
VensimFile(model_path) + ven_file.parse() + print(ven_file.verbose) \ No newline at end of file diff --git a/tests/test-models b/tests/test-models index de294a8a..75ea19ba 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit de294a8ad0f2c1a2bf41c351cfc4ab637bc39825 +Subproject commit 75ea19badf2ed6e94aba0f707ef5d6c97d80195b diff --git a/tests/unit_test_benchmarking.py b/tests/unit_test_benchmarking.py index 696c6c46..2bc74fc2 100644 --- a/tests/unit_test_benchmarking.py +++ b/tests/unit_test_benchmarking.py @@ -28,7 +28,7 @@ def test_non_valid_model(self): "more-tests/not_vensim/test_not_vensim.txt")) self.assertIn( - 'Modelfile should be *.mdl or *.xmile', + 'Modelfile should be *.mdl, *.xmile, or *.py', str(err.exception)) def test_different_frames_error(self): diff --git a/tests/unit_test_external.py b/tests/unit_test_external.py index 78f0aae3..0b2cadbe 100644 --- a/tests/unit_test_external.py +++ b/tests/unit_test_external.py @@ -207,12 +207,12 @@ def test_fill_missing(self): interp = np.array([1., 1., 1., 3., 3.5, 4., 5., 6., 7., 8., 8., 8.]) - ext.interp = "hold backward" + ext.interp = "hold_backward" datac = data.copy() ext._fill_missing(series, datac) self.assertTrue(np.all(hold_back == datac)) - ext.interp = "look forward" + ext.interp = "look_forward" datac = data.copy() ext._fill_missing(series, datac) self.assertTrue(np.all(look_for == datac)) @@ -456,7 +456,7 @@ def test_data_interp_vn1d(self): def test_data_forward_h1d(self): """ - ExtData test for 1d horizontal series look forward + ExtData test for 1d horizontal series look_forward """ import pysd @@ -465,7 +465,7 @@ def test_data_forward_h1d(self): time_row_or_col = "4" cell = "C5" coords = {} - interp = "look forward" + interp = "look_forward" py_name = "test_data_forward_h1d" data = pysd.external.ExtData(file_name=file_name, @@ -486,7 +486,7 @@ def test_data_forward_h1d(self): def test_data_forward_v1d(self): """ - ExtData test for 1d vertical series look forward + ExtData test for 1d vertical series look_forward """ import pysd @@ -495,7 +495,7 @@ def test_data_forward_v1d(self): time_row_or_col = "B" cell = "C5" coords = {} - interp = "look forward" + interp = "look_forward" py_name = "test_data_forward_v1d" data = pysd.external.ExtData(file_name=file_name, @@ -516,7 +516,7 @@ def test_data_forward_v1d(self): def test_data_forward_hn1d(self): """ - ExtData test for 1d horizontal series look forward by cell range names + ExtData test for 1d horizontal series look_forward by cell range names """ import pysd @@ -525,7 +525,7 @@ def test_data_forward_hn1d(self): time_row_or_col = "time" cell = "data_1d" coords = {} - interp = "look forward" + interp = "look_forward" py_name = "test_data_forward_hn1d" data = pysd.external.ExtData(file_name=file_name, @@ -546,7 +546,7 @@ def test_data_forward_hn1d(self): def test_data_forward_vn1d(self): """ - ExtData test for 1d vertical series look forward by cell range names + ExtData test for 1d vertical series look_forward by cell range names """ import pysd @@ -555,7 +555,7 @@ def test_data_forward_vn1d(self): time_row_or_col = "time" cell = "data_1d" coords = {} - interp = "look forward" + interp = "look_forward" py_name = "test_data_forward_vn1d" data = pysd.external.ExtData(file_name=file_name, @@ -576,7 +576,7 @@ def test_data_forward_vn1d(self): def test_data_backward_h1d(self): """ - ExtData test for 1d horizontal series hold backward + ExtData test for 1d horizontal series hold_backward """ import pysd @@ -585,7 +585,7 @@ def 
test_data_backward_h1d(self): time_row_or_col = "4" cell = "C5" coords = {} - interp = "hold backward" + interp = "hold_backward" py_name = "test_data_backward_h1d" data = pysd.external.ExtData(file_name=file_name, @@ -606,7 +606,7 @@ def test_data_backward_h1d(self): def test_data_backward_v1d(self): """ - ExtData test for 1d vertical series hold backward by cell range names + ExtData test for 1d vertical series hold_backward by cell range names """ import pysd @@ -615,7 +615,7 @@ def test_data_backward_v1d(self): time_row_or_col = "B" cell = "C5" coords = {} - interp = "hold backward" + interp = "hold_backward" py_name = "test_data_backward_v1d" data = pysd.external.ExtData(file_name=file_name, @@ -636,7 +636,7 @@ def test_data_backward_v1d(self): def test_data_backward_hn1d(self): """ - ExtData test for 1d horizontal series hold backward by cell range names + ExtData test for 1d horizontal series hold_backward by cell range names """ import pysd @@ -645,7 +645,7 @@ def test_data_backward_hn1d(self): time_row_or_col = "time" cell = "data_1d" coords = {} - interp = "hold backward" + interp = "hold_backward" py_name = "test_data_backward_hn1d" data = pysd.external.ExtData(file_name=file_name, @@ -666,7 +666,7 @@ def test_data_backward_hn1d(self): def test_data_backward_vn1d(self): """ - ExtData test for 1d vertical series hold backward by cell range names + ExtData test for 1d vertical series hold_backward by cell range names """ import pysd @@ -675,7 +675,7 @@ def test_data_backward_vn1d(self): time_row_or_col = "time" cell = "data_1d" coords = {} - interp = "hold backward" + interp = "hold_backward" py_name = "test_data_backward_vn1d" data = pysd.external.ExtData(file_name=file_name, @@ -726,7 +726,7 @@ def test_data_interp_vn2d(self): def test_data_forward_hn2d(self): """ - ExtData test for 2d vertical series look forward by cell range names + ExtData test for 2d vertical series look_forward by cell range names """ import pysd @@ -735,7 +735,7 @@ def test_data_forward_hn2d(self): time_row_or_col = "time" cell = "data_2d" coords = {'ABC': ['A', 'B', 'C']} - interp = "look forward" + interp = "look_forward" py_name = "test_data_forward_hn2d" data = pysd.external.ExtData(file_name=file_name, @@ -756,7 +756,7 @@ def test_data_forward_hn2d(self): def test_data_backward_v2d(self): """ - ExtData test for 2d vertical series hold backward + ExtData test for 2d vertical series hold_backward """ import pysd @@ -765,7 +765,7 @@ def test_data_backward_v2d(self): time_row_or_col = "B" cell = "C5" coords = {'ABC': ['A', 'B', 'C']} - interp = "hold backward" + interp = "hold_backward" py_name = "test_data_backward_v2d" data = pysd.external.ExtData(file_name=file_name, @@ -827,7 +827,7 @@ def test_data_interp_h3d(self): def test_data_forward_v3d(self): """ - ExtData test for 3d vertical series look forward + ExtData test for 3d vertical series look_forward """ import pysd @@ -838,7 +838,7 @@ def test_data_forward_v3d(self): cell_2 = "F5" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} - interp = "look forward" + interp = "look_forward" py_name = "test_data_forward_v3d" data = pysd.external.ExtData(file_name=file_name, @@ -867,7 +867,7 @@ def test_data_forward_v3d(self): def test_data_backward_hn3d(self): """ - ExtData test for 3d horizontal series hold backward by cellrange names + ExtData test for 3d horizontal series hold_backward by cellrange names """ import pysd @@ -878,7 +878,7 @@ def test_data_backward_hn3d(self): cell_2 = "data_2db" coords_1 = 
{'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} - interp = "hold backward" + interp = "hold_backward" py_name = "test_data_backward_hn3d" data = pysd.external.ExtData(file_name=file_name, @@ -2890,7 +2890,7 @@ def test_data_h3d_interp(self): coords_1 = {'ABC': ['A', 'B', 'C'], 'XY': ['X']} coords_2 = {'ABC': ['A', 'B', 'C'], 'XY': ['Y']} interp = None - interp2 = "look forward" + interp2 = "look_forward" py_name = "test_data_h3d_interp" data = pysd.external.ExtData(file_name=file_name, diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index f3cafa88..1502a4a3 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -581,6 +581,55 @@ def test_docs(self): model = pysd.read_vensim(test_model) self.assertIsInstance(str(model), str) # tests string conversion of # model + print(model.doc().columns) + + doc = model._doc + self.assertIsInstance(doc, pd.DataFrame) + self.assertSetEqual( + { + "Characteristic Time", + "Teacup Temperature", + "FINAL TIME", + "Heat Loss to Room", + "INITIAL TIME", + "Room Temperature", + "SAVEPER", + "TIME STEP", + }, + set(doc["Real Name"].values), + ) + + self.assertEqual( + doc[doc["Real Name"] == "Heat Loss to Room"]["Unit"].values[0], + "Degrees Fahrenheit/Minute", + ) + self.assertEqual( + doc[doc["Real Name"] == "Teacup Temperature"]["Py Name"].values[0], + "teacup_temperature", + ) + self.assertEqual( + doc[doc["Real Name"] == "INITIAL TIME"]["Comment"].values[0], + "The initial time for the simulation.", + ) + self.assertEqual( + doc[doc["Real Name"] == "Characteristic Time"]["Type"].values[0], + "Constant" + ) + self.assertEqual( + doc[doc["Real Name"] == "Characteristic Time"]["Subtype"].values[0], + "Normal" + ) + self.assertEqual( + doc[doc["Real Name"] == "Teacup Temperature"]["Lims"].values[0], + "(32.0, 212.0)", + ) + + def test_docs_old(self): + """ Test that the model prints some documentation """ + + model = pysd.read_vensim(test_model, old=True) + self.assertIsInstance(str(model), str) # tests string conversion of + # model doc = model.doc() self.assertIsInstance(doc, pd.DataFrame) @@ -636,8 +685,9 @@ def test_docs_multiline_eqn(self): self.assertEqual( doc[doc["Real Name"] == "price"]["Subs"].values[0], "['fruits']" ) - self.assertEqual(doc[doc["Real Name"] == "price"]["Eqn"].values[0], - "1.2; .; .; .; 1.4") + # TODO: keep eqn? 
+ #self.assertEqual(doc[doc["Real Name"] == "price"]["Eqn"].values[0], + # "1.2; .; .; .; 1.4") def test_stepwise_cache(self): from pysd.py_backend.decorators import Cache @@ -1382,7 +1432,7 @@ def test_multiple_deps(self): + "test_subscript_individually_defined_stocks2.mdl")) expected_dep = { - "stock_a": {"_integ_stock_a": 2}, + "stock_a": {"_integ_stock_a": 1, "_integ_stock_a_1": 1}, "inflow_a": {"rate_a": 1}, "inflow_b": {"rate_a": 1}, "initial_values": {"initial_values_a": 1, "initial_values_b": 1}, @@ -1394,9 +1444,13 @@ def test_multiple_deps(self): "saveper": {"time_step": 1}, "time_step": {}, "_integ_stock_a": { - "initial": {"initial_values": 2}, - "step": {"inflow_a": 1, "inflow_b": 1} + "initial": {"initial_values": 1}, + "step": {"inflow_a": 1} }, + '_integ_stock_a_1': { + 'initial': {'initial_values': 1}, + 'step': {'inflow_b': 1} + } } self.assertEqual(model.components._dependencies, expected_dep) From 1cd08ba925c86101319b25b35172343209e67eba Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 3 Mar 2022 14:08:44 +0100 Subject: [PATCH 02/96] Fix bugs and allow to translate models with no included functions --- pysd/building/python/python_functions.py | 2 +- pysd/building/python/visitors.py | 27 ++++++++++++++++--- pysd/py_backend/data.py | 4 +-- pysd/py_backend/lookups.py | 7 ++--- .../vensim/parsing_expr/components.peg | 10 +++---- 5 files changed, 36 insertions(+), 14 deletions(-) diff --git a/pysd/building/python/python_functions.py b/pysd/building/python/python_functions.py index bdde7130..73288363 100644 --- a/pysd/building/python/python_functions.py +++ b/pysd/building/python/python_functions.py @@ -2,7 +2,7 @@ # functions that can be diretcly applied over an array functionspace = { # directly build functions without dependencies - "elmcount": ("len(_subscript_dict['%(0)s'])", None), + "elmcount": ("len(%(0)s)", None), # directly build numpy based functions "abs": ("np.abs(%(0)s)", ("numpy",)), diff --git a/pysd/building/python/visitors.py b/pysd/building/python/visitors.py index 3034ad18..30158924 100644 --- a/pysd/building/python/visitors.py +++ b/pysd/building/python/visitors.py @@ -216,8 +216,26 @@ def __init__(self, call_str, component): elif function_name == "a_function_of": self.build = self.build_incomplete_call else: - # error - raise ValueError("Undefined function %s" % function_name) + self.function = function_name + self.build = self.build_not_implemented + + def build_not_implemented(self, arguments): + final_subscripts = self.reorder(arguments, def_subs=self.def_subs) + warnings.warn( + "\n\nTrying to translate " + + self.function + + " which is not implemented in PySD. The translated " + + "model will crash... 
" + ) + self.section.imports.add("functions", "not_implemented_function") + + return BuildAST( + expression="not_implemented_function('%s', %s)" % ( + self.function, + ", ".join(arg.expression for arg in arguments.values())), + calls=self.join_calls(arguments), + subscripts=final_subscripts, + order=0) def build_macro_call(self, arguments): self.section.imports.add("statefuls", "Macro") @@ -1045,7 +1063,10 @@ def visit_subscripts(self, expression, original_subs): # NUMPY: subset value [:, :, np.array([1, 0]), :] # NUMPY: as order may change we need to check if dim != orig_dim # NUMPY: use also ranges [:, :, 2:5, :] when possible - loc.append("_subscript_dict['%s']" % dim) + if dim.endswith("!"): + loc.append("_subscript_dict['%s']" % dim[:-1]) + else: + loc.append("_subscript_dict['%s']" % dim) final_subs[dim] = coord float = False else: diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index a46af99f..2ead4bf5 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -64,14 +64,14 @@ def read_file(cls, file_name, encoding=None): try: # if we fail converting columns to float then they are # not numeric values, so current direction is okay - [float(col) for col in random.sample(out, 3)] + [float(col) for col in random.sample(out, min(3, len(out)))] # we did not fail, read the first column to see if variables # are split per rows out = cls.read_col(file_name, encoding) transpose = True # if we still are able to transform values to float the # file is not valid - [float(col) for col in random.sample(out, 3)] + [float(col) for col in random.sample(out, min(3, len(out)))] except ValueError: return out, transpose else: diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index 98ef2132..5d92eb7b 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -71,21 +71,22 @@ def __init__(self, x, y, coords, py_name): # TODO: avoid add and merge all declarations in one definition self.is_float = not bool(coords) self.py_name = py_name + y = np.array(y).reshape((len(x),) + (1,)*len(coords)) self.data = xr.DataArray( - np.array(y).reshape(tuple([len(x)] + utils.compute_shape(coords))), + np.tile(y, [1] + utils.compute_shape(coords)), {"lookup_dim": x, **coords}, ["lookup_dim"] + list(coords) ) self.x = set(x) def add(self, x, y, coords): + y = np.array(y).reshape((len(x),) + (1,)*len(coords)) self.data = self.data.combine_first( xr.DataArray( - np.array(y).reshape(tuple([len(x)] + utils.compute_shape(coords))), + np.tile(y, [1] + utils.compute_shape(coords)), {"lookup_dim": x, **coords}, ["lookup_dim"] + list(coords) )) - if np.any(np.isnan(self.data)): # fill missing values of different input lookup_dim values values = self.data.values diff --git a/pysd/translation/vensim/parsing_expr/components.peg b/pysd/translation/vensim/parsing_expr/components.peg index 09afc0f0..27592de3 100644 --- a/pysd/translation/vensim/parsing_expr/components.peg +++ b/pysd/translation/vensim/parsing_expr/components.peg @@ -2,12 +2,12 @@ expr_type = array / final_expr / empty -final_expr = logic_expr _ (logic_oper _ logic_expr)* # logic operators (:and:, :or:) +final_expr = logic_expr (_ logic_oper _ logic_expr)* # logic operators (:and:, :or:) logic_expr = not_oper? _ comp_expr # :not: operator -comp_expr = add_expr _ (comp_oper _ add_expr)? # comparison (e.g. 
'<', '=>') -add_expr = prod_expr _ (add_oper _ prod_expr)* # addition and substraction -prod_expr = exp_expr _ (prod_oper _ exp_expr)* # product and division -exp_expr = neg_expr _ (exp_oper _ neg_expr)* # exponential +comp_expr = add_expr (_ comp_oper _ add_expr)? # comparison (e.g. '<', '=>') +add_expr = prod_expr (_ add_oper _ prod_expr)* # addition and substraction +prod_expr = exp_expr (_ prod_oper _ exp_expr)* # product and division +exp_expr = neg_expr (_ exp_oper _ neg_expr)* # exponential neg_expr = pre_oper? _ expr # pre operators (-, +) expr = lookup_with_def / call / parens / number / reference / nan From 3518ac1f8f5ab4e9789efaf9fec8275dfac51ae4 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 7 Mar 2022 08:39:16 +0100 Subject: [PATCH 03/96] Avoid transposition 0-d constants --- pysd/py_backend/external.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index 9da02cb9..2d89d99b 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -807,7 +807,8 @@ def __init__(self, file_name, sheet, cell, coords, root, py_name): super().__init__(py_name) self.files = [file_name] self.sheets = [sheet] - self.transposes = [cell[-1] == '*'] + self.transposes = [ + cell[-1] == '*' and np.prod(utils.compute_shape(coords)) > 1] self.cells = [cell.strip('*')] self.root = root self.coordss = [coords] @@ -818,7 +819,8 @@ def add(self, file_name, sheet, cell, coords): """ self.files.append(file_name) self.sheets.append(sheet) - self.transposes.append(cell[-1] == '*') + self.transposes.append( + cell[-1] == '*' and np.prod(utils.compute_shape(coords)) > 1) self.cells.append(cell.strip('*')) self.coordss.append(coords) From ee2240ac49cdf9264547dd8ffb1e25e3ea7ae8c7 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 8 Mar 2022 17:18:41 +0100 Subject: [PATCH 04/96] Start working on the new Xmile translator to AM --- ...itors.py => python_expressions_builder.py} | 73 ++-- ...hon_builder.py => python_model_builder.py} | 2 +- pysd/pysd.py | 31 +- pysd/tools/benchmarking.py | 2 +- ...{components.py => abstract_expressions.py} | 0 .../common_grammar.peg | 0 .../components.peg | 0 .../element_object.peg | 0 .../file_sections.peg | 0 .../lookups.peg | 0 .../section_elements.peg | 0 .../sketch.peg | 0 pysd/translation/vensim/vensim_element.py | 8 +- .../vensim/{vensin_file.py => vensim_file.py} | 2 +- pysd/translation/vensim/vensim_section.py | 2 - pysd/translation/vensim/vensim_structures.py | 64 +-- pysd/translation/vensim/vensim_utils.py | 2 +- pysd/translation/xmile/xmile_element.py | 395 ++++++++++++++++++ pysd/translation/xmile/xmile_file.py | 82 ++++ pysd/translation/xmile/xmile_section.py | 125 ++++++ pysd/translation/xmile/xmile_structures.py | 54 +++ pysd/translation/xmile/xmile_utils.py | 115 +++++ tests/integration_test_xmile_pathway.py | 293 ++----------- .../pytest_integration_vensim_pathway.py | 30 +- .../pytest_integration_xmile_pathway.py | 248 +++++++++++ .../vensim_parser/pytest_vensim_file.py | 8 +- 26 files changed, 1176 insertions(+), 360 deletions(-) rename pysd/building/python/{visitors.py => python_expressions_builder.py} (96%) rename pysd/building/python/{python_builder.py => python_model_builder.py} (99%) rename pysd/translation/structures/{components.py => abstract_expressions.py} (100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/common_grammar.peg (100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/components.peg 
(100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/element_object.peg (100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/file_sections.peg (100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/lookups.peg (100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/section_elements.peg (100%) rename pysd/translation/vensim/{parsing_expr => parsing_grammars}/sketch.peg (100%) rename pysd/translation/vensim/{vensin_file.py => vensim_file.py} (99%) create mode 100644 pysd/translation/xmile/xmile_element.py create mode 100644 pysd/translation/xmile/xmile_file.py create mode 100644 pysd/translation/xmile/xmile_section.py create mode 100644 pysd/translation/xmile/xmile_structures.py create mode 100644 pysd/translation/xmile/xmile_utils.py create mode 100644 tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py diff --git a/pysd/building/python/visitors.py b/pysd/building/python/python_expressions_builder.py similarity index 96% rename from pysd/building/python/visitors.py rename to pysd/building/python/python_expressions_builder.py index 30158924..88803a3b 100644 --- a/pysd/building/python/visitors.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1,11 +1,10 @@ -from re import X import warnings from dataclasses import dataclass import numpy as np from pysd.py_backend.utils import compute_shape -from pysd.translation.structures import components as ct +from pysd.translation.structures import abstract_expressions as ae from .python_functions import functionspace @@ -435,6 +434,7 @@ def build(self, arguments): subscripts=final_subs, order=0) + class ExtDataBuilder(StructureBuilder): def __init__(self, getdata_str, component): super().__init__(None, component) @@ -728,7 +728,8 @@ def build(self, arguments): self.component.subtype = "Smooth" self.section.imports.add("statefuls", "Smooth") arguments["input"].reshape(self.section.subscripts, self.def_subs) - arguments["smooth_time"].reshape(self.section.subscripts, self.def_subs) + arguments["smooth_time"].reshape( + self.section.subscripts, self.def_subs) arguments["initial"].reshape(self.section.subscripts, self.def_subs) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_smooth") @@ -775,7 +776,8 @@ def build(self, arguments): self.component.subtype = "Trend" self.section.imports.add("statefuls", "Trend") arguments["input"].reshape(self.section.subscripts, self.def_subs) - arguments["average_time"].reshape(self.section.subscripts, self.def_subs) + arguments["average_time"].reshape( + self.section.subscripts, self.def_subs) arguments["initial"].reshape(self.section.subscripts, self.def_subs) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_trend") @@ -817,7 +819,8 @@ def build(self, arguments): self.component.subtype = "Forecast" self.section.imports.add("statefuls", "Forecast") arguments["input"].reshape(self.section.subscripts, self.def_subs) - arguments["average_time"].reshape(self.section.subscripts, self.def_subs) + arguments["average_time"].reshape( + self.section.subscripts, self.def_subs) arguments["horizon"].reshape(self.section.subscripts, self.def_subs) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_forecast") @@ -1041,7 +1044,8 @@ def build(self, arguments): original_subs = self.section.subscripts.make_coord_dict( self.section.subscripts.elements[reference]) - expression, 
final_subs = self.visit_subscripts(expression, original_subs) + expression, final_subs = self.visit_subscripts( + expression, original_subs) return BuildAST( expression=expression, @@ -1061,7 +1065,8 @@ def visit_subscripts(self, expression, original_subs): elif len(coord) < len(orig_coord): # subset a subrange # NUMPY: subset value [:, :, np.array([1, 0]), :] - # NUMPY: as order may change we need to check if dim != orig_dim + # NUMPY: as order may change we need to check if + # dim != orig_dim # NUMPY: use also ranges [:, :, 2:5, :] when possible if dim.endswith("!"): loc.append("_subscript_dict['%s']" % dim[:-1]) @@ -1077,7 +1082,8 @@ def visit_subscripts(self, expression, original_subs): float = False if dim != orig_dim and len(coord) != 1: - # NUMPY: check order of dimensions, make all subranges work with the same dimensions? + # NUMPY: check order of dimensions, make all subranges work + # with the same dimensions? # NUMPY: this could be solved in the previous if/then/else rename[orig_dim] = dim @@ -1187,27 +1193,27 @@ def _merge_dependencies(current, new): class ASTVisitor: builders = { - ct.InitialStructure: InitialBuilder, - ct.IntegStructure: IntegBuilder, - ct.DelayStructure: lambda x, y: DelayBuilder("Delay", x, y), - ct.DelayNStructure: lambda x, y: DelayBuilder("DelayN", x, y), - ct.DelayFixedStructure: DelayFixedBuilder, - ct.SmoothStructure: SmoothBuilder, - ct.SmoothNStructure: SmoothBuilder, - ct.TrendStructure: TrendBuilder, - ct.ForecastStructure: ForecastBuilder, - ct.SampleIfTrueStructure: SampleIfTrueBuilder, - ct.GetConstantsStructure: ExtConstantBuilder, - ct.GetDataStructure: ExtDataBuilder, - ct.GetLookupsStructure: ExtLookupBuilder, - ct.LookupsStructure: LookupsBuilder, - ct.InlineLookupsStructure: InlineLookupsBuilder, - ct.DataStructure: TabDataBuilder, - ct.ReferenceStructure: ReferenceBuilder, - ct.CallStructure: CallBuilder, - ct.GameStructure: GameBuilder, - ct.LogicStructure: OperationBuilder, - ct.ArithmeticStructure: OperationBuilder, + ae.InitialStructure: InitialBuilder, + ae.IntegStructure: IntegBuilder, + ae.DelayStructure: lambda x, y: DelayBuilder("Delay", x, y), + ae.DelayNStructure: lambda x, y: DelayBuilder("DelayN", x, y), + ae.DelayFixedStructure: DelayFixedBuilder, + ae.SmoothStructure: SmoothBuilder, + ae.SmoothNStructure: SmoothBuilder, + ae.TrendStructure: TrendBuilder, + ae.ForecastStructure: ForecastBuilder, + ae.SampleIfTrueStructure: SampleIfTrueBuilder, + ae.GetConstantsStructure: ExtConstantBuilder, + ae.GetDataStructure: ExtDataBuilder, + ae.GetLookupsStructure: ExtLookupBuilder, + ae.LookupsStructure: LookupsBuilder, + ae.InlineLookupsStructure: InlineLookupsBuilder, + ae.DataStructure: TabDataBuilder, + ae.ReferenceStructure: ReferenceBuilder, + ae.CallStructure: CallBuilder, + ae.GameStructure: GameBuilder, + ae.LogicStructure: OperationBuilder, + ae.ArithmeticStructure: OperationBuilder, int: NumericBuilder, float: NumericBuilder, np.ndarray: ArrayBuilder @@ -1217,10 +1223,8 @@ def __init__(self, component): self.ast = component.ast self.subscripts = component.subscripts_dict self.component = component - # TODO add a attribute for "new structures" def visit(self): - # TODO: if final_subscripts == self.subscripts OK, else -> redimension visit_out = self._visit(self.ast) if not visit_out: @@ -1239,6 +1243,8 @@ def visit(self): if not visit_out.subscripts\ and self.subscripts != self.component.element.subs_dict: + # expression is a float, but it will be upper dimensioned + # when assigning values to the xarray.DataArray return 
visit_out # NUMPY not needed @@ -1274,5 +1280,8 @@ def visit(self): def _visit(self, ast_object): builder = self.builders[type(ast_object)](ast_object, self.component) - arguments = {name: self._visit(value) for name, value in builder.arguments.items()} + arguments = { + name: self._visit(value) + for name, value in builder.arguments.items() + } return builder.build(arguments) diff --git a/pysd/building/python/python_builder.py b/pysd/building/python/python_model_builder.py similarity index 99% rename from pysd/building/python/python_builder.py rename to pysd/building/python/python_model_builder.py index 10b70aa5..6d596556 100644 --- a/pysd/building/python/python_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -5,7 +5,7 @@ from pysd.translation.structures.abstract_model import\ AbstractComponent, AbstractElement, AbstractModel, AbstractSection -from . import visitors as vs +from . import python_expressions_builder as vs from .namespace import NamespaceManager from .subscripts import SubscriptManager from .imports import ImportsManager diff --git a/pysd/pysd.py b/pysd/pysd.py index 1de73930..0aaa574a 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -23,14 +23,14 @@ ) -def read_xmile(xmile_file, data_files=None, initialize=True, +def read_xmile(xmile_file, data_files=None, initialize=True, old=False, missing_values="warning"): """ Construct a model from `.xmile` file. Parameters ---------- - xmile_file : str + xmile_file: str or pathlib.Path The relative path filename for a raw `.xmile` file. initialize: bool (optional) @@ -60,11 +60,20 @@ def read_xmile(xmile_file, data_files=None, initialize=True, >>> model = read_xmile('../tests/test-models/samples/teacup/teacup.xmile') """ - from .translation.xmile.xmile2py import translate_xmile + if old: + # TODO: remove when this branch is ready to merge + from .translation.xmile.xmile2py import translate_xmile + py_model_file = translate_xmile(xmile_file) + else: + from pysd.translation.xmile.xmile_file import XmileFile + from pysd.building.python.python_model_builder import ModelBuilder + xmile_file_obj = XmileFile(xmile_file) + xmile_file_obj.parse() - py_model_file = translate_xmile(xmile_file) + abs_model = xmile_file_obj.get_abstract_model() + py_model_file = ModelBuilder(abs_model).build_model() model = load(py_model_file, data_files, initialize, missing_values) - model.xmile_file = xmile_file + model.xmile_file = str(xmile_file) return model @@ -76,7 +85,7 @@ def read_vensim(mdl_file, data_files=None, initialize=True, Parameters ---------- - mdl_file : str + mdl_file: str or pathlib.Path The relative path filename for a raw Vensim `.mdl` file. initialize: bool (optional) @@ -87,7 +96,7 @@ def read_vensim(mdl_file, data_files=None, initialize=True, If given the list of files where the necessary data to run the model is given. Default is None. - missing_values : str ("warning", "error", "ignore", "keep") (optional) + missing_values: str ("warning", "error", "ignore", "keep") (optional) What to do with missing values. If "warning" (default) shows a warning message and interpolates the values. If "raise" raises an error. 
If "ignore" interpolates @@ -126,11 +135,13 @@ def read_vensim(mdl_file, data_files=None, initialize=True, """ if old: + # TODO: remove when this branch is ready to merge from .translation.vensim.vensim2py import translate_vensim - py_model_file = translate_vensim(mdl_file, split_views, encoding, **kwargs) + py_model_file = translate_vensim( + mdl_file, split_views, encoding, **kwargs) else: - from pysd.translation.vensim.vensin_file import VensimFile - from pysd.building.python.python_builder import ModelBuilder + from pysd.translation.vensim.vensim_file import VensimFile + from pysd.building.python.python_model_builder import ModelBuilder ven_file = VensimFile(mdl_file) ven_file.parse() if split_views: diff --git a/pysd/tools/benchmarking.py b/pysd/tools/benchmarking.py index 16754ac0..debcf99c 100644 --- a/pysd/tools/benchmarking.py +++ b/pysd/tools/benchmarking.py @@ -65,7 +65,7 @@ def runner(model_file, canonical_file=None, transpose=False, data_files=None, if model_file.suffix.lower() == ".mdl": model = read_vensim(model_file, data_files, old=old) elif model_file.suffix.lower() == ".xmile": - model = read_xmile(model_file, data_files) + model = read_xmile(model_file, data_files, old=old) elif model_file.suffix.lower() == ".py": model = load(model_file, data_files) else: diff --git a/pysd/translation/structures/components.py b/pysd/translation/structures/abstract_expressions.py similarity index 100% rename from pysd/translation/structures/components.py rename to pysd/translation/structures/abstract_expressions.py diff --git a/pysd/translation/vensim/parsing_expr/common_grammar.peg b/pysd/translation/vensim/parsing_grammars/common_grammar.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/common_grammar.peg rename to pysd/translation/vensim/parsing_grammars/common_grammar.peg diff --git a/pysd/translation/vensim/parsing_expr/components.peg b/pysd/translation/vensim/parsing_grammars/components.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/components.peg rename to pysd/translation/vensim/parsing_grammars/components.peg diff --git a/pysd/translation/vensim/parsing_expr/element_object.peg b/pysd/translation/vensim/parsing_grammars/element_object.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/element_object.peg rename to pysd/translation/vensim/parsing_grammars/element_object.peg diff --git a/pysd/translation/vensim/parsing_expr/file_sections.peg b/pysd/translation/vensim/parsing_grammars/file_sections.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/file_sections.peg rename to pysd/translation/vensim/parsing_grammars/file_sections.peg diff --git a/pysd/translation/vensim/parsing_expr/lookups.peg b/pysd/translation/vensim/parsing_grammars/lookups.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/lookups.peg rename to pysd/translation/vensim/parsing_grammars/lookups.peg diff --git a/pysd/translation/vensim/parsing_expr/section_elements.peg b/pysd/translation/vensim/parsing_grammars/section_elements.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/section_elements.peg rename to pysd/translation/vensim/parsing_grammars/section_elements.peg diff --git a/pysd/translation/vensim/parsing_expr/sketch.peg b/pysd/translation/vensim/parsing_grammars/sketch.peg similarity index 100% rename from pysd/translation/vensim/parsing_expr/sketch.peg rename to pysd/translation/vensim/parsing_grammars/sketch.peg diff --git 
a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 1bdcf86e..db09a269 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -3,13 +3,15 @@ import parsimonious import numpy as np -from ..structures.abstract_model import AbstractData, AbstractLookup, AbstractComponent, AbstractUnchangeableConstant +from ..structures.abstract_model import\ + AbstractData, AbstractLookup, AbstractComponent,\ + AbstractUnchangeableConstant from . import vensim_utils as vu -from .vensim_structures import structures, operators, parsing_ops +from .vensim_structures import structures, parsing_ops -class Element(): # File section dataclass +class Element(): def __init__(self, equation, units, documentation): self.equation = equation diff --git a/pysd/translation/vensim/vensin_file.py b/pysd/translation/vensim/vensim_file.py similarity index 99% rename from pysd/translation/vensim/vensin_file.py rename to pysd/translation/vensim/vensim_file.py index fbe63f31..579c214a 100644 --- a/pysd/translation/vensim/vensin_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -87,7 +87,7 @@ def parse(self): section.path = self.mdl_path.parent.joinpath( self.clean_file_names(section.name)[0] ).with_suffix(".py") - # TODO modify names and paths of macros + for section in self.sections: section._parse() diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translation/vensim/vensim_section.py index 30293662..4cdee6b0 100644 --- a/pysd/translation/vensim/vensim_section.py +++ b/pysd/translation/vensim/vensim_section.py @@ -100,8 +100,6 @@ def merge_components(self): merged[name].components.append(component.get_abstract_component()) - - return list(merged.values()) diff --git a/pysd/translation/vensim/vensim_structures.py b/pysd/translation/vensim/vensim_structures.py index 5341358c..9e6a439e 100644 --- a/pysd/translation/vensim/vensim_structures.py +++ b/pysd/translation/vensim/vensim_structures.py @@ -1,39 +1,39 @@ import re -from ..structures import components as cs +from ..structures import abstract_expressions as ae structures = { - "reference": cs.ReferenceStructure, - "subscripts_ref": cs.SubscriptsReferenceStructure, - "arithmetic": cs.ArithmeticStructure, - "logic": cs.LogicStructure, - "with_lookup": cs.InlineLookupsStructure, - "call": cs.CallStructure, - "game": cs.GameStructure, - "get_xls_lookups": cs.GetLookupsStructure, - "get_direct_lookups": cs.GetLookupsStructure, - "get_xls_data": cs.GetDataStructure, - "get_direct_data": cs.GetDataStructure, - "get_xls_constants": cs.GetConstantsStructure, - "get_direct_constants": cs.GetConstantsStructure, - "initial": cs.InitialStructure, - "integ": cs.IntegStructure, - "delay1": lambda x, y: cs.DelayStructure(x, y, x, 1), - "delay1i": lambda x, y, z: cs.DelayStructure(x, y, z, 1), - "delay3": lambda x, y: cs.DelayStructure(x, y, x, 3), - "delay3i": lambda x, y, z: cs.DelayStructure(x, y, z, 3), - "delay_n": cs.DelayNStructure, - "delay_fixed": cs.DelayFixedStructure, - "smooth": lambda x, y: cs.SmoothStructure(x, y, x, 1), - "smoothi": lambda x, y, z: cs.SmoothStructure(x, y, z, 1), - "smooth3": lambda x, y: cs.SmoothStructure(x, y, x, 3), - "smooth3i": lambda x, y, z: cs.SmoothStructure(x, y, z, 3), - "smooth_n": cs.SmoothNStructure, - "trend": cs.TrendStructure, - "forecast": cs.ForecastStructure, - "sample_if_true": cs.SampleIfTrueStructure, - "lookup": cs.LookupsStructure, - "data": cs.DataStructure + "reference": ae.ReferenceStructure, + "subscripts_ref": 
ae.SubscriptsReferenceStructure, + "arithmetic": ae.ArithmeticStructure, + "logic": ae.LogicStructure, + "with_lookup": ae.InlineLookupsStructure, + "call": ae.CallStructure, + "game": ae.GameStructure, + "get_xls_lookups": ae.GetLookupsStructure, + "get_direct_lookups": ae.GetLookupsStructure, + "get_xls_data": ae.GetDataStructure, + "get_direct_data": ae.GetDataStructure, + "get_xls_constants": ae.GetConstantsStructure, + "get_direct_constants": ae.GetConstantsStructure, + "initial": ae.InitialStructure, + "integ": ae.IntegStructure, + "delay1": lambda x, y: ae.DelayStructure(x, y, x, 1), + "delay1i": lambda x, y, z: ae.DelayStructure(x, y, z, 1), + "delay3": lambda x, y: ae.DelayStructure(x, y, x, 3), + "delay3i": lambda x, y, z: ae.DelayStructure(x, y, z, 3), + "delay_n": ae.DelayNStructure, + "delay_fixed": ae.DelayFixedStructure, + "smooth": lambda x, y: ae.SmoothStructure(x, y, x, 1), + "smoothi": lambda x, y, z: ae.SmoothStructure(x, y, z, 1), + "smooth3": lambda x, y: ae.SmoothStructure(x, y, x, 3), + "smooth3i": lambda x, y, z: ae.SmoothStructure(x, y, z, 3), + "smooth_n": ae.SmoothNStructure, + "trend": ae.TrendStructure, + "forecast": ae.ForecastStructure, + "sample_if_true": ae.SampleIfTrueStructure, + "lookup": ae.LookupsStructure, + "data": ae.DataStructure } diff --git a/pysd/translation/vensim/vensim_utils.py b/pysd/translation/vensim/vensim_utils.py index ca369555..7c32b378 100644 --- a/pysd/translation/vensim/vensim_utils.py +++ b/pysd/translation/vensim/vensim_utils.py @@ -10,7 +10,7 @@ class Grammar(): _common_grammar = None - _grammar_path: Path = Path(__file__).parent.joinpath("parsing_expr") + _grammar_path: Path = Path(__file__).parent.joinpath("parsing_grammars") _grammar: Dict = {} @classmethod diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py new file mode 100644 index 00000000..ca0f25e1 --- /dev/null +++ b/pysd/translation/xmile/xmile_element.py @@ -0,0 +1,395 @@ +import re +import warnings +import parsimonious +import numpy as np + +from ..structures.abstract_model import AbstractData, AbstractLookup,\ + AbstractComponent + +from . 
import xmile_utils as vu +from .xmile_structures import structures, parsing_ops + + +class Element(): + + def __init__(self, node, ns): + self.node = node + self.ns = ns + self.name = node.attrib['name'] + self.units = self.get_xpath_text(node, 'ns:units') or "" + self.documentation = self.get_xpath_text(node, 'ns:doc') or "" + + def __str__(self): + text = "\n%s definition: %s" % (self.kind, self.name) + text += "\nSubscripts: %s" % repr(self.subscripts)\ + if self.subscripts else "" + text += "\n\t%s" % self._expression + return text + + @property + def _expression(self): + if hasattr(self, "ast"): + return str(self.ast).replace("\n", "\n\t") + + else: + return self.node.text.replace("\n", "\n\t") + + @property + def _verbose(self): + return self.__str__() + + @property + def verbose(self): + print(self._verbose) + + def get_xpath_text(self, node, xpath): + """ Safe access of occasionally missing text""" + try: + return node.xpath(xpath, namespaces=self.ns)[0].text + except IndexError: + return None + + def get_xpath_attrib(self, node, xpath, attrib): + """ Safe access of occasionally missing attributes""" + # defined here to take advantage of NS in default + try: + return node.xpath(xpath, namespaces=self.ns)[0].attrib[attrib] + except IndexError: + return None + + def get_lims(self): + lims = ( + self.get_xpath_attrib(self.node, 'ns:range', 'min'), + self.get_xpath_attrib(self.node, 'ns:range', 'max') + ) + return tuple(float(x) if x is not None else x for x in lims) + + def parse_lookup_xml_node(self, node): + ys_node = node.xpath('ns:ypts', namespaces=self.ns)[0] + ys = np.fromstring( + ys_node.text, + dtype=float, + sep=ys_node.attrib['sep'] if 'sep' in ys_node.attrib else ',' + ) + xscale_node = node.xpath('ns:xscale', namespaces=self.ns) + if len(xscale_node) > 0: + xmin = xscale_node[0].attrib['min'] + xmax = xscale_node[0].attrib['max'] + xs = np.linspace(float(xmin), float(xmax), len(ys)) + else: + xs_node = node.xpath('ns:xpts', namespaces=self.ns)[0] + xs = np.fromstring( + xs_node.text, + dtype=float, + sep=xs_node.attrib['sep'] if 'sep' in xs_node.attrib else ',' + ) + + type = node.attrib['type'] if 'type' in node.attrib else 'continuous' + + functions_map = { + "continuous": { + "name": "lookup", + "module": "functions" + }, + 'extrapolation': { + "name": "lookup_extrapolation", + "module": "functions" + }, + 'discrete': { + "name": "lookup_discrete", + "module": "functions" + } + } + lookup_function = functions_map[type] if type in functions_map\ + else functions_map['continuous'] + + return { + 'name': node.attrib['name'] if 'name' in node.attrib else '', + 'xs': xs, + 'ys': ys, + 'type': type, + 'function': lookup_function + } + + +class Flaux(Element): + """Flow or auxiliary variable""" + def __init__(self, node, ns): + super().__init__(node, ns) + self.limits = self.get_lims() + + @property + def _verbose(self): + return self.__str__() + + @property + def verbose(self): + print(self._verbose) + + def _parse(self): + eqn = self.get_xpath_text(self.node, 'ns:eqn') + + # Replace new lines with space, and replace 2 or more spaces with + # single space.
Then ensure there is no space at start or end of + # equation + eqn = re.sub(r"(\s{2,})", " ", eqn.replace("\n", ' ')).strip() + ast = smile_parser.parse(eqn, element) + + gf_node = self.node.xpath("ns:gf", namespaces=self.ns) + if len(gf_node) > 0: + gf_data = self.parse_lookup_xml_node(gf_node[0]) + xs = '[' + ','.join("%10.3f" % x for x in gf_data['xs']) + ']' + ys = '[' + ','.join("%10.3f" % x for x in gf_data['ys']) + ']' + py_expr =\ + builder.build_function_call(gf_data['function'], + [element['py_expr'], xs, ys])\ + + ' if x is None else '\ + + builder.build_function_call(gf_data['function'], + ['x', xs, ys]) + element.update({ + 'kind': 'lookup', + # This lookup is declared as inline, so we should implement + # inline mode for flow and aux + 'arguments': "x = None", + 'py_expr': py_expr + }) + + self.ast = ast + + def get_abstract_component(self): + return AbstractComponent(subscripts=self.subscripts, ast=self.ast) + + +class Gf(Element): + """Gf variable (lookup)""" + kind = "Gf component" + + def __init__(self, node, ns): + super().__init__(node, ns) + self.limits = self.get_lims() + + def get_lims(self): + lims = ( + self.get_xpath_attrib(self.node, 'ns:yscale', 'min'), + self.get_xpath_attrib(self.node, 'ns:yscale', 'max') + ) + return tuple(float(x) if x is not None else x for x in lims) + + def _parse(self): + gf_data = self.parse_lookup_xml_node(self.node) + xs = gf_data['xs'] + ys = gf_data['ys'] + self.ast = None + + def get_abstract_component(self): + return AbstractLookup(subscripts=self.subscripts, ast=self.ast) + + +class Stock(Element): + """Stock component (Integ)""" + kind = "Stock component" + + def __init__(self, node, ns): + super().__init__(node, ns) + self.limits = self.get_lims() + + def _parse(self): + # Parse each flow equation + inflows = [ + smile_parser.parse(inflow.text) + for inflow in self.node.xpath('ns:inflow', namespaces=self.ns)] + outflows = [ + smile_parser.parse(outflow.text) + for outflow in self.node.xpath('ns:outflow', namespaces=self.ns)] + + if inflows: + # stock has inflows + expr = ["+"] * (len(inflows)-1) + ["-"] * len(outflows) + elif outflows: + # stock has no inflows but outflows + outflows[0] = structures["negative"](outflows[0]) + expr = ["-"] * (len(outflows)-1) + else: + # stock is constant + expr = [] + inflows = [0] + + if expr: + # stock has more than one flow + flows = structures["arithmetic"](expr, inflows+outflows) + else: + # stock has only one flow + flows = inflows + outflows + + # Read the initial value equation for stock element + initial = smile_parser.parse(self.get_xpath_text(self.node, 'ns:eqn')) + + self.ast = structures["stock"](flows, initial) + + def get_abstract_component(self): + return AbstractComponent(subscripts=self.subscripts, ast=self.ast) + + +class SubscriptRange(): + """Subscript range definition.""" + + def __init__(self, name, definition, mapping=[]): + self.name = name + self.definition = definition + self.mapping = mapping + + def __str__(self): + return "\nSubscript range definition: %s\n\t%s\n" % ( + self.name, + self.definition) + + @property + def _verbose(self): + return self.__str__() + + @property + def verbose(self): + print(self._verbose) + + +class ComponentsParser(parsimonious.NodeVisitor): + def __init__(self, ast): + self.translation = None + self.elements = {} + self.subs = None # the subscripts if given + self.negatives = set() + self.visit(ast) + + def visit_expr_type(self, n, vc): + self.translation = self.elements[vc[0]] + + def visit_final_expr(self, n, vc): + return vu.split_arithmetic(
+ structures["logic"], parsing_ops["logic_ops"], + "".join(vc).strip(), self.elements) + + def visit_logic_expr(self, n, vc): + id = vc[2] + if vc[0].lower() == ":not:": + id = self.add_element(structures["logic"]( + [":NOT:"], + (self.elements[id],) + )) + return id + + def visit_comp_expr(self, n, vc): + return vu.split_arithmetic( + structures["logic"], parsing_ops["comp_ops"], + "".join(vc).strip(), self.elements) + + def visit_add_expr(self, n, vc): + return vu.split_arithmetic( + structures["arithmetic"], parsing_ops["add_ops"], + "".join(vc).strip(), self.elements) + + def visit_prod_expr(self, n, vc): + return vu.split_arithmetic( + structures["arithmetic"], parsing_ops["prod_ops"], + "".join(vc).strip(), self.elements) + + def visit_exp_expr(self, n, vc): + return vu.split_arithmetic( + structures["arithmetic"], parsing_ops["exp_ops"], + "".join(vc).strip(), self.elements, self.negatives) + + def visit_neg_expr(self, n, vc): + id = vc[2] + if vc[0] == "-": + if isinstance(self.elements[id], (float, int)): + self.elements[id] = -self.elements[id] + else: + self.negatives.add(id) + return id + + def visit_call(self, n, vc): + func = self.elements[vc[0]] + args = self.elements[vc[4]] + if func.reference in structures: + return self.add_element(structures[func.reference](*args)) + else: + return self.add_element(structures["call"](func, args)) + + def visit_reference(self, n, vc): + id = self.add_element(structures["reference"]( + vc[0].lower().replace(" ", "_"), self.subs)) + self.subs = None + return id + + def visit_range(self, n, vc): + return self.add_element(n.text.strip()[:-1].replace(")-(", "),(")) + + def visit_lookup_with_def(self, n, vc): + if vc[10]: + xy_range = np.array(eval(self.elements[vc[10]])) + else: + xy_range = np.full((2, 2), np.nan) + + values = np.array((eval(vc[11]))) + values = values[np.argsort(values[:, 0])] + + lookup = structures["lookup"]( + x=tuple(values[:, 0]), + y=tuple(values[:, 1]), + x_range=tuple(xy_range[:, 0]), + y_range=tuple(xy_range[:, 1]) + ) + + return self.add_element(structures["with_lookup"]( + self.elements[vc[4]], lookup)) + + def visit_array(self, n, vc): + if ";" in n.text or "," in n.text: + return self.add_element(np.squeeze(np.array( + [row.split(",") for row in n.text.strip(";").split(";")], + dtype=float))) + else: + return self.add_element(eval(n.text)) + + def visit_subscript_list(self, n, vc): + subs = [x.strip() for x in vc[2].split(",")] + self.subs = structures["subscripts_ref"](subs) + return "" + + def visit_name(self, n, vc): + return n.text.strip() + + def visit_expr(self, n, vc): + if vc[0] not in self.elements: + return self.add_element(eval(vc[0])) + else: + return vc[0] + + def visit_string(self, n, vc): + return self.add_element(eval(n.text)) + + def visit_arguments(self, n, vc): + arglist = tuple(x.strip(",") for x in vc) + return self.add_element(tuple( + self.elements[arg] if arg in self.elements + else eval(arg) for arg in arglist)) + + def visit_parens(self, n, vc): + return vc[2] + + def visit__(self, n, vc): + """Handles whitespace characters""" + return "" + + def visit_nan(self, n, vc): + return "np.nan" + + def visit_empty(self, n, vc): + #warnings.warn(f"Empty expression for '{element['real_name']}''.") + return self.add_element(None) + + def generic_visit(self, n, vc): + return "".join(filter(None, vc)) or n.text + + def add_element(self, element): + return vu.add_element(self.elements, element) diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translation/xmile/xmile_file.py new file mode 
100644 index 00000000..8e232ef6 --- /dev/null +++ b/pysd/translation/xmile/xmile_file.py @@ -0,0 +1,82 @@ +from pathlib import Path +from lxml import etree + +from ..structures.abstract_model import AbstractModel + +from .xmile_section import FileSection + + +class XmileFile(): + """ + Create an XmileFile object, which allows parsing an Xmile file. + + Parameters + ---------- + xmile_path: str or pathlib.Path + Path to the Xmile model. + + encoding: str or None (optional) + Encoding of the source model file. If None, the encoding will be + read from the model; if the encoding is not defined in the model + file it will be set to 'UTF-8'. Default is None. + + """ + def __init__(self, xmile_path, encoding=None): + self.xmile_path = Path(xmile_path) + self.root_path = self.xmile_path.parent + self.xmile_root = self.get_root() + self.ns = self.xmile_root.nsmap[None] # namespace of the xmile + self.view_elements = None + + def __str__(self): + return "\nXmile model file, loaded from:\n\t%s\n" % self.xmile_path + + @property + def _verbose(self): + text = self.__str__() + for section in self.sections: + text += section._verbose + + return text + + @property + def verbose(self): + print(self._verbose) + + def get_root(self): + """Read the Xmile file and return the root node of the parsed XML tree""" + # check for model extension + if self.xmile_path.suffix.lower() != ".xmile": + raise ValueError( + "The file to translate, '%s' " % self.xmile_path + + "is not an Xmile model. It must end with the .xmile extension." + ) + + return etree.parse( + str(self.xmile_path), + parser=etree.XMLParser(encoding="utf-8", recover=True) + ).getroot() + + def parse(self): + # We keep everything in a single section + # TODO: in order to make macros work we need to split them here in + # several sections + self.sections = [FileSection( + name="__main__", + path=self.xmile_path.with_suffix(".py"), + type="main", + params=[], + returns=[], + content_root=self.xmile_root, + namespace=self.ns, + split=False, + views_dict=None)] + + for section in self.sections: + section._parse() + + def get_abstract_model(self): + return AbstractModel( + original_path=self.xmile_path, + sections=tuple(section.get_abstract_section() + for section in self.sections)) diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translation/xmile/xmile_section.py new file mode 100644 index 00000000..eea777df --- /dev/null +++ b/pysd/translation/xmile/xmile_section.py @@ -0,0 +1,125 @@ +from typing import List, Union +from pathlib import Path + +from ..structures.abstract_model import\ + AbstractElement, AbstractSubscriptRange, AbstractSection + +from .xmile_element import SubscriptRange, Flaux, Gf, Stock + + +class FileSection(): + + def __init__(self, name: str, path: Path, type: str, + params: List[str], returns: List[str], + content_root: str, namespace: str, split: bool, + views_dict: Union[dict, None] + ) -> object: + self.name = name + self.path = path + self.type = type + self.params = params + self.returns = returns + self.content = content_root + self.ns = {"ns": namespace} + self.split = split + self.views_dict = views_dict + self.elements = None + + def __str__(self): + return "\nFile section: %s\n" % self.name + + @property + def _verbose(self): + text = self.__str__() + if self.elements: + for element in self.elements: + text += element._verbose + else: + text += self.content + + return text + + @property + def verbose(self): + print(self._verbose) + + def _parse(self): + self.subscripts = self._parse_subscripts() +
self.components = self._parse_components() + self.elements = self.subscripts + self.components + + def _parse_subscripts(self): + """Parse the subscripts of the section""" + subscripts_ranges = [] + path = "ns:dimensions/ns:dim" + for node in self.content.xpath(path, namespaces=self.ns): + name = node.attrib["name"] + subscripts = [ + sub.attrib["name"] + for sub in node.xpath("ns:elem", namespaces=self.ns) + ] + subscripts_ranges.append(SubscriptRange(name, subscripts, [])) + return subscripts_ranges + + def _parse_components(self): + components = [] + + flaux_xpath = "ns:model/ns:variables/ns:aux|"\ + "ns:model/ns:variables/ns:flow" + for node in self.content.xpath(flaux_xpath, namespaces=self.ns): + # flows and auxiliary variables + components.append(Flaux(node, self.ns)) + + gf_xpath = "ns:model/ns:variables/ns:gf" + for node in self.content.xpath(gf_xpath, namespaces=self.ns): + # Lookups + components.append(Gf(node, self.ns)) + + stock_xpath = "ns:model/ns:variables/ns:stock" + for node in self.content.xpath(stock_xpath, namespaces=self.ns): + # Integs (stocks) + components.append(Stock(node, self.ns)) + + [component._parse() for component in components] + return components + + def get_abstract_section(self): + return AbstractSection( + name=self.name, + path=self.path, + type=self.type, + params=self.params, + returns=self.returns, + subscripts=self.solve_subscripts(), + elements=self.merge_components(), + split=self.split, + views_dict=self.views_dict + ) + + def solve_subscripts(self): + return [AbstractSubscriptRange( + name=subs_range.name, + subscripts=subs_range.definition, + mapping=subs_range.mapping + ) for subs_range in self.subscripts] + + def merge_components(self): + merged = {} + for component in self.components: + name = component.name.lower().replace(" ", "_") + if name not in merged: + merged[name] = AbstractElement( + name=component.name, + components=[]) + + if component.units: + merged[name].units = component.units + if component.limits[0] is not None\ + or component.limits[1] is not None: + merged[name].range = component.limits + if component.documentation: + merged[name].documentation = component.documentation + + merged[name].components.append(component.get_abstract_component()) + + return list(merged.values()) diff --git a/pysd/translation/xmile/xmile_structures.py b/pysd/translation/xmile/xmile_structures.py new file mode 100644 index 00000000..33d8ce9e --- /dev/null +++ b/pysd/translation/xmile/xmile_structures.py @@ -0,0 +1,54 @@ +import re +from ..structures import abstract_expressions as ae + + +structures = { + "reference": ae.ReferenceStructure, + "subscripts_ref": ae.SubscriptsReferenceStructure, + "arithmetic": ae.ArithmeticStructure, + "logic": ae.LogicStructure, + "inline_lookup": ae.InlineLookupsStructure, + "call": ae.CallStructure, + "game": ae.GameStructure, + "get_xls_lookups": ae.GetLookupsStructure, + "get_direct_lookups": ae.GetLookupsStructure, + "get_xls_data": ae.GetDataStructure, + "get_direct_data": ae.GetDataStructure, + "get_xls_constants": ae.GetConstantsStructure, + "get_direct_constants": ae.GetConstantsStructure, + "initial": ae.InitialStructure, + "stock": ae.IntegStructure, + "delay1": lambda x, y: ae.DelayStructure(x, y, x, 1), + "delay1i": lambda x, y, z: ae.DelayStructure(x, y, z, 1), + "delay3": lambda x, y: ae.DelayStructure(x, y, x, 3), + "delay3i": lambda x, y, z: ae.DelayStructure(x, y, z, 3), + "delay_n": ae.DelayNStructure, + "delay_fixed": ae.DelayFixedStructure, + "smooth": lambda x, y: ae.SmoothStructure(x, y, x, 1), +
"smoothi": lambda x, y, z: ae.SmoothStructure(x, y, z, 1), + "smooth3": lambda x, y: ae.SmoothStructure(x, y, x, 3), + "smooth3i": lambda x, y, z: ae.SmoothStructure(x, y, z, 3), + "smooth_n": ae.SmoothNStructure, + "trend": ae.TrendStructure, + "forecast": ae.ForecastStructure, + "sample_if_true": ae.SampleIfTrueStructure, + "lookup": ae.LookupsStructure, + "data": ae.DataStructure +} + + +operators = { + "logic_ops": [":AND:", ":OR:"], + "not_ops": [":NOT:"], + "comp_ops": ["=", "<>", "<=", "<", ">=", ">"], + "add_ops": ["+", "-"], + "prod_ops": ["*", "/"], + "exp_ops": ["^"], + "pre_ops": ["+", "-"] +} + + +parsing_ops = { + key: "|".join(re.escape(x) for x in values) + for key, values in operators.items() +} diff --git a/pysd/translation/xmile/xmile_utils.py b/pysd/translation/xmile/xmile_utils.py new file mode 100644 index 00000000..ca369555 --- /dev/null +++ b/pysd/translation/xmile/xmile_utils.py @@ -0,0 +1,115 @@ +import re +import warnings +import uuid + +import parsimonious +from typing import Dict +from pathlib import Path +from chardet import detect + + +class Grammar(): + _common_grammar = None + _grammar_path: Path = Path(__file__).parent.joinpath("parsing_expr") + _grammar: Dict = {} + + @classmethod + def get(cls, grammar: str, subs: dict = {}) -> parsimonious.Grammar: + """Get parsimonious grammar for parsing""" + if grammar not in cls._grammar: + # include grammar in the class singleton + cls._grammar[grammar] = parsimonious.Grammar( + cls._read_grammar(grammar) % subs + ) + + return cls._grammar[grammar] + + @classmethod + def _read_grammar(cls, grammar: str) -> str: + """Read grammar from a file and include common grammar""" + with cls._gpath(grammar).open(encoding="ascii") as gfile: + source_grammar: str = gfile.read() + + return cls._include_common_grammar(source_grammar) + + @classmethod + def _include_common_grammar(cls, source_grammar: str) -> str: + """Include common grammar""" + if not cls._common_grammar: + with cls._gpath("common_grammar").open(encoding="ascii") as gfile: + cls._common_grammar: str = gfile.read() + + return r"{source_grammar}{common_grammar}".format( + source_grammar=source_grammar, common_grammar=cls._common_grammar + ) + + @classmethod + def _gpath(cls, grammar: str) -> Path: + """Get the grammar file path""" + return cls._grammar_path.joinpath(grammar).with_suffix(".peg") + + @classmethod + def clean(cls) -> None: + """Clean the saved grammars (used for debugging)""" + cls._common_grammar = None + cls._grammar: Dict = {} + + +def _detect_encoding_from_file(mdl_file: Path) -> str: + """Detect and return the encoding from a Vensim file""" + try: + with mdl_file.open("rb") as in_file: + f_line: bytes = in_file.readline() + f_line: str = f_line.decode(detect(f_line)['encoding']) + return re.search(r"(?<={)(.*)(?=})", f_line).group() + except (AttributeError, UnicodeDecodeError): + warnings.warn( + "No encoding specified or detected to translate the model " + "file. 
'UTF-8' encoding will be used.") + return "UTF-8" + + +def split_arithmetic(structure: object, parsing_ops: dict, + expression: str, elements: dict, + negatives: set = set()) -> object: + pattern = re.compile(parsing_ops) + parts = pattern.split(expression) + ops = pattern.findall(expression) + if not ops: + if parts[0] in negatives: + negatives.remove(parts[0]) + return add_element( + elements, + structure(["negative"], (elements[parts[0]],))) + else: + return expression + else: + if not negatives: + return add_element( + elements, + structure( + ops, + tuple([elements[id] for id in parts]))) + else: + # manage negative expressions + current_id = parts.pop() + current = elements[current_id] + if current_id in negatives: + negatives.remove(current_id) + current = structure(["negative"], (current,)) + while ops: + current_id = parts.pop() + current = structure( + [ops.pop()], + (elements[current_id], current)) + if current_id in negatives: + negatives.remove(current_id) + current = structure(["negative"], (current,)) + + return add_element(elements, current) + + +def add_element(elements: dict, element: object) -> str: + id = uuid.uuid4().hex + elements[id] = element + return id diff --git a/tests/integration_test_xmile_pathway.py b/tests/integration_test_xmile_pathway.py index 3fcfc1ad..5e7f8186 100644 --- a/tests/integration_test_xmile_pathway.py +++ b/tests/integration_test_xmile_pathway.py @@ -12,359 +12,124 @@ class TestIntegrationExamples(unittest.TestCase): def test_abs(self): - output, canon = runner(test_models + '/abs/test_abs.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('error in model file') - def test_active_initial(self): - output, canon = runner(test_models + '/active_initial/test_active_initial.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing model file') - def test_arguments(self): - output, canon = runner(test_models + '/arguments/test_arguments.mdl') + output, canon = runner(test_models + '/abs/test_abs.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_builtin_max(self): - output, canon = runner(test_models + '/builtin_max/builtin_max.xmile') + output, canon = runner(test_models + '/builtin_max/builtin_max.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_builtin_min(self): - output, canon = runner(test_models + '/builtin_min/builtin_min.xmile') + output, canon = runner(test_models + '/builtin_min/builtin_min.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_chained_initialization(self): output, canon = runner( - test_models + '/chained_initialization/test_chained_initialization.xmile') + test_models + '/chained_initialization/test_chained_initialization.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_comparisons(self): output, canon = runner( - test_models + '/comparisons/comparisons.xmile') + test_models + '/comparisons/comparisons.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_constant_expressions(self): output, canon = runner( - test_models + '/constant_expressions/test_constant_expressions.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_delay_parentheses(self): - output, canon = runner( - test_models + '/delay_parentheses/test_delay_parentheses.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_delays(self): - output, canon = runner(test_models + 
'/delays/test_delays.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_euler_step_vs_saveper(self): - output, canon = runner( - test_models + '/euler_step_vs_saveper/test_euler_step_vs_saveper.xmile') + test_models + '/constant_expressions/test_constant_expressions.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_eval_order(self): output, canon = runner( - test_models + '/eval_order/eval_order.xmile') + test_models + '/eval_order/eval_order.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_exp(self): - output, canon = runner(test_models + '/exp/test_exp.xmile') + output, canon = runner(test_models + '/exp/test_exp.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_exponentiation(self): - output, canon = runner(test_models + '/exponentiation/exponentiation.xmile') + output, canon = runner(test_models + '/exponentiation/exponentiation.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_function_capitalization(self): output, canon = runner( - test_models + '/function_capitalization/test_function_capitalization.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('not sure if this is implemented in xmile?') - def test_game(self): - output, canon = runner(test_models + '/game/test_game.xmile') + test_models + '/function_capitalization/test_function_capitalization.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_if_stmt(self): - output, canon = runner(test_models + '/if_stmt/if_stmt.xmile') + output, canon = runner(test_models + '/if_stmt/if_stmt.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_initial_function(self): - output, canon = runner(test_models + '/initial_function/test_initial.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile model') - def test_input_functions(self): - output, canon = runner(test_models + '/input_functions/test_inputs.mdl') + output, canon = runner(test_models + '/initial_function/test_initial.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_limits(self): - output, canon = runner(test_models + '/limits/test_limits.xmile') + output, canon = runner(test_models + '/limits/test_limits.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_line_breaks(self): - output, canon = runner(test_models + '/line_breaks/test_line_breaks.xmile') + output, canon = runner(test_models + '/line_breaks/test_line_breaks.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_line_continuation(self): - output, canon = runner(test_models + '/line_continuation/test_line_continuation.xmile') + output, canon = runner(test_models + '/line_continuation/test_line_continuation.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_ln(self): - output, canon = runner(test_models + '/ln/test_ln.xmile') + output, canon = runner(test_models + '/ln/test_ln.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_log(self): - output, canon = runner(test_models + '/log/test_log.xmile') + output, canon = runner(test_models + '/log/test_log.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_logicals(self): - output, canon = runner(test_models + '/logicals/test_logicals.xmile') + output, canon = runner(test_models + '/logicals/test_logicals.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups(self): - output, canon = 
runner(test_models + '/lookups/test_lookups.xmile') + output, canon = runner(test_models + '/lookups/test_lookups.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_xscale(self): - output, canon = runner(test_models + '/lookups/test_lookups_xscale.xmile') + output, canon = runner(test_models + '/lookups/test_lookups_xscale.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_xpts_sep(self): - output, canon = runner(test_models + '/lookups/test_lookups_xpts_sep.xmile') + output, canon = runner(test_models + '/lookups/test_lookups_xpts_sep.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_ypts_sep(self): - output, canon = runner(test_models + '/lookups/test_lookups_ypts_sep.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_lookups_funcnames(self): - output, canon = runner(test_models + '/lookups_funcnames/test_lookups_funcnames.mdl') + output, canon = runner(test_models + '/lookups/test_lookups_ypts_sep.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_lookups_inline(self): - output, canon = runner(test_models + '/lookups_inline/test_lookups_inline.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_lookups_inline_bounded(self): - output, canon = runner( - test_models + '/lookups_inline_bounded/test_lookups_inline_bounded.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_macro_cross_reference(self): - output, canon = runner(test_models + '/macro_cross_reference/test_macro_cross_reference.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_macro_expression(self): - output, canon = runner(test_models + '/macro_expression/test_macro_expression.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_macro_multi_expression(self): - output, canon = runner( - test_models + '/macro_multi_expression/test_macro_multi_expression.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_macro_multi_macros(self): - output, canon = runner( - test_models + '/macro_multi_macros/test_macro_multi_macros.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_macro_output(self): - output, canon = runner(test_models + '/macro_output/test_macro_output.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_macro_stock(self): - output, canon = runner(test_models + '/macro_stock/test_macro_stock.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('do we need this?') - def test_macro_trailing_definition(self): - output, canon = runner(test_models + '/macro_trailing_definition/test_macro_trailing_definition.mdl') + output, canon = runner(test_models + '/lookups_inline/test_lookups_inline.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_model_doc(self): - output, canon = runner(test_models + '/model_doc/model_doc.xmile') + output, canon = runner(test_models + '/model_doc/model_doc.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_number_handling(self): - output, canon = runner(test_models + '/number_handling/test_number_handling.xmile') + output, canon = runner(test_models + '/number_handling/test_number_handling.xmile', old=True) assert_frames_close(output, 
canon, rtol=rtol) def test_parentheses(self): - output, canon = runner(test_models + '/parentheses/test_parens.xmile') + output, canon = runner(test_models + '/parentheses/test_parens.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_reference_capitalization(self): """A properly formatted Vensim model should never create this failure""" output, canon = runner( - test_models + '/reference_capitalization/test_reference_capitalization.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('in branch') - def test_rounding(self): - output, canon = runner(test_models + '/rounding/test_rounding.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_smooth(self): - output, canon = runner(test_models + '/smooth/test_smooth.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_smooth_and_stock(self): - output, canon = runner(test_models + '/smooth_and_stock/test_smooth_and_stock.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_special_characters(self): - output, canon = runner( - test_models + '/special_characters/test_special_variable_names.xmile') + test_models + '/reference_capitalization/test_reference_capitalization.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_sqrt(self): - output, canon = runner(test_models + '/sqrt/test_sqrt.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_subscript_multiples(self): - output, canon = runner( - test_models + '/subscript multiples/test_multiple_subscripts.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_subscript_1d_arrays(self): - output, canon = runner( - test_models + '/subscript_1d_arrays/test_subscript_1d_arrays.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_2d_arrays(self): - output, canon = runner( - test_models + '/subscript_2d_arrays/test_subscript_2d_arrays.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_3d_arrays(self): - output, canon = runner(test_models + '/subscript_3d_arrays/test_subscript_3d_arrays.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_3d_arrays_lengthwise(self): - output, canon = runner(test_models + '/subscript_3d_arrays_lengthwise/test_subscript_3d_arrays_lengthwise.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_3d_arrays_widthwise(self): - output, canon = runner(test_models + '/subscript_3d_arrays_widthwise/test_subscript_3d_arrays_widthwise.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('in branch') - def test_subscript_aggregation(self): - output, canon = runner(test_models + '/subscript_aggregation/test_subscript_aggregation.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_subscript_constant_call(self): - output, canon = runner( - test_models + '/subscript_constant_call/test_subscript_constant_call.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_docs(self): - output, canon = runner(test_models + '/subscript_docs/subscript_docs.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def 
test_subscript_individually_defined_1_of_2d_arrays(self): - output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays/subscript_individually_defined_1_of_2d_arrays.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_individually_defined_1_of_2d_arrays_from_floats(self): - output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays_from_floats/subscript_individually_defined_1_of_2d_arrays_from_floats.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_individually_defined_1d_arrays(self): - output, canon = runner(test_models + '/subscript_individually_defined_1d_arrays/subscript_individually_defined_1d_arrays.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_individually_defined_stocks(self): - output, canon = runner(test_models + '/subscript_individually_defined_stocks/test_subscript_individually_defined_stocks.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_mixed_assembly(self): - output, canon = runner(test_models + '/subscript_mixed_assembly/test_subscript_mixed_assembly.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_selection(self): - output, canon = runner(test_models + '/subscript_selection/subscript_selection.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_subscript_subranges(self): - output, canon = runner( - test_models + '/subscript_subranges/test_subscript_subrange.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_subscript_subranges_equal(self): - output, canon = runner( - test_models + '/subscript_subranges_equal/test_subscript_subrange_equal.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscript_switching(self): - output, canon = runner(test_models + '/subscript_switching/subscript_switching.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('missing test model') - def test_subscript_updimensioning(self): - output, canon = runner( - test_models + '/subscript_updimensioning/test_subscript_updimensioning.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscripted_delays(self): - output, canon = runner(test_models + '/subscripted_delays/test_subscripted_delays.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_subscripted_flows(self): - output, canon = runner(test_models + '/subscripted_flows/test_subscripted_flows.mdl') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_time(self): - output, canon = runner(test_models + '/time/test_time.mdl') + output, canon = runner(test_models + '/sqrt/test_sqrt.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) def test_trig(self): - output, canon = runner(test_models + '/trig/test_trig.xmile') + output, canon = runner(test_models + '/trig/test_trig.xmile', old=True) assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_trend(self): - output, canon = runner(test_models + '/trend/test_trend.xmile') - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('no xmile') - def test_xidz_zidz(self): - output, canon = runner(test_models + 
'/xidz_zidz/xidz_zidz.xmile') - assert_frames_close(output, canon, rtol=rtol) - - diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py index 22876b26..0d5b0c8e 100644 --- a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -1,4 +1,5 @@ import pytest +import shutil from pysd.tools.benchmarking import runner, assert_frames_close # TODO add warnings catcher per test @@ -482,28 +483,39 @@ class TestIntegrateVensim: """ Test for splitting Vensim views in modules and submodules """ + @pytest.fixture + def test_folder(self, tmp_path, _test_models, test_data): + """ + Copy test folder to a temporary folder therefore we avoid creating + PySD model files in the original folder + """ + test_folder = tmp_path.joinpath(test_data["folder"]) + shutil.copytree( + _test_models.joinpath(test_data["folder"]), + test_folder + ) + return test_folder @pytest.fixture - def model_path(self, _test_models, test_data): - return _test_models.joinpath( - test_data["folder"]).joinpath(test_data["file"]) + def model_path(self, test_folder, test_data): + """Return model path""" + return test_folder.joinpath(test_data["file"]) @pytest.fixture - def data_path(self, _test_models, test_data): + def data_path(self, test_folder, test_data): """Fixture for models with data_path""" if "data_files" in test_data: if isinstance(test_data["data_files"], str): - return _test_models.joinpath( - test_data["folder"]).joinpath(test_data["data_files"]) + return test_folder.joinpath(test_data["data_files"]) elif isinstance(test_data["data_files"], list): return [ - _test_models.joinpath(test_data["folder"]).joinpath(file) + test_folder.joinpath(file) for file in test_data["data_files"] ] else: return { - _test_models.joinpath(test_data["folder"]).joinpath(file): - values for file, values in test_data["data_files"].items() + test_folder.joinpath(file): values + for file, values in test_data["data_files"].items() } else: return None diff --git a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py new file mode 100644 index 00000000..620ba6cd --- /dev/null +++ b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py @@ -0,0 +1,248 @@ +import pytest +import shutil +from pysd.tools.benchmarking import runner, assert_frames_close + +# TODO add warnings catcher per test + + +xmile_test = { + "abs": { + "folder": "abs", + "file": "test_abs.xmile" + }, + "active_initial": pytest.param({ + "folder": "active_initial", + "file": "test_active_initial.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "builtin_max": { + "folder": "builtin_max", + "file": "builtin_max.xmile" + }, + "builtin_min": { + "folder": "builtin_min", + "file": "builtin_min.xmile" + }, + "chained_initialization": { + "folder": "chained_initialization", + "file": "test_chained_initialization.xmile" + }, + "comparisons": { + "folder": "comparisons", + "file": "comparisons.xmile" + }, + "constant_expressions": { + "folder": "constant_expressions", + "file": "test_constant_expressions.xmile" + }, + "euler_step_vs_saveper": pytest.param({ + "folder": "euler_step_vs_saveper", + "file": "test_euler_step_vs_saveper.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "eval_order": { + "folder": "eval_order", + 
"file": "eval_order.xmile" + }, + "exp": { + "folder": "exp", + "file": "test_exp.xmile" + }, + "exponentiation": { + "folder": "exponentiation", + "file": "exponentiation.xmile" + }, + "function_capitalization": { + "folder": "function_capitalization", + "file": "test_function_capitalization.xmile" + }, + "game": { + "folder": "game", + "file": "test_game.xmile" + }, + "if_stmt": { + "folder": "if_stmt", + "file": "if_stmt.xmile" + }, + "initial_function": { + "folder": "initial_function", + "file": "test_initial.xmile" + }, + "limits": { + "folder": "limits", + "file": "test_limits.xmile" + }, + "line_breaks": { + "folder": "line_breaks", + "file": "test_line_breaks.xmile" + }, + "line_continuation": { + "folder": "line_continuation", + "file": "test_line_continuation.xmile" + }, + "ln": { + "folder": "ln", + "file": "test_ln.xmile" + }, + "log": { + "folder": "log", + "file": "test_log.xmile" + }, + "logicals": { + "folder": "logicals", + "file": "test_logicals.xmile" + }, + "lookups": { + "folder": "lookups", + "file": "test_lookups.xmile" + }, + "lookups_no-indirect": pytest.param({ + "folder": "lookups", + "file": "test_lookups_no-indirect.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "lookups_xpts_sep": { + "folder": "lookups", + "file": "test_lookups_xpts_sep.xmile" + }, + "lookups_xscale": { + "folder": "lookups", + "file": "test_lookups_xscale.xmile" + }, + "lookups_ypts_sep": { + "folder": "lookups", + "file": "test_lookups_ypts_sep.xmile" + }, + "lookups_inline": pytest.param({ + "folder": "lookups_inline", + "file": "test_lookups_inline.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "macro_expression": pytest.param({ + "folder": "macro_expression", + "file": "test_macro_expression.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "macro_multi_expression": pytest.param({ + "folder": "macro_multi_expression", + "file": "test_macro_multi_expression.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "macro_multi_macros": pytest.param({ + "folder": "macro_multi_macros", + "file": "test_macro_multi_macros.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "macro_stock": pytest.param({ + "folder": "macro_stock", + "file": "test_macro_stock.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "model_doc": { + "folder": "model_doc", + "file": "model_doc.xmile" + }, + "number_handling": { + "folder": "number_handling", + "file": "test_number_handling.xmile" + }, + "parentheses": { + "folder": "parentheses", + "file": "test_parens.xmile" + }, + "reference_capitalization": { + "folder": "reference_capitalization", + "file": "test_reference_capitalization.xmile" + }, + "smooth_and_stock": pytest.param({ + "folder": "smooth_and_stock", + "file": "test_smooth_and_stock.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "special_characters": pytest.param({ + "folder": "special_characters", + "file": "test_special_variable_names.xmile" + }, marks=pytest.mark.xfail(reason="failing originally")), + "sqrt": { + "folder": "sqrt", + "file": "test_sqrt.xmile" + }, + "subscript_1d_arrays": { + "folder": "subscript_1d_arrays", + "file": "test_subscript_1d_arrays.xmile" + }, + "subscript_constant_call": { + "folder": "subscript_constant_call", + "file": "test_subscript_constant_call.xmile" + }, + "subscript_individually_defined_1d_arrays": { + "folder": "subscript_individually_defined_1d_arrays", + "file": "subscript_individually_defined_1d_arrays.xmile" + }, + 
"subscript_mixed_assembly": { + "folder": "subscript_mixed_assembly", + "file": "test_subscript_mixed_assembly.xmile" + }, + "subscript_multiples": { + "folder": "subscript_multiples", + "file": "test_multiple_subscripts.xmile" + }, + "subscript_subranges": { + "folder": "subscript_subranges", + "file": "test_subscript_subrange.xmile" + }, + "subscript_subranges_equal": { + "folder": "subscript_subranges_equal", + "file": "test_subscript_subrange_equal.xmile" + }, + "subscript_updimensioning": { + "folder": "subscript_updimensioning", + "file": "test_subscript_updimensioning.xmile" + }, + "subscripted_flows": { + "folder": "subscripted_flows", + "file": "test_subscripted_flows.xmile" + }, + "trig": { + "folder": "trig", + "file": "test_trig.xmile" + }, + "xidz_zidz": { + "folder": "xidz_zidz", + "file": "xidz_zidz.xmile" + }, +} + + +@pytest.mark.parametrize( + "test_data", + [item for item in xmile_test.values()], + ids=list(xmile_test) +) +class TestIntegrateXmile: + """ + Test for full translation and integration of models + """ + + @pytest.fixture + def test_folder(self, tmp_path, _test_models, test_data): + """ + Copy test folder to a temporary folder therefore we avoid creating + PySD model files in the original folder + """ + test_folder = tmp_path.joinpath(test_data["folder"]) + shutil.copytree( + _test_models.joinpath(test_data["folder"]), + test_folder + ) + return test_folder + + @pytest.fixture + def model_path(self, test_folder, test_data): + """Return model path""" + return test_folder.joinpath(test_data["file"]) + + @pytest.fixture + def kwargs(self, test_data): + """Fixture for atol and rtol""" + kwargs = {} + if "atol" in test_data: + kwargs["atol"] = test_data["atol"] + if "rtol" in test_data: + kwargs["rtol"] = test_data["rtol"] + return kwargs + + def test_read_vensim_file(self, model_path, kwargs): + output, canon = runner(model_path) + assert_frames_close(output, canon, **kwargs) diff --git a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py index 135dd7d8..cc1f97b2 100644 --- a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py +++ b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py @@ -2,7 +2,7 @@ import pytest from pathlib import Path -from pysd.translation.vensim.vensin_file import VensimFile +from pysd.translation.vensim.vensim_file import VensimFile @pytest.mark.parametrize( @@ -35,7 +35,7 @@ def model_path(self, _root, path): return _root.joinpath(path) @pytest.mark.dependency(name="read_vensim_file") - def test_read_vensim_file(self, request, path, model_path): + def test_read_vensim_file(self, model_path): # assert that the files don't exist in the temporary directory ven_file = VensimFile(model_path) @@ -48,7 +48,7 @@ def test_read_vensim_file(self, request, path, model_path): assert isinstance(getattr(ven_file, "model_text"), str) @pytest.mark.dependency(depends=["read_vensim_file"]) - def test_file_split_file_sections(self, request, path, model_path): + def test_file_split_file_sections(self, model_path): ven_file = VensimFile(model_path) ven_file.parse() - print(ven_file.verbose) \ No newline at end of file + print(ven_file.verbose) From 4faeac457835a06059adad3a67b3086395c45db7 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 10 Mar 2022 15:52:37 +0100 Subject: [PATCH 05/96] Make Xmile work with new translator --- .../python/python_expressions_builder.py | 5 +- pysd/building/python/python_model_builder.py | 6 +- pysd/py_backend/lookups.py | 29 ++- 
pysd/pysd.py | 2 + .../structures/abstract_expressions.py | 5 +- pysd/translation/vensim/vensim_element.py | 6 +- .../xmile/parsing_grammars/equations.peg | 53 +++++ pysd/translation/xmile/xmile_element.py | 225 ++++++++++-------- pysd/translation/xmile/xmile_section.py | 122 +++++++--- pysd/translation/xmile/xmile_structures.py | 67 ++++-- pysd/translation/xmile/xmile_utils.py | 31 +-- .../pytest_integration_xmile_pathway.py | 36 +-- 12 files changed, 364 insertions(+), 223 deletions(-) create mode 100644 pysd/translation/xmile/parsing_grammars/equations.peg diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 88803a3b..b8989e69 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -893,6 +893,7 @@ def __init__(self, lookups_str, component): self.arguments = {} self.x = lookups_str.x self.y = lookups_str.y + self.keyword = lookups_str.type def build(self, arguments): self.component.type = "Lookup" @@ -908,6 +909,7 @@ def build(self, arguments): threshold=len(self.y) ) arguments["subscripts"] = self.def_subs + arguments["interp"] = self.keyword if "hardcoded_lookups" in self.element.objects: # object already exists @@ -926,7 +928,8 @@ def build(self, arguments): self.element.objects["hardcoded_lookups"] = { "name": arguments["name"], "expression": "%(name)s = HardcodedLookups(%(x)s, %(y)s, " - "%(subscripts)s, '%(name)s')" % arguments + "%(subscripts)s, '%(interp)s', '%(name)s')" + % arguments } return BuildAST( diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 6d596556..477688cc 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -505,13 +505,11 @@ def build_element_out(self): indent = 12 + # convert newline indicator and add expected level of indentation self.contents = contents.replace("\n", "\n" + " " * (indent+4)) self.objects = objects.replace("\n", "\n" + " " * indent) - - # convert newline indicator and add expected level of indentation - # TODO check if this is neccessary self.documentation = self.documentation.replace( - "\\", "\n").replace("\n", "\n" + "" * indent) + "\\", "\n").replace("\n", "\n" + " " * indent) return textwrap.dedent(''' %(subs_dec)s diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index 5d92eb7b..5f4c4c7a 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -19,12 +19,14 @@ def _call(self, data, x): if not x.dims: # shape 0 xarrays return self._call(data, float(x)) - if np.all(x > data['lookup_dim'].values[-1]): + if self.interp != "extrapolate" and\ + np.all(x > data['lookup_dim'].values[-1]): outdata, _ = xr.broadcast(data[-1], x) warnings.warn( self.py_name + "\n" + "extrapolating data above the maximum value of the series") - elif np.all(x < data['lookup_dim'].values[0]): + elif self.interp != "extrapolate" and\ + np.all(x < data['lookup_dim'].values[0]): outdata, _ = xr.broadcast(data[0], x) warnings.warn( self.py_name + "\n" @@ -43,15 +45,31 @@ def _call(self, data, x): if x in data['lookup_dim'].values: outdata = data.sel(lookup_dim=x) elif x > data['lookup_dim'].values[-1]: - outdata = data[-1] + if self.interp == "extrapolate": + # extrapolate method for xmile models + k = (data[-1]-data[-2])\ + / (data['lookup_dim'].values[-1] + - data['lookup_dim'].values[-2]) + outdata = data[-1] + k*(x - data['lookup_dim'].values[-1]) + else: + outdata = data[-1] 
warnings.warn( self.py_name + "\n" + "extrapolating data above the maximum value of the series") elif x < data['lookup_dim'].values[0]: - outdata = data[0] + if self.interp == "extrapolate": + # extrapolate method for xmile models + k = (data[1]-data[0])\ + / (data['lookup_dim'].values[1] + - data['lookup_dim'].values[0]) + outdata = data[0] + k*(x - data['lookup_dim'].values[0]) + else: + outdata = data[0] warnings.warn( self.py_name + "\n" + "extrapolating data below the minimum value of the series") + elif self.interp == 'hold_backward': + outdata = data.sel(lookup_dim=x, method="pad") else: outdata = data.interp(lookup_dim=x) @@ -67,7 +85,7 @@ def _call(self, data, x): class HardcodedLookups(Lookups): """Class for lookups defined in the file""" - def __init__(self, x, y, coords, py_name): + def __init__(self, x, y, coords, interp, py_name): # TODO: avoid add and merge all declarations in one definition self.is_float = not bool(coords) self.py_name = py_name @@ -78,6 +96,7 @@ def __init__(self, x, y, coords, py_name): ["lookup_dim"] + list(coords) ) self.x = set(x) + self.interp = interp def add(self, x, y, coords): y = np.array(y).reshape((len(x),) + (1,)*len(coords)) diff --git a/pysd/pysd.py b/pysd/pysd.py index 0aaa574a..bdf0b0bb 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -71,7 +71,9 @@ def read_xmile(xmile_file, data_files=None, initialize=True, old=False, xmile_file_obj.parse() abs_model = xmile_file_obj.get_abstract_model() + #print(abs_model.dump(indent=" ")) py_model_file = ModelBuilder(abs_model).build_model() + model = load(py_model_file, data_files, initialize, missing_values) model.xmile_file = str(xmile_file) return model diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translation/structures/abstract_expressions.py index 7996269a..6930a64c 100644 --- a/pysd/translation/structures/abstract_expressions.py +++ b/pysd/translation/structures/abstract_expressions.py @@ -207,10 +207,11 @@ class LookupsStructure: y: tuple x_range: tuple y_range: tuple + type: str def __str__(self) -> str: - return "LookupStructure:\n\tx %s = %s\n\ty %s = %s\n" % ( - self.x_range, self.x, self.y_range, self.y + return "LookupStructure (%s):\n\tx %s = %s\n\ty %s = %s\n" % ( + self.type, self.x_range, self.x, self.y_range, self.y ) diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index db09a269..f2b9d7be 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -323,7 +323,8 @@ def visit_regularLookup(self, n, vc): x=tuple(values[:, 0]), y=tuple(values[:, 1]), x_range=tuple(xy_range[:, 0]), - y_range=tuple(xy_range[:, 1]) + y_range=tuple(xy_range[:, 1]), + type="interpolate" ) def visit_excelLookup(self, n, vc): @@ -424,7 +425,8 @@ def visit_lookup_with_def(self, n, vc): x=tuple(values[:, 0]), y=tuple(values[:, 1]), x_range=tuple(xy_range[:, 0]), - y_range=tuple(xy_range[:, 1]) + y_range=tuple(xy_range[:, 1]), + type="interpolate" ) return self.add_element(structures["with_lookup"]( diff --git a/pysd/translation/xmile/parsing_grammars/equations.peg b/pysd/translation/xmile/parsing_grammars/equations.peg new file mode 100644 index 00000000..7affb0a2 --- /dev/null +++ b/pysd/translation/xmile/parsing_grammars/equations.peg @@ -0,0 +1,53 @@ +# Parsing Expression Grammar: components + +expr_type = array / final_expr / empty + +final_expr = conditional_statement / logic2_expr + +logic2_expr = logic_expr (_ logic_oper _ logic_expr)* # logic operators (:and:, :or:) +logic_expr = 
not_oper? _ comp_expr  # :not: operator
+comp_expr = add_expr (_ comp_oper _ add_expr)?  # comparison (e.g. '<', '>=')
+add_expr = prod_expr (_ add_oper _ prod_expr)*  # addition and subtraction
+prod_expr = exp_expr (_ prod_oper _ exp_expr)*  # product and division
+exp_expr = neg_expr (_ exp_oper _ neg_expr)*  # exponentiation
+neg_expr = pre_oper? _ expr  # pre operators (-, +)
+expr = call / parens / number / reference
+
+arguments = ((string / final_expr) _ ","? _)*
+parens = "(" _ final_expr _ ")"
+
+call = reference _ "(" _ arguments _ ")"
+conditional_statement = "IF" _ logic2_expr _ "THEN" _ logic2_expr _ "ELSE" _ logic2_expr
+
+reference = (name _ subscript_list) / name  # check first for subscript
+subscript_list = "[" _ (name _ "!"? _ ","? _)+ _ "]"
+
+array = (raw_number _ ("," / ";")? _)+ !~r"."  # negative lookahead for
+
+logic_oper = ~r"(%(logic_ops)s)"IU
+not_oper = ~r"(%(not_ops)s)"IU
+comp_oper = ~r"(%(comp_ops)s)"IU
+add_oper = ~r"(%(add_ops)s)"IU
+prod_oper = ~r"(%(prod_ops)s)"IU
+exp_oper = ~r"(%(exp_ops)s)"IU
+pre_oper = ~r"(%(pre_ops)s)"IU
+
+empty = "" # empty string
+
+_ = spacechar*
+spacechar = " "* ~"\t"*
+
+name = basic_id / escape_group
+
+# This takes care of models with Unicode variable names
+basic_id = id_start id_continue*
+
+id_start = ~r"[\w]"IU
+id_continue = id_start / ~r"[0-9\'\$\_]"
+
+# between quotes, either escaped quote or character that is not a quote
+escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\""
+
+number = raw_number
+raw_number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?"
+string = "\'" (~r"[^\']"IU)* "\'"
diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py
index ca0f25e1..4b5741dc 100644
--- a/pysd/translation/xmile/xmile_element.py
+++ b/pysd/translation/xmile/xmile_element.py
@@ -1,10 +1,9 @@
 import re
-import warnings
 
 import parsimonious
 import numpy as np
 
-from ..structures.abstract_model import AbstractData, AbstractLookup,\
-    AbstractComponent
+from ..structures.abstract_model import AbstractElement, AbstractLookup,\
+    AbstractComponent
 from . 
import xmile_utils as vu from .xmile_structures import structures, parsing_ops @@ -12,12 +11,20 @@ class Element(): + interp_methods = { + "continuous": "interpolate", + "extrapolate": "extrapolate", + "discrete": "hold_backward" + } + def __init__(self, node, ns): self.node = node self.ns = ns - self.name = node.attrib['name'] - self.units = self.get_xpath_text(node, 'ns:units') or "" - self.documentation = self.get_xpath_text(node, 'ns:doc') or "" + self.name = node.attrib["name"] + self.units = self.get_xpath_text(node, "ns:units") or "" + self.documentation = self.get_xpath_text(node, "ns:doc") or "" + self.limits = (None, None) + self.components = [] def __str__(self): text = "\n%s definition: %s" % (self.kind, self.name) @@ -84,38 +91,49 @@ def parse_lookup_xml_node(self, node): sep=xs_node.attrib['sep'] if 'sep' in xs_node.attrib else ',' ) - type = node.attrib['type'] if 'type' in node.attrib else 'continuous' - - functions_map = { - "continuous": { - "name": "lookup", - "module": "functions" - }, - 'extrapolation': { - "name": "lookup_extrapolation", - "module": "functions" - }, - 'discrete': { - "name": "lookup_discrete", - "module": "functions" - } - } - lookup_function = functions_map[type] if type in functions_map\ - else functions_map['continuous'] - - return { - 'name': node.attrib['name'] if 'name' in node.attrib else '', - 'xs': xs, - 'ys': ys, - 'type': type, - 'function': lookup_function - } + interp = node.attrib['type'] if 'type' in node.attrib else 'continuous' + + return structures["lookup"]( + x=tuple(xs[np.argsort(xs)]), + y=tuple(ys[np.argsort(xs)]), + x_range=(np.min(xs), np.max(xs)), + y_range=(np.min(ys), np.max(ys)), + type=self.interp_methods[interp] + ) + + def _parse(self): + if self.node.xpath("ns:element", namespaces=self.ns): + for subnode in self.node.xpath("ns:element", namespaces=self.ns): + self.components.append( + ((subnode.attrib["subscript"].split(","), []), + self._parse_component(subnode)) + ) + else: + subscripts = [] + for subnode in self.node.xpath("ns:dimensions/ns:dim", namespaces=self.ns): + subscripts.append(subnode.attrib["name"]) + self.components = [ + ((subscripts, []), + self._parse_component(self.node)) + ] + + def smile_parser(self, expression): + tree = vu.Grammar.get("equations", parsing_ops).parse(expression) + return EquationParser(tree).translation + + def get_abstract_element(self): + return AbstractElement( + name=self.name, + units=self.units, + range=self.limits, + documentation=self.documentation, + components=[]) class Flaux(Element): """Flow or auxiliary variable""" def __init__(self, node, ns): - super.__init__(node, ns) + super().__init__(node, ns) self.limits = self.get_lims() @property @@ -126,38 +144,29 @@ def _verbose(self): def verbose(self): print(self._verbose) - def _parse(self): - eqn = self.get_xpath_text(self.node, 'ns:eqn') + def _parse_component(self, node): + eqn = self.get_xpath_text(node, 'ns:eqn') # Replace new lines with space, and replace 2 or more spaces with # single space. 
Then ensure there is no space at start or end of # equation eqn = re.sub(r"(\s{2,})", " ", eqn.replace("\n", ' ')).strip() - ast = smile_parser.parse(eqn, element) + ast = self.smile_parser(eqn) - gf_node = self.node.xpath("ns:gf", namespace=self.ns) + gf_node = self.node.xpath("ns:gf", namespaces=self.ns) if len(gf_node) > 0: - gf_data = parse_lookup_xml_node(gf_node[0]) - xs = '[' + ','.join("%10.3f" % x for x in gf_data['xs']) + ']' - ys = '[' + ','.join("%10.3f" % x for x in gf_data['ys']) + ']' - py_expr =\ - builder.build_function_call(gf_data['function'], - [element['py_expr'], xs, ys])\ - + ' if x is None else '\ - + builder.build_function_call(gf_data['function'], - ['x', xs, ys]) - element.update({ - 'kind': 'lookup', - # This lookup declared as inline, so we should implement - # inline mode for flow and aux - 'arguments': "x = None", - 'py_expr': py_expr - }) - - self.ast = ast + ast = structures["inline_lookup"]( + ast, self.parse_lookup_xml_node(gf_node[0])) + + return ast def get_abstract_component(self): - return AbstractComponent(subscripts=self.subscripts, ast=self.ast) + ae = self.get_abstract_element() + for component in self.components: + ae.components.append(AbstractComponent( + subscripts=component[0], + ast=component[1])) + return ae class Gf(Element): @@ -165,7 +174,7 @@ class Gf(Element): kind = "Gf component" def __init__(self, node, ns): - super.__init__(node, ns) + super().__init__(node, ns) self.limits = self.get_lims() def get_lims(self): @@ -175,14 +184,16 @@ def get_lims(self): ) return tuple(float(x) if x is not None else x for x in lims) - def _parse(self): - gf_data = self.parse_lookup_xml_node(self.node) - xs = gf_data['xs'] - ys = gf_data['ys'] - self.ast = None + def _parse_component(self, node): + return self.parse_lookup_xml_node(self.node) def get_abstract_component(self): - return AbstractLookup(subscripts=self.subscripts, ast=self.ast) + ae = self.get_abstract_element() + for component in self.components: + ae.components.append(AbstractLookup( + subscripts=component[0], + ast=component[1])) + return ae class Stock(Element): @@ -190,16 +201,16 @@ class Stock(Element): kind = "Stock component" def __init__(self, node, ns): - super.__init__(node, ns) + super().__init__(node, ns) self.limits = self.get_lims() - def _parse(self): + def _parse_component(self, node): # Parse each flow equations inflows = [ - smile_parser.parse(inflow.text) + self.smile_parser(inflow.text) for inflow in self.node.xpath('ns:inflow', namespaces=self.ns)] outflows = [ - smile_parser.parse(outflow.text) + self.smile_parser(outflow.text) for outflow in self.node.xpath('ns:outflow', namespaces=self.ns)] if inflows: @@ -219,15 +230,42 @@ def _parse(self): flows = structures["arithmetic"](expr, inflows+outflows) else: # stock has only one flow - flows = inflows + outflows + flows = inflows[0] if inflows else outflows[0] # Read the initial value equation for stock element - initial = smile_parser.parse(self.get_xpath_text(self.node, 'ns:eqn')) + initial = self.smile_parser(self.get_xpath_text(self.node, 'ns:eqn')) + + return structures["stock"](flows, initial) + + def get_abstract_component(self): + ae = self.get_abstract_element() + for component in self.components: + ae.components.append(AbstractComponent( + subscripts=component[0], + ast=component[1])) + return ae + - self.ast = structures["stock"](flows, initial) +class ControlElement(Element): + """Control variable (lookup)""" + kind = "Control bvariable" + + def __init__(self, name, units, documentation, eqn): + self.name = 
name + self.units = units + self.documentation = documentation + self.limits = (None, None) + self.eqn = eqn + + def _parse(self): + self.ast = self.smile_parser(self.eqn) def get_abstract_component(self): - return AbstractComponent(subscripts=self.subscripts, ast=self.ast) + ae = self.get_abstract_element() + ae.components.append(AbstractComponent( + subscripts=([], []), + ast=self.ast)) + return ae class SubscriptRange(): @@ -252,8 +290,7 @@ def verbose(self): print(self._verbose) - -class ComponentsParser(parsimonious.NodeVisitor): +class EquationParser(parsimonious.NodeVisitor): def __init__(self, ast): self.translation = None self.elements = {} @@ -264,14 +301,14 @@ def __init__(self, ast): def visit_expr_type(self, n, vc): self.translation = self.elements[vc[0]] - def visit_final_expr(self, n, vc): + def visit_logic2_expr(self, n, vc): return vu.split_arithmetic( structures["logic"], parsing_ops["logic_ops"], "".join(vc).strip(), self.elements) def visit_logic_expr(self, n, vc): id = vc[2] - if vc[0].lower() == ":not:": + if vc[0].lower() == "not": id = self.add_element(structures["logic"]( [":NOT:"], (self.elements[id],) @@ -311,38 +348,26 @@ def visit_call(self, n, vc): func = self.elements[vc[0]] args = self.elements[vc[4]] if func.reference in structures: - return self.add_element(structures[func.reference](*args)) + func_str = structures[func.reference] + if isinstance(func_str, dict): + return self.add_element(func_str[len(args)](*args)) + else: + return self.add_element(func_str(*args)) else: return self.add_element(structures["call"](func, args)) + def visit_conditional_statement(self, n, vc): + return self.add_element(structures["if_then_else"]( + self.elements[vc[2]], + self.elements[vc[6]], + self.elements[vc[10]])) + def visit_reference(self, n, vc): id = self.add_element(structures["reference"]( - vc[0].lower().replace(" ", "_"), self.subs)) + vc[0].lower().replace(" ", "_").strip("\""), self.subs)) self.subs = None return id - def visit_range(self, n, vc): - return self.add_element(n.text.strip()[:-1].replace(")-(", "),(")) - - def visit_lookup_with_def(self, n, vc): - if vc[10]: - xy_range = np.array(eval(self.elements[vc[10]])) - else: - xy_range = np.full((2, 2), np.nan) - - values = np.array((eval(vc[11]))) - values = values[np.argsort(values[:, 0])] - - lookup = structures["lookup"]( - x=tuple(values[:, 0]), - y=tuple(values[:, 1]), - x_range=tuple(xy_range[:, 0]), - y_range=tuple(xy_range[:, 1]) - ) - - return self.add_element(structures["with_lookup"]( - self.elements[vc[4]], lookup)) - def visit_array(self, n, vc): if ";" in n.text or "," in n.text: return self.add_element(np.squeeze(np.array( @@ -352,7 +377,7 @@ def visit_array(self, n, vc): return self.add_element(eval(n.text)) def visit_subscript_list(self, n, vc): - subs = [x.strip() for x in vc[2].split(",")] + subs = [x.strip().replace("_", " ") for x in vc[2].split(",")] self.subs = structures["subscripts_ref"](subs) return "" diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translation/xmile/xmile_section.py index eea777df..3cf63bc0 100644 --- a/pysd/translation/xmile/xmile_section.py +++ b/pysd/translation/xmile/xmile_section.py @@ -4,11 +4,13 @@ from ..structures.abstract_model import\ AbstractElement, AbstractSubscriptRange, AbstractSection -from .xmile_element import SubscriptRange, Flaux, Gf, Stock +from .xmile_element import ControlElement, SubscriptRange, Flaux, Gf, Stock class FileSection(): # File section dataclass + control_vars = ["initial_time", "final_time", "time_step", 
"saveper"] + def __init__(self, name: str, path: Path, type: str, params: List[str], returns: List[str], content_root: str, namespace: str, split: bool, @@ -46,6 +48,8 @@ def verbose(self): def _parse(self): self.subscripts = self._parse_subscripts() self.components = self._parse_components() + if self.name == "__main__": + self.components += self._parse_control_vars() self.elements = self.subscripts + self.components def _parse_subscripts(self): @@ -61,24 +65,84 @@ def _parse_subscripts(self): subscripts_ranges.append(SubscriptRange(name, subscripts, [])) return subscripts_ranges - def _parse_components(self): - components = [] + def _parse_control_vars(self): + + # Read the start time of simulation + node = self.content.xpath('ns:sim_specs', namespaces=self.ns)[0] + time_units = node.attrib['time_units'] if 'time_units' in node.attrib else "" + + control_vars = [] + + control_vars.append(ControlElement( + name="INITIAL TIME", + units=time_units, + documentation="The initial time for the simulation.", + eqn=node.xpath("ns:start", namespaces=self.ns)[0].text + )) + + control_vars.append(ControlElement( + name="FINAL TIME", + units=time_units, + documentation="The final time for the simulation.", + eqn=node.xpath("ns:stop", namespaces=self.ns)[0].text + )) + + # Read the time step of simulation + dt_node = node.xpath("ns:dt", namespaces=self.ns) + + # Use default value for time step if `dt` is not specified in model + dt_eqn = "1" + if len(dt_node) > 0: + dt_node = dt_node[0] + dt_eqn = dt_node.text + # If reciprocal mode are defined for `dt`, we should inverse value + if "reciprocal" in dt_node.attrib\ + and dt_node.attrib["reciprocal"].lower() == "true": + dt_eqn = "1/(" + dt_eqn + ")" + + control_vars.append(ControlElement( + name="TIME STEP", + units=time_units, + documentation="The time step for the simulation.", + eqn=dt_eqn + )) + + control_vars.append(ControlElement( + name="SAVEPER", + units=time_units, + documentation="The save time step for the simulation.", + eqn="time_step" + )) + + [component._parse() for component in control_vars] + return control_vars - flaux_xpath = "ns:model/ns:variables/ns:aux|"\ - "ns:model/ns:variables/ns:flow" - for node in self.conten.xpath(flaux_xpath, namespace=self.ns): - # flows and auxiliary variables - components.append(Flaux(node, self.ns)) + def _parse_components(self): - gf_xpath = "ns:model/ns:variables/ns:gf" - for node in self.conten.xpath(gf_xpath, namespace=self.ns): - # Lookups - components.append(Gf(node, self.ns)) + # Add flows and auxiliary variables + components = [ + Flaux(node, self.ns) + for node in self.content.xpath( + "ns:model/ns:variables/ns:aux|ns:model/ns:variables/ns:flow", + namespaces=self.ns) + if node.attrib["name"].lower().replace(" ", "_") + not in self.control_vars] + + # Add lookups + components += [ + Gf(node, self.ns) + for node in self.content.xpath( + "ns:model/ns:variables/ns:gf", + namespaces=self.ns) + ] - stock_xpath = "ns:model/ns:variables/ns:stock" - for node in self.conten.xpath(stock_xpath, namespace=self.ns): - # Integs (stocks) - components.append(Stock(node, self.ns)) + # Add stocks + components += [ + Stock(node, self.ns) + for node in self.content.xpath( + "ns:model/ns:variables/ns:stock", + namespaces=self.ns) + ] [component._parse() for component in components] return components @@ -91,7 +155,10 @@ def get_abstract_section(self): params=self.params, returns=self.returns, subscripts=self.solve_subscripts(), - elements=self.merge_components(), + elements=[ + component.get_abstract_component() + 
for component in self.components + ], split=self.split, views_dict=self.views_dict ) @@ -102,24 +169,3 @@ def solve_subscripts(self): subscripts=subs_range.definition, mapping=subs_range.mapping ) for subs_range in self.subscripts] - - def merge_components(self): - merged = {} - for component in self.components: - name = component.name.lower().replace(" ", "_") - if name not in merged: - merged[name] = AbstractElement( - name=component.name, - components=[]) - - if component.units: - merged[name].units = component.units - if component.limits[0] is not None\ - or component.limits[1] is not None: - merged[name].range = component.limits - if component.documentation: - merged[name].documentation = component.documentation - - merged[name].components.append(component.get_abstract_component()) - - return list(merged.values()) diff --git a/pysd/translation/xmile/xmile_structures.py b/pysd/translation/xmile/xmile_structures.py index 33d8ce9e..2f37be37 100644 --- a/pysd/translation/xmile/xmile_structures.py +++ b/pysd/translation/xmile/xmile_structures.py @@ -8,38 +8,53 @@ "arithmetic": ae.ArithmeticStructure, "logic": ae.LogicStructure, "inline_lookup": ae.InlineLookupsStructure, + "lookup": ae.LookupsStructure, "call": ae.CallStructure, - "game": ae.GameStructure, - "get_xls_lookups": ae.GetLookupsStructure, - "get_direct_lookups": ae.GetLookupsStructure, - "get_xls_data": ae.GetDataStructure, - "get_direct_data": ae.GetDataStructure, - "get_xls_constants": ae.GetConstantsStructure, - "get_direct_constants": ae.GetConstantsStructure, - "initial": ae.InitialStructure, + "init": ae.InitialStructure, "stock": ae.IntegStructure, - "delay1": lambda x, y: ae.DelayStructure(x, y, x, 1), - "delay1i": lambda x, y, z: ae.DelayStructure(x, y, z, 1), - "delay3": lambda x, y: ae.DelayStructure(x, y, x, 3), - "delay3i": lambda x, y, z: ae.DelayStructure(x, y, z, 3), - "delay_n": ae.DelayNStructure, - "delay_fixed": ae.DelayFixedStructure, - "smooth": lambda x, y: ae.SmoothStructure(x, y, x, 1), - "smoothi": lambda x, y, z: ae.SmoothStructure(x, y, z, 1), - "smooth3": lambda x, y: ae.SmoothStructure(x, y, x, 3), - "smooth3i": lambda x, y, z: ae.SmoothStructure(x, y, z, 3), - "smooth_n": ae.SmoothNStructure, - "trend": ae.TrendStructure, - "forecast": ae.ForecastStructure, - "sample_if_true": ae.SampleIfTrueStructure, - "lookup": ae.LookupsStructure, - "data": ae.DataStructure + "delay1": { + 2: lambda x, y: ae.DelayStructure(x, y, x, 1), + 3: lambda x, y, z: ae.DelayStructure(x, y, z, 1) + }, + "delay3": { + 2: lambda x, y: ae.DelayStructure(x, y, x, 3), + 3: lambda x, y, z: ae.DelayStructure(x, y, z, 3), + }, + "delayn": { + 3: lambda x, y, n: ae.DelayNStructure(x, y, x, n), + 4: ae.DelayNStructure, + }, + "smth1": { + 2: lambda x, y: ae.SmoothStructure(x, y, x, 1), + 3: lambda x, y, z: ae.SmoothStructure(x, y, z, 1) + }, + "smth3": { + 2: lambda x, y: ae.SmoothStructure(x, y, x, 3), + 3: lambda x, y, z: ae.SmoothStructure(x, y, z, 3) + }, + "smthn": { + 3: lambda x, y, n: ae.SmoothNStructure(x, y, x, n), + 4: ae.SmoothNStructure + }, + "trend": { + 2: lambda x, y: ae.TrendStructure(x, y, 0), + 3: ae.TrendStructure, + }, + "safediv": { + 2: lambda x, y: ae.CallStructure( + ae.ReferenceStructure("zidz"), (x, y)), + 3: lambda x, y, z: ae.CallStructure( + ae.ReferenceStructure("xidz"), (x, y, z)) + }, + "if_then_else": lambda x, y, z: ae.CallStructure( + ae.ReferenceStructure("if_then_else"), (x, y, z)), + "negative": lambda x: ae.ArithmeticStructure(["negative"], (x,)) } operators = { - "logic_ops": [":AND:", 
":OR:"], - "not_ops": [":NOT:"], + "logic_ops": ["and", "or"], + "not_ops": ["not"], "comp_ops": ["=", "<>", "<=", "<", ">=", ">"], "add_ops": ["+", "-"], "prod_ops": ["*", "/"], diff --git a/pysd/translation/xmile/xmile_utils.py b/pysd/translation/xmile/xmile_utils.py index ca369555..73873fd6 100644 --- a/pysd/translation/xmile/xmile_utils.py +++ b/pysd/translation/xmile/xmile_utils.py @@ -10,7 +10,7 @@ class Grammar(): _common_grammar = None - _grammar_path: Path = Path(__file__).parent.joinpath("parsing_expr") + _grammar_path: Path = Path(__file__).parent.joinpath("parsing_grammars") _grammar: Dict = {} @classmethod @@ -30,18 +30,7 @@ def _read_grammar(cls, grammar: str) -> str: with cls._gpath(grammar).open(encoding="ascii") as gfile: source_grammar: str = gfile.read() - return cls._include_common_grammar(source_grammar) - - @classmethod - def _include_common_grammar(cls, source_grammar: str) -> str: - """Include common grammar""" - if not cls._common_grammar: - with cls._gpath("common_grammar").open(encoding="ascii") as gfile: - cls._common_grammar: str = gfile.read() - - return r"{source_grammar}{common_grammar}".format( - source_grammar=source_grammar, common_grammar=cls._common_grammar - ) + return source_grammar @classmethod def _gpath(cls, grammar: str) -> Path: @@ -55,26 +44,14 @@ def clean(cls) -> None: cls._grammar: Dict = {} -def _detect_encoding_from_file(mdl_file: Path) -> str: - """Detect and return the encoding from a Vensim file""" - try: - with mdl_file.open("rb") as in_file: - f_line: bytes = in_file.readline() - f_line: str = f_line.decode(detect(f_line)['encoding']) - return re.search(r"(?<={)(.*)(?=})", f_line).group() - except (AttributeError, UnicodeDecodeError): - warnings.warn( - "No encoding specified or detected to translate the model " - "file. 'UTF-8' encoding will be used.") - return "UTF-8" - - def split_arithmetic(structure: object, parsing_ops: dict, expression: str, elements: dict, negatives: set = set()) -> object: pattern = re.compile(parsing_ops) parts = pattern.split(expression) ops = pattern.findall(expression) + ops = list(map( + lambda x: x.replace('and', ':AND:').replace('or', ':OR:'), ops)) if not ops: if parts[0] in negatives: negatives.remove(parts[0]) diff --git a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py index 620ba6cd..b252ef30 100644 --- a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py +++ b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py @@ -110,10 +110,10 @@ "folder": "lookups", "file": "test_lookups_ypts_sep.xmile" }, - "lookups_inline": pytest.param({ + "lookups_inline": { "folder": "lookups_inline", "file": "test_lookups_inline.xmile" - }, marks=pytest.mark.xfail(reason="failing originally")), + }, "macro_expression": pytest.param({ "folder": "macro_expression", "file": "test_macro_expression.xmile" @@ -158,42 +158,42 @@ "folder": "sqrt", "file": "test_sqrt.xmile" }, - "subscript_1d_arrays": { + "subscript_1d_arrays": pytest.param({ "folder": "subscript_1d_arrays", "file": "test_subscript_1d_arrays.xmile" - }, - "subscript_constant_call": { + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), + "subscript_constant_call": pytest.param({ "folder": "subscript_constant_call", "file": "test_subscript_constant_call.xmile" - }, + }, marks=pytest.mark.xfail(reason="eqn with ??? 
in the model")), "subscript_individually_defined_1d_arrays": { "folder": "subscript_individually_defined_1d_arrays", "file": "subscript_individually_defined_1d_arrays.xmile" }, - "subscript_mixed_assembly": { + "subscript_mixed_assembly": pytest.param({ "folder": "subscript_mixed_assembly", "file": "test_subscript_mixed_assembly.xmile" - }, - "subscript_multiples": { + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), + "subscript_multiples": pytest.param({ "folder": "subscript_multiples", "file": "test_multiple_subscripts.xmile" - }, - "subscript_subranges": { + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), + "subscript_subranges": pytest.param({ "folder": "subscript_subranges", "file": "test_subscript_subrange.xmile" - }, - "subscript_subranges_equal": { + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), + "subscript_subranges_equal": pytest.param({ "folder": "subscript_subranges_equal", "file": "test_subscript_subrange_equal.xmile" - }, - "subscript_updimensioning": { + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), + "subscript_updimensioning": pytest.param({ "folder": "subscript_updimensioning", "file": "test_subscript_updimensioning.xmile" - }, - "subscripted_flows": { + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), + "subscripted_flows": pytest.param({ "folder": "subscripted_flows", "file": "test_subscripted_flows.xmile" - }, + }, marks=pytest.mark.xfail(reason="eqn with ??? in the model")), "trig": { "folder": "trig", "file": "test_trig.xmile" From 68f44479b29b1e579394be4ebbff7eab5aba3ade Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 10 Mar 2022 18:07:54 +0100 Subject: [PATCH 06/96] Document translators and remove old files --- pysd/building/python/python_utils.py | 61 - pysd/pysd.py | 64 +- pysd/tools/benchmarking.py | 7 +- pysd/translation/builder.py | 2396 ----------------- pysd/translation/utils.py | 522 ---- .../parsing_grammars/common_grammar.peg | 2 +- .../vensim/parsing_grammars/components.peg | 2 +- .../parsing_grammars/element_object.peg | 2 +- pysd/translation/vensim/vensim2py.py | 1972 -------------- pysd/translation/vensim/vensim_element.py | 77 +- pysd/translation/vensim/vensim_file.py | 57 +- pysd/translation/vensim/vensim_section.py | 47 +- pysd/translation/xmile/SMILE2Py.py | 357 --- pysd/translation/xmile/smile.grammar | 25 - pysd/translation/xmile/xmile2py.py | 405 --- pysd/translation/xmile/xmile_element.py | 188 +- pysd/translation/xmile/xmile_file.py | 36 +- pysd/translation/xmile/xmile_section.py | 98 +- tests/integration_test_vensim_pathway.py | 543 ---- tests/integration_test_xmile_pathway.py | 135 - tests/unit_test_builder.py | 430 --- tests/unit_test_pysd.py | 47 - tests/unit_test_translation_utils.py | 245 -- tests/unit_test_vensim2py.py | 1181 -------- tests/unit_test_xmile2py.py | 67 - 25 files changed, 400 insertions(+), 8566 deletions(-) delete mode 100644 pysd/building/python/python_utils.py delete mode 100644 pysd/translation/builder.py delete mode 100644 pysd/translation/utils.py delete mode 100644 pysd/translation/vensim/vensim2py.py delete mode 100644 pysd/translation/xmile/SMILE2Py.py delete mode 100644 pysd/translation/xmile/smile.grammar delete mode 100644 pysd/translation/xmile/xmile2py.py delete mode 100644 tests/integration_test_vensim_pathway.py delete mode 100644 tests/integration_test_xmile_pathway.py delete mode 100644 tests/unit_test_builder.py delete mode 100644 tests/unit_test_translation_utils.py delete mode 100644 
tests/unit_test_vensim2py.py delete mode 100644 tests/unit_test_xmile2py.py diff --git a/pysd/building/python/python_utils.py b/pysd/building/python/python_utils.py deleted file mode 100644 index 1836bdac..00000000 --- a/pysd/building/python/python_utils.py +++ /dev/null @@ -1,61 +0,0 @@ -import re -import warnings -import numpy as np - -# used to create python safe names with the variable reserved_words -from keyword import kwlist -from builtins import __dir__ as bidir -from pysd.py_backend.components import __dir__ as cdir -from pysd.py_backend.data import __dir__ as ddir -from pysd.py_backend.decorators import __dir__ as dedir -from pysd.py_backend.external import __dir__ as edir -from pysd.py_backend.functions import __dir__ as fdir -from pysd.py_backend.statefuls import __dir__ as sdir -from pysd.py_backend.utils import __dir__ as udir - - -reserved_words = set( - dir() + bidir() + cdir() + ddir() + dedir() + edir() + fdir() - + sdir() + udir()).union(kwlist) - - -def simplify_subscript_input(coords, subscript_dict, return_full, merge_subs): - """ - Parameters - ---------- - coords: dict - Coordinates to write in the model file. - - subscript_dict: dict - The subscript dictionary of the model file. - - return_full: bool - If True the when coords == subscript_dict, '_subscript_dict' - will be returned - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects - - Returns - ------- - coords: str - The equations to generate the coord dicttionary in the model file. - - """ - - if coords == subscript_dict and return_full: - # variable defined with all the subscripts - return "_subscript_dict" - - coordsp = [] - for ndim, (dim, coord) in zip(merge_subs, coords.items()): - # find dimensions can be retrieved from _subscript_dict - if coord == subscript_dict[dim]: - # use _subscript_dict - coordsp.append(f"'{ndim}': _subscript_dict['{dim}']") - else: - # write whole dict - coordsp.append(f"'{ndim}': {coord}") - - return "{" + ", ".join(coordsp) + "}" diff --git a/pysd/pysd.py b/pysd/pysd.py index bdf0b0bb..eb55771f 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -6,7 +6,10 @@ """ import sys -from .py_backend.statefuls import Model +from pysd.translation.vensim.vensim_file import VensimFile +from pysd.translation.xmile.xmile_file import XmileFile +from pysd.building.python.python_model_builder import ModelBuilder +from pysd.py_backend.statefuls import Model if sys.version_info[:2] < (3, 7): # pragma: no cover @@ -23,7 +26,7 @@ ) -def read_xmile(xmile_file, data_files=None, initialize=True, old=False, +def read_xmile(xmile_file, data_files=None, initialize=True, missing_values="warning"): """ Construct a model from `.xmile` file. 
@@ -60,28 +63,26 @@ def read_xmile(xmile_file, data_files=None, initialize=True, old=False, >>> model = read_xmile('../tests/test-models/samples/teacup/teacup.xmile') """ - if old: - # TODO: remove when this branch is ready to merge - from .translation.xmile.xmile2py import translate_xmile - py_model_file = translate_xmile(xmile_file) - else: - from pysd.translation.xmile.xmile_file import XmileFile - from pysd.building.python.python_model_builder import ModelBuilder - xmile_file_obj = XmileFile(xmile_file) - xmile_file_obj.parse() - - abs_model = xmile_file_obj.get_abstract_model() - #print(abs_model.dump(indent=" ")) - py_model_file = ModelBuilder(abs_model).build_model() + # Read and parse Xmile file + xmile_file_obj = XmileFile(xmile_file) + xmile_file_obj.parse() + # get AbstractModel + abs_model = xmile_file_obj.get_abstract_model() + + # build python file + py_model_file = ModelBuilder(abs_model).build_model() + + # load python file model = load(py_model_file, data_files, initialize, missing_values) model.xmile_file = str(xmile_file) + return model def read_vensim(mdl_file, data_files=None, initialize=True, missing_values="warning", split_views=False, - encoding=None, old=False, **kwargs): + encoding=None, **kwargs): """ Construct a model from Vensim `.mdl` file. @@ -136,25 +137,24 @@ def read_vensim(mdl_file, data_files=None, initialize=True, >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ - if old: - # TODO: remove when this branch is ready to merge - from .translation.vensim.vensim2py import translate_vensim - py_model_file = translate_vensim( - mdl_file, split_views, encoding, **kwargs) - else: - from pysd.translation.vensim.vensim_file import VensimFile - from pysd.building.python.python_model_builder import ModelBuilder - ven_file = VensimFile(mdl_file) - ven_file.parse() - if split_views: - subview_sep = kwargs.get("subview_sep", "") - ven_file.parse_sketch(subview_sep) - - abs_model = ven_file.get_abstract_model() - py_model_file = ModelBuilder(abs_model).build_model() + # Read and parse Vensim file + ven_file = VensimFile(mdl_file, encoding=encoding) + ven_file.parse() + if split_views: + # split variables per views + subview_sep = kwargs.get("subview_sep", "") + ven_file.parse_sketch(subview_sep) + # get AbstractModel + abs_model = ven_file.get_abstract_model() + + # build python file + py_model_file = ModelBuilder(abs_model).build_model() + + # load python file model = load(py_model_file, data_files, initialize, missing_values) model.mdl_file = str(mdl_file) + return model diff --git a/pysd/tools/benchmarking.py b/pysd/tools/benchmarking.py index debcf99c..313b2b46 100644 --- a/pysd/tools/benchmarking.py +++ b/pysd/tools/benchmarking.py @@ -12,8 +12,7 @@ from ..py_backend.utils import load_outputs, detect_encoding -def runner(model_file, canonical_file=None, transpose=False, data_files=None, - old=False): +def runner(model_file, canonical_file=None, transpose=False, data_files=None): """ Translates and runs a model and returns its output and the canonical output. 
@@ -63,9 +62,9 @@ def runner(model_file, canonical_file=None, transpose=False, data_files=None, # load model if model_file.suffix.lower() == ".mdl": - model = read_vensim(model_file, data_files, old=old) + model = read_vensim(model_file, data_files) elif model_file.suffix.lower() == ".xmile": - model = read_xmile(model_file, data_files, old=old) + model = read_xmile(model_file, data_files) elif model_file.suffix.lower() == ".py": model = load(model_file, data_files) else: diff --git a/pysd/translation/builder.py b/pysd/translation/builder.py deleted file mode 100644 index 8be64936..00000000 --- a/pysd/translation/builder.py +++ /dev/null @@ -1,2396 +0,0 @@ -""" -These elements are used by the translator to construct the model from the -interpreted results. It is technically possible to use these functions to -build a model from scratch. But - it would be rather error prone. - -This is code to assemble a pysd model once all of the elements have -been translated from their native language into python compatible syntax. -There should be nothing here that has to know about either vensim or -xmile specific syntax. -""" - -import re -import os.path -import textwrap -import warnings -from io import open -import black -import json - -from . import utils - -from pysd._version import __version__ - - -class Imports(): - """ - Class to save the imported modules information for intelligent import - """ - _numpy, _xarray, _subs = False, False, False - _functions, _statefuls, _external, _data, _utils =\ - set(), set(), set(), set(), set() - _external_libs = {"numpy": "np", "xarray": "xr"} - _internal_libs = [ - "functions", "statefuls", "external", "data", "utils"] - - @classmethod - def add(cls, module, function=None): - """ - Add a function from module. - - Parameters - ---------- - module: str - module name. - - function: str or None - function name. If None module will be set to true. - - """ - if function: - getattr(cls, f"_{module}").add(function) - else: - setattr(cls, f"_{module}", True) - - @classmethod - def get_header(cls, outfile): - """ - Returns the importing information to print in the model file - - Parameters - ---------- - outfile: str - Name of the outfile to print in the header. - - Returns - ------- - text: str - Header of the translated model file. - - """ - text =\ - f'"""\nPython model \'{outfile}\'\nTranslated using PySD\n"""\n\n' - - text += "from pathlib import Path\n" - - for module, shortname in cls._external_libs.items(): - if getattr(cls, f"_{module}"): - text += f"import {module} as {shortname}\n" - - text += "\n" - - for module in cls._internal_libs: - if getattr(cls, f"_{module}"): - text += "from pysd.py_backend.%(module)s import %(methods)s\n"\ - % { - "module": module, - "methods": ", ".join(getattr(cls, f"_{module}"))} - - if cls._subs: - text += "from pysd import subs\n" - - cls.reset() - - return text - - @classmethod - def reset(cls): - """ - Reset the imported modules - """ - cls._numpy, cls._xarray, cls._subs = False, False, False - cls._functions, cls._statefuls, cls._external, cls._data,\ - cls._utils = set(), set(), set(), set(), set() - - -# Variable to save identifiers of external objects -build_names = set() - - -def build_modular_model(elements, subscript_dict, namespace, dependencies, - main_filename, elements_per_view): - - """ - This is equivalent to the build function, but is used when the - split_views parameter is set to True in the read_vensim function. 
- The main python model file will be named as the original model file, - and stored in the same folder. The modules will be stored in a separate - folder named modules + original_model_name. Three extra json files will - be generated, containing the namespace, subscripts_dict and the module - names plus the variables included in each module, respectively. - - Setting split_views=True is recommended for large models with many - different views. - - Parameters - ---------- - elements: list - Each element is a dictionary, with the various components needed - to assemble a model component in python syntax. This will contain - multiple entries for elements that have multiple definitions in - the original file, and which need to be combined. - - subscript_dict: dict - A dictionary containing the names of subscript families (dimensions) - as keys, and a list of the possible positions within that dimension - for each value. - - namespace: dict - Translation from original model element names (keys) to python safe - function identifiers (values). - - main_filename: str - The name of the file to write the main module of the model to. - - elements_per_view: dict - Contains the names of the modules and submodules as keys and the - variables in each specific module inside a list as values. - - """ - root_dir = os.path.dirname(main_filename) - model_name = os.path.basename(main_filename).split(".")[0] - modules_dir = os.path.join(root_dir, "modules_" + model_name) - # create modules directory if it does not exist - os.makedirs(modules_dir, exist_ok=True) - - def process_views_tree(view_name, - view_content, - working_directory, - processed_elements): - """ - Creates a directory tree based on the elements_per_view dictionary. - If it's the final view, it creates a file, if not, it creates a folder. 
- """ - if isinstance(view_content, list): # will become a module - subview_elems = [] - for element in elements: - if element.get("py_name") in view_content or \ - element.get("parent_name") in view_content: - subview_elems.append(element) - - _build_separate_module(subview_elems, subscript_dict, - view_name, working_directory) - processed_elements += subview_elems - - else: # the current view has subviews - working_directory = os.path.join(working_directory, view_name) - os.makedirs(working_directory, exist_ok=True) - - for subview_name, subview_content in view_content.items(): - process_views_tree(subview_name, - subview_content, - working_directory, - processed_elements) - - processed_elements = [] - for view_name, view_content in elements_per_view.items(): - process_views_tree(view_name, - view_content, - modules_dir, - processed_elements) - - # the unprocessed will go in the main file - unprocessed_elements = [ - element for element in elements if element not in processed_elements - ] - - # building main file using the build function - _build_main_module(unprocessed_elements, subscript_dict, main_filename) - - # create json file for the modules and corresponding model elements - with open(os.path.join(modules_dir, "_modules.json"), "w") as outfile: - json.dump(elements_per_view, outfile, indent=4, sort_keys=True) - - # create single namespace in a separate json file - with open( - os.path.join(root_dir, "_namespace_" + model_name + ".json"), "w" - ) as outfile: - json.dump(namespace, outfile, indent=4, sort_keys=True) - - # create single subscript_dict in a separate json file - with open( - os.path.join(root_dir, "_subscripts_" + model_name + ".json"), "w" - ) as outfile: - json.dump(subscript_dict, outfile, indent=4, sort_keys=True) - - # create single subscript_dict in a separate json file - with open( - os.path.join(root_dir, "_dependencies_" + model_name + ".json"), "w" - ) as outfile: - json.dump(dependencies, outfile, indent=4, sort_keys=True) - - -def _build_main_module(elements, subscript_dict, file_name): - """ - Constructs and writes the python representation of the main model - module, when the split_views=True in the read_vensim function. - - Parameters - ---------- - elements: list - Elements belonging to the main module. Ideally, there should only be - the initial_time, final_time, saveper and time_step, functions, though - there might be others in some situations. Each element is a - dictionary, with the various components needed to assemble a model - component in python syntax. This will contain multiple entries for - elements that have multiple definitions in the original file, and - which need to be combined. - - subscript_dict: dict - A dictionary containing the names of subscript families (dimensions) - as keys, and a list of the possible positions within that dimension - for each value. - - file_name: str - Path of the file where the main module will be stored. - - Returns - ------- - None or text: None or str - If file_name="return" it will return the content of the output file - instead of saving it. It is used for testing. 
- - """ - # separating between control variables and rest of variables - control_vars, funcs = _build_variables(elements, subscript_dict) - - Imports.add("utils", "load_model_data") - Imports.add("utils", "load_modules") - - # import of needed functions and packages - text = Imports.get_header(os.path.basename(file_name)) - - # import namespace from json file - text += textwrap.dedent(""" - __pysd_version__ = '%(version)s' - - __data = { - 'scope': None, - 'time': lambda: 0 - } - - _root = Path(__file__).parent - - _namespace, _subscript_dict, _dependencies, _modules = load_model_data( - _root, "%(outfile)s") - """ % { - "outfile": os.path.basename(file_name).split(".")[0], - "version": __version__ - }) - - text += _get_control_vars(control_vars) - - text += textwrap.dedent(""" - # load modules from modules_%(outfile)s directory - exec(load_modules("modules_%(outfile)s", _modules, _root, [])) - - """ % { - "outfile": os.path.basename(file_name).split(".")[0], - }) - - text += funcs - text = black.format_file_contents(text, fast=True, mode=black.FileMode()) - - # Needed for various sessions - build_names.clear() - - with open(file_name, "w", encoding="UTF-8") as out: - out.write(text) - - -def _build_separate_module(elements, subscript_dict, module_name, module_dir): - """ - Constructs and writes the python representation of a specific model - module, when the split_views=True in the read_vensim function - - Parameters - ---------- - elements: list - Elements belonging to the module module_name. Each element is a - dictionary, with the various components needed to assemble a model - component in python syntax. This will contain multiple entries for - elements that have multiple definitions in the original file, and - which need to be combined. - - subscript_dict: dict - A dictionary containing the names of subscript families (dimensions) - as keys, and a list of the possible positions within that dimension - for each value. - - module_name: str - Name of the module - - module_dir: str - Path of the directory where module files will be stored. - - Returns - ------- - None - - """ - text = textwrap.dedent(''' - """ - Module %(module_name)s - Translated using PySD version %(version)s - """ - ''' % { - "module_name": module_name, - "version": __version__, - }) - funcs = _generate_functions(elements, subscript_dict) - text += funcs - text = black.format_file_contents(text, fast=True, mode=black.FileMode()) - - outfile_name = os.path.join(module_dir, module_name + ".py") - - with open(outfile_name, "w", encoding="UTF-8") as out: - out.write(text) - - -def build(elements, subscript_dict, namespace, dependencies, outfile_name): - """ - Constructs and writes the python representation of the model, when the - the split_modules is set to False in the read_vensim function. The entire - model is put in a single python file. - - Parameters - ---------- - elements: list - Each element is a dictionary, with the various components needed to - assemble a model component in python syntax. This will contain - multiple entries for elements that have multiple definitions in the - original file, and which need to be combined. - - subscript_dict: dict - A dictionary containing the names of subscript families (dimensions) - as keys, and a list of the possible positions within that dimension - for each value. - - namespace: dict - Translation from original model element names (keys) to python safe - function identifiers (values). - - dependencies: dict - Dependencies dictionary. 
Variables as keys and set of called values or - objects, objects as keys and a dictionary of dependencies for - initialization and dependencies for run. - - outfile_name: str - The name of the file to write the model to. - - Returns - ------- - None or text: None or str - If outfile_name="return" it will return the content of the output file - instead of saving it. It is used for testing. - - """ - # separating between control variables and rest of variables - control_vars, funcs = _build_variables(elements, subscript_dict) - - text = Imports.get_header(os.path.basename(outfile_name)) - - text += textwrap.dedent(""" - __pysd_version__ = '%(version)s' - - __data = { - 'scope': None, - 'time': lambda: 0 - } - - _root = Path(__file__).parent - - _subscript_dict = %(subscript_dict)s - - _namespace = %(namespace)s - - _dependencies = %(dependencies)s - """ % { - "subscript_dict": repr(subscript_dict), - "namespace": repr(namespace), - "dependencies": repr(dependencies), - "version": __version__, - }) - - text += _get_control_vars(control_vars) + funcs - text = black.format_file_contents(text, fast=True, mode=black.FileMode()) - - # Needed for various sessions - build_names.clear() - - # this is used for testing - if outfile_name == "return": - return text - - with open(outfile_name, "w", encoding="UTF-8") as out: - out.write(text) - - -def _generate_functions(elements, subscript_dict): - """ - Builds all model elements as functions in string format. - NOTE: this function calls the build_element function, which updates the - import_modules. - Therefore, it needs to be executed before the_generate_automatic_imports - function. - - Parameters - ---------- - elements: dict - Each element is a dictionary, with the various components needed to - assemble a model component in python syntax. This will contain - multiple entries for elements that have multiple definitions in the - original file, and which need to be combined. - - subscript_dict: dict - A dictionary containing the names of subscript families (dimensions) - as keys, and a list of the possible positions within that dimension - for each value. - - Returns - ------- - funcs: str - String containing all formated model functions - - """ - functions = [build_element(element, subscript_dict) for element in - elements] - - funcs = "%(functions)s" % {"functions": "\n".join(functions)} - funcs = funcs.replace("\t", " ") - - return funcs - - -def _get_control_vars(control_vars): - """ - Create the section of control variables - - Parameters - ---------- - control_vars: str - Functions to define control variables. - - Returns - ------- - text: str - Control variables section and header of model variables section. - - """ - text = textwrap.dedent(""" - ########################################################################## - # CONTROL VARIABLES # - ########################################################################## - %(control_vars_dict)s - def _init_outer_references(data): - for key in data: - __data[key] = data[key] - - - def time(): - return __data['time']() - - """ % {"control_vars_dict": control_vars[0]}) - - text += control_vars[1] - - text += textwrap.dedent(""" - ########################################################################## - # MODEL VARIABLES # - ########################################################################## - """) - - return text - - -def _build_variables(elements, subscript_dict): - """ - Build model variables (functions) and separate then in control variables - and regular variables. 
- - Parameters - ---------- - elements: list - Model elements. - - subscript_dict: - - Returns - ------- - control_vars, regular_vars: tuple, str - control_vars is a tuple of length 2. First element is the dictionary - of original control vars. Second is the string to add the control - variables' functions. regular_vars is the string to add the regular - variables' functions. - - """ - # returns of the control variables - control_vars_dict = { - "initial_time": ["__data['time'].initial_time()"], - "final_time": ["__data['time'].final_time()"], - "time_step": ["__data['time'].time_step()"], - "saveper": ["__data['time'].saveper()"] - } - regular_vars = [] - control_vars = [] - - for element in elements: - if element["py_name"] in control_vars_dict: - # change the return expression in the element and update the dict - # with the original expression - control_vars_dict[element["py_name"]], element["py_expr"] =\ - element["py_expr"][0], control_vars_dict[element["py_name"]] - control_vars.append(element) - else: - regular_vars.append(element) - - if len(control_vars) == 0: - # macro objects, no control variables - control_vars_dict = "" - else: - control_vars_dict = """ - _control_vars = { - "initial_time": lambda: %(initial_time)s, - "final_time": lambda: %(final_time)s, - "time_step": lambda: %(time_step)s, - "saveper": lambda: %(saveper)s - } - """ % control_vars_dict - - return (control_vars_dict, - _generate_functions(control_vars, subscript_dict)),\ - _generate_functions(regular_vars, subscript_dict) - - -def build_element(element, subscript_dict): - """ - Returns a string that has processed a single element dictionary. - - Parameters - ---------- - element: dict - A dictionary containing at least the elements: - - kind: ['constant', 'setup', 'component', 'lookup'] - Different types of elements will be built differently - - py_expr: str - An expression that has been converted already into python syntax - - subs: list of lists - Each sublist contains coordinates for initialization of a - particular part of a subscripted function, the list of - subscripts vensim attaches to an equation - - subscript_dict: dict - A dictionary containing the names of subscript families (dimensions) - as keys, and a list of the possible positions within that dimension - for each value. - - Returns - ------- - func: str - The function to write in the model file. 
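For orientation, a hedged example of one such element dictionary, using the main keys listed above; all values are invented.

```python
# Hypothetical element dictionary for a simple, unsubscripted component.
element = {
    "py_name": "water_temperature",
    "real_name": "Water Temperature",
    "doc": "Temperature of the water in the kettle.",
    "kind": "component",
    "py_expr": ["_integ_water_temperature()"],
    "unit": "Degrees",
    "lims": "(0.0, 100.0)",
    "eqn": ["Water Temperature = INTEG(heating rate, 20)"],
    "subs": [[]],
    "merge_subs": [],
    "arguments": "",
}
```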
- - """ - # check the elements with ADD in their name - # as these wones are directly added to the - # external objecets via .add method - py_expr_no_ADD = ["ADD" not in py_expr for py_expr in element["py_expr"]] - - if element["kind"] == "dependencies": - # element only used to update dependencies - return "" - elif sum(py_expr_no_ADD) > 1 and element["kind"] not in [ - "stateful", - "external", - "external_add", - ]: - py_expr_i = [] - # need to append true to the end as the next element is checked - py_expr_no_ADD.append(True) - for i, (py_expr, subs_i) in enumerate(zip(element["py_expr"], - element["subs"])): - if not (py_expr.startswith("xr.") or py_expr.startswith("_ext_")): - # rearrange if it doesn't come from external or xarray - coords = utils.make_coord_dict( - subs_i, - subscript_dict, - terse=False) - coords = { - new_dim: coords[dim] - for new_dim, dim in zip(element["merge_subs"], coords) - } - dims = list(coords) - Imports.add("utils", "rearrange") - py_expr_i.append("rearrange(%s, %s, %s)" % ( - py_expr, dims, coords)) - elif py_expr_no_ADD[i]: - # element comes from external or xarray - py_expr_i.append(py_expr) - Imports.add("utils", "xrmerge") - py_expr = "xrmerge(%s)" % ( - ",\n".join(py_expr_i)) - else: - py_expr = element["py_expr"][0] - - contents = "return %s" % py_expr - - element["subs_dec"] = "" - element["subs_doc"] = "None" - - if element["merge_subs"]: - # We add the list of the subs to the __doc__ of the function - # this will give more information to the user and make possible - # to rewrite subscripted values with model.run(params=X) or - # model.run(initial_condition=(n,x)) - element["subs_doc"] = "%s" % element["merge_subs"] - if element["kind"] in ["component", "setup", "constant", - "component_ext_data", "data"]: - # the decorator is not always necessary as the objects - # defined as xarrays in the model will have the right - # dimensions always, we should try to reduce to the - # maximum when we use it - # re arrange the python object - element["subs_dec"] =\ - "@subs(%s, _subscript_dict)" % element["merge_subs"] - Imports.add("subs") - - indent = 8 - element.update( - { - "ulines": "-" * len(element["real_name"]), - "contents": contents.replace("\n", "\n" + " " * indent), - } - ) - # indent lines 2 onward - - # convert newline indicator and add expected level of indentation - element["doc"] = element["doc"].replace("\\", "\n").replace("\n", "\n ") - - if element["kind"] in ["stateful", "external", "tab_data"]: - func = """ - %(py_name)s = %(py_expr)s - """ % { - "py_name": element["py_name"], - "py_expr": element["py_expr"][0], - } - - elif element["kind"] == "external_add": - # external expressions to be added with .add method - # remove the ADD from the end - py_name = element["py_name"].split("ADD")[0] - func = """ - %(py_name)s%(py_expr)s - """ % { - "py_name": py_name, - "py_expr": element["py_expr"][0], - } - - else: - sep = "\n" + " " * 10 - if len(element["eqn"]) == 1: - # Original equation in the same line - element["eqn"] = element["eqn"][0] - elif len(element["eqn"]) > 5: - # First and last original equations separated by vertical dots - element["eqn"] = ( - sep + element["eqn"][0] + (sep + " .") * 3 + sep - + element["eqn"][-1] - ) - else: - # From 2 to 5 equations in different lines - element["eqn"] = sep + sep.join(element["eqn"]) - - func = ( - ''' - %(subs_dec)s - def %(py_name)s(%(arguments)s): - """ - Real Name: %(real_name)s - Original Eqn: %(eqn)s - Units: %(unit)s - Limits: %(lims)s - Type: %(kind)s - Subs: %(subs_doc)s - - 
%(doc)s - """ - %(contents)s - ''' - % element - ) - - return textwrap.dedent(func) - - -def merge_partial_elements(element_list): - """ - Merges model elements which collectively all define the model component, - mostly for multidimensional subscripts - - Parameters - ---------- - element_list: list - List of all the elements. - - Returns - ------- - list: - List of merged elements. - - """ - outs = dict() # output data structure - - for element in element_list: - name = element["py_name"] - if name not in outs: - # Use 'expr' for Vensim models, and 'eqn' for Xmile - # (This makes the Vensim equation prettier.) - eqn = element["expr"] if "expr" in element else element["eqn"] - parent_name = element["parent_name"] if "parent_name" in element\ - else None - outs[name] = { - "py_name": element["py_name"], - "real_name": element["real_name"], - "doc": element["doc"], - "py_expr": [element["py_expr"]], # in a list - "unit": element["unit"], - "subs": [element["subs"]], - "merge_subs": element["merge_subs"] - if "merge_subs" in element else None, - "dependencies": element["dependencies"] - if "dependencies" in element else None, - "lims": element["lims"], - "eqn": [eqn.replace(r"\ ", "")], - "parent_name": parent_name, - "kind": element["kind"], - "arguments": element["arguments"], - } - - else: - eqn = element["expr"] if "expr" in element else element["eqn"] - - outs[name]["doc"] = outs[name]["doc"] or element["doc"] - outs[name]["unit"] = outs[name]["unit"] or element["unit"] - outs[name]["lims"] = outs[name]["lims"] or element["lims"] - outs[name]["eqn"] += [eqn.replace(r"\ ", "")] - outs[name]["py_expr"] += [element["py_expr"]] - outs[name]["subs"] += [element["subs"]] - if outs[name]["dependencies"] is not None: - if name.startswith("_"): - # stateful object merge initial and step - for target in outs[name]["dependencies"]: - _merge_dependencies( - outs[name]["dependencies"][target], - element["dependencies"][target]) - else: - # regular element - _merge_dependencies( - outs[name]["dependencies"], - element["dependencies"]) - outs[name]["arguments"] = element["arguments"] - - return list(outs.values()) - - -def _merge_dependencies(current, new): - """ - Merge two dependencies dicts of an element. - - Parameters - ---------- - current: dict - Current dependencies of the element. It will be mutated. - - new: dict - New dependencies to add. - - Returns - ------- - None - - """ - current_set, new_set = set(current), set(new) - for dep in current_set.intersection(new_set): - if dep.startswith("__"): - # if it is special (__lookup__, __external__) continue - continue - # if dependency is in both sum the number of calls - current[dep] += new[dep] - for dep in new_set.difference(current_set): - # if dependency is only in new copy it - current[dep] = new[dep] - - -def build_active_initial_deps(identifier, arguments, deps): - """ - Creates new model element dictionaries for the model elements associated - with a stock. - - Parameters - ---------- - identifier: str - The python-safe name of the stock. - - expression: str - Formula which forms the regular value for active initial. - - initial: str - Formula which forms the initial value for active initial. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - A reference to the gost variable that defines the dependencies. - - new_structure: list - List of additional model element dictionaries. 
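The merging rule described here can be paraphrased in a few standalone lines (this is a sketch, not the library function itself):

```python
# Shared dependencies add their call counts, new ones are copied over,
# and special '__...__' entries are left untouched.
current = {"var_a": 1, "time": 2, "__external__": None}
new = {"var_a": 1, "var_b": 3, "__external__": None}

for dep, calls in new.items():
    if dep not in current:
        current[dep] = calls           # only in the new dict: copy it
    elif not dep.startswith("__"):
        current[dep] += calls          # in both and not special: sum the calls

print(current)
# {'var_a': 2, 'time': 2, '__external__': None, 'var_b': 3}
```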
- - """ - deps = build_dependencies( - deps, - { - "initial": [arguments[1]], - "step": [arguments[0]] - }) - - py_name = "_active_initial_%s" % identifier - - # describe the stateful object - new_structure = [{ - "py_name": py_name, - "parent_name": "", - "real_name": "", - "doc": "", - "py_expr": "", - "unit": "", - "lims": "", - "eqn": "", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "dependencies", - "arguments": "", - }] - - return py_name, new_structure - - -def add_stock(identifier, expression, initial_condition, subs, merge_subs, - deps): - """ - Creates new model element dictionaries for the model elements associated - with a stock. - - Parameters - ---------- - identifier: str - The python-safe name of the stock. - - expression: str - The formula which forms the derivative of the stock. - - initial_condition: str - Formula which forms the initial condition for the stock. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - a string to use in place of the 'INTEG...' pieces in the element - expression string, a reference to the stateful object. - - new_structure: list - List of additional model element dictionaries. When there are - subscripts, constructs an external 'init' and 'ddt' function so - that these can be appropriately aggregated. - - """ - Imports.add("statefuls", "Integ") - - deps = build_dependencies( - deps, - { - "initial": [initial_condition], - "step": [expression] - }) - - new_structure = [] - py_name = "_integ_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = "Integ(lambda: %s, lambda: %s, '%s')" % ( - expression, - initial_condition, - py_name, - ) - else: - stateful_py_expr = "Integ(_integ_input_%s, _integ_init_%s, '%s')" % ( - identifier, - identifier, - py_name, - ) - - # following elements not specified in the model file, but must exist - # create the stock initialization element - new_structure.append({ - "py_name": "_integ_init_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", - "py_expr": initial_condition, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides initial conditions for %s function" - % identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "", - }) - - new_structure.append({ - "py_name": "_integ_input_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "component", - "doc": "Provides derivative for %s function" % identifier, - "subs": subs, - "merge_subs": merge_subs, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "py_expr": expression, - "arguments": "", - }) - - # describe the stateful object - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "Representation of %s" % identifier, - "doc": "Integrates Expression %s" % expression, - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - }) - - return "%s()" % py_name, new_structure - - -def add_delay(identifier, delay_input, delay_time, initial_value, order, - subs, merge_subs, deps): 
- """ - Creates code to instantiate a stateful 'Delay' object, - and provides reference to that object's output. - - The name of the stateful object is based upon the passed in parameters, - so if there are multiple places where identical delay functions are - referenced, the translated python file will only maintain one stateful - object, and reference it multiple times. - - Parameters - ---------- - identifier: str - The python-safe name of the delay. - - delay_input: str - Reference to the model component that is the input to the delay. - - delay_time: str - Can be a number (in string format) or a reference to another model - element which will calculate the delay. This is calculated throughout - the simulation at runtime. - - initial_value: str - This is used to initialize the stocks that are present in the delay. - We initialize the stocks with equal values so that the outflow in - the first timestep is equal to this value. - - order: str - The number of stocks in the delay pipeline. As we construct the - delays at build time, this must be an integer and cannot be calculated - from other model components. Anything else will yield a ValueError. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the delay object `__call__` method, which will return - the output of the delay process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("statefuls", "Delay") - - deps = build_dependencies( - deps, - { - "initial": [initial_value, order, delay_time], - "step": [delay_time, delay_input] - }) - - new_structure = [] - py_name = "_delay_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = ( - "Delay(lambda: %s, lambda: %s," - "lambda: %s, lambda: %s, time_step, '%s')" - % (delay_input, delay_time, initial_value, order, py_name) - ) - - else: - stateful_py_expr = ( - "Delay(_delay_input_%s, lambda: %s, _delay_init_%s," - "lambda: %s, time_step, '%s')" - % (identifier, delay_time, identifier, order, py_name) - ) - - # following elements not specified in the model file, but must exist - # create the delay initialization element - new_structure.append({ - "py_name": "_delay_init_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", # not specified in the model file, but must exist - "py_expr": initial_value, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides initial conditions for %s function" \ - % identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "", - }) - - new_structure.append({ - "py_name": "_delay_input_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "component", - "doc": "Provides input for %s function" % identifier, - "subs": subs, - "merge_subs": merge_subs, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "py_expr": delay_input, - "arguments": "", - }) - - # describe the stateful object - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "Delay of %s" % delay_input, - "doc": "Delay time: %s \n Delay initial value %s \n Delay order %s" - % (delay_time, 
initial_value, order), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - }) - - return "%s()" % py_name, new_structure - - -def add_delay_f(identifier, delay_input, delay_time, initial_value, deps): - """ - Creates code to instantiate a stateful 'DelayFixed' object, - and provides reference to that object's output. - - The name of the stateful object is based upon the passed in parameters, - so if there are multiple places where identical delay functions are - referenced, the translated python file will only maintain one stateful - object, and reference it multiple times. - - Parameters - ---------- - identifier: str - The python-safe name of the delay. - - delay_input: str - Reference to the model component that is the input to the delay. - - delay_time: str - Can be a number (in string format) or a reference to another model - element which will calculate the delay. This is calculated throughout - the simulation at runtime. - - initial_value: str - This is used to initialize the stocks that are present in the delay. - We initialize the stocks with equal values so that the outflow in - the first timestep is equal to this value. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the delay object `__call__` method, which will return - the output of the delay process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("statefuls", "DelayFixed") - - deps = build_dependencies( - deps, - { - "initial": [initial_value, delay_time], - "step": [delay_input] - }) - - py_name = "_delayfixed_%s" % identifier - - stateful_py_expr = ( - "DelayFixed(lambda: %s, lambda: %s," - "lambda: %s, time_step, '%s')" - % (delay_input, delay_time, initial_value, py_name) - ) - - # describe the stateful object - stateful = { - "py_name": py_name, - "parent_name": identifier, - "real_name": "Delay fixed of %s" % delay_input, - "doc": "DelayFixed time: %s \n Delay initial value %s" - % (delay_time, initial_value), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - } - - return "%s()" % py_name, [stateful] - - -def add_n_delay(identifier, delay_input, delay_time, initial_value, order, - subs, merge_subs, deps): - """ - Creates code to instantiate a stateful 'DelayN' object, - and provides reference to that object's output. - - The name of the stateful object is based upon the passed in parameters, - so if there are multiple places where identical delay functions are - referenced, the translated python file will only maintain one stateful - object, and reference it multiple times. - - Parameters - ---------- - identifier: str - The python-safe name of the delay. - - delay_input: str - Reference to the model component that is the input to the delay. - - delay_time: str - Can be a number (in string format) or a reference to another model - element which will calculate the delay. This is calculated throughout - the simulation at runtime. - - initial_value: str - This is used to initialize the stocks that are present in the delay. - We initialize the stocks with equal values so that the outflow in - the first timestep is equal to this value. - - order: str - The number of stocks in the delay pipeline. 
As we construct the - delays at build time, this must be an integer and cannot be calculated - from other model components. Anything else will yield a ValueError. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the delay object `__call__` method, which will return - the output of the delay process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("statefuls", "DelayN") - - deps = build_dependencies( - deps, - { - "initial": [initial_value, order, delay_time], - "step": [delay_time, delay_input] - }) - - new_structure = [] - py_name = "_delayn_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = ( - "DelayN(lambda: %s, lambda: %s," - "lambda: %s, lambda: %s, time_step, '%s')" - % (delay_input, delay_time, initial_value, order, py_name) - ) - - else: - stateful_py_expr = ( - "DelayN(_delayn_input_%s, lambda: %s," - " _delayn_init_%s, lambda: %s, time_step, '%s')" - % (identifier, delay_time, identifier, order, py_name) - ) - - # following elements not specified in the model file, but must exist - # create the delay initialization element - new_structure.append({ - "py_name": "_delayn_init_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", # not specified in the model file, but must exist - "py_expr": initial_value, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides initial conditions for %s function" \ - % identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "", - }) - - new_structure.append({ - "py_name": "_delayn_input_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "component", - "doc": "Provides input for %s function" % identifier, - "subs": subs, - "merge_subs": merge_subs, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "py_expr": delay_input, - "arguments": "", - }) - - # describe the stateful object - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "DelayN of %s" % delay_input, - "doc": "DelayN time: %s \n DelayN initial value %s \n DelayN order\ - %s" - % (delay_time, initial_value, order), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - }) - - return "%s()" % py_name, new_structure - - -def add_forecast(identifier, forecast_input, average_time, horizon, - subs, merge_subs, deps): - """ - Constructs Forecast object. - - Parameters - ---------- - identifier: str - The python-safe name of the forecast. - - forecast_input: str - Input of the forecast. - - average_time: str - Average time of the forecast. - - horizon: str - Horizon for the forecast. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - deps: dict - The dictionary with all the denpendencies in the expression. 
- - Returns - ------- - reference: str - Reference to the forecast object `__call__` method, which will return - the output of the forecast process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("statefuls", "Forecast") - - deps = build_dependencies( - deps, - { - "initial": [forecast_input], - "step": [forecast_input, average_time, horizon] - }) - - new_structure = [] - py_name = "_forecast_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = "Forecast(lambda: %s, lambda: %s,"\ - " lambda: %s, '%s')" % ( - forecast_input, average_time, - horizon, py_name) - - else: - # only need to re-dimension init as xarray will take care of other - stateful_py_expr = "Forecast(_forecast_input_%s, lambda: %s,"\ - " lambda: %s, '%s')" % ( - identifier, average_time, - horizon, py_name) - - # following elements not specified in the model file, but must exist - # create the delay initialization element - new_structure.append({ - "py_name": "_forecast_input_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", # not specified in the model file, but must exist - "py_expr": forecast_input, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides input for %s function" - % identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "", - }) - - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "Forecast of %s" % forecast_input, - "doc": "Forecast average time: %s \n Horizon %s" - % (average_time, horizon), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - }) - - return "%s()" % py_name, new_structure - - -def add_sample_if_true(identifier, condition, actual_value, initial_value, - subs, merge_subs, deps): - """ - Creates code to instantiate a stateful 'SampleIfTrue' object, - and provides reference to that object's output. - - Parameters - ---------- - identifier: str - The python-safe name of the sample if true. - - condition: str - Reference to another model element that is the condition to the - 'sample if true' function. - - actual_value: str - Can be a number (in string format) or a reference to another model - element which is calculated throughout the simulation at runtime. - - initial_value: str - This is used to initialize the state of the sample if true function. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the sample if true object `__call__` method, - which will return the output of the sample if true process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. 
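For illustration, the initial/step dependency split produced for a SAMPLE IF TRUE element could look like this; the names are hypothetical:

```python
# Hypothetical classification of dependencies for a SAMPLE IF TRUE builder.
deps = {
    "initial": {"baseline_value": 1},                     # needed only at setup
    "step": {"switch_condition": 1, "input_signal": 1},   # evaluated every step
}
```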
- - """ - Imports.add("statefuls", "SampleIfTrue") - - deps = build_dependencies( - deps, - { - "initial": [initial_value], - "step": [actual_value, condition] - }) - - new_structure = [] - py_name = "_sample_if_true_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = "SampleIfTrue(lambda: %s, lambda: %s,"\ - "lambda: %s, '%s')" % ( - condition, actual_value, initial_value, py_name) - - else: - stateful_py_expr = "SampleIfTrue(lambda: %s, lambda: %s,"\ - "_sample_if_true_init_%s, '%s')" % ( - condition, actual_value, identifier, py_name) - - # following elements not specified in the model file, but must exist - # create the delay initialization element - new_structure.append({ - "py_name": "_sample_if_true_init_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", # not specified in the model file, but must exist - "py_expr": initial_value, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides initial conditions for %s function" % identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "" - }) - # describe the stateful object - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "Sample if true of %s" % identifier, - "doc": "Initial value: %s \n Input: %s \n Condition: %s" % ( - initial_value, actual_value, condition), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "" - }) - - return "%s()" % py_name, new_structure - - -def add_n_smooth(identifier, smooth_input, smooth_time, initial_value, order, - subs, merge_subs, deps): - """ - Constructs stock and flow chains that implement the calculation of - a smoothing function. - - Parameters - ---------- - identifier: str - The python-safe name of the smooth. - - smooth_input: str - Reference to the model component that is the input to the - smoothing function. - - smooth_time: str - Can be a number (in string format) or a reference to another model - element which will calculate the delay. This is calculated throughout - the simulation at runtime. - - initial_value: str - This is used to initialize the stocks that are present in the delay. - We initialize the stocks with equal values so that the outflow in - the first timestep is equal to this value. - - order: str - The number of stocks in the delay pipeline. As we construct the delays - at build time, this must be an integer and cannot be calculated from - other model components. Anything else will yield a ValueError. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output - - merge_subs: list of strings - List of the final subscript range of the python array after. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the smooth object `__call__` method, which will return - the output of the smooth process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. 
- - """ - Imports.add("statefuls", "Smooth") - - deps = build_dependencies( - deps, - { - "initial": [initial_value, order], - "step": [smooth_input, smooth_time] - }) - - new_structure = [] - py_name = "_smooth_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = ( - "Smooth(lambda: %s, lambda: %s," - "lambda: %s, lambda: %s, '%s')" - % (smooth_input, smooth_time, initial_value, order, py_name) - ) - - else: - # only need to re-dimension init and input as xarray will take care of - # other - stateful_py_expr = ( - "Smooth(_smooth_input_%s, lambda: %s," - " _smooth_init_%s, lambda: %s, '%s')" - % (identifier, smooth_time, identifier, order, py_name) - ) - - # following elements not specified in the model file, but must exist - # create the delay initialization element - new_structure.append({ - "py_name": "_smooth_init_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", # not specified in the model file, but must exist - "py_expr": initial_value, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides initial conditions for %s function" % \ - identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "", - }) - - new_structure.append({ - "py_name": "_smooth_input_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "component", - "doc": "Provides input for %s function" % identifier, - "subs": subs, - "merge_subs": merge_subs, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "py_expr": smooth_input, - "arguments": "", - }) - - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "Smooth of %s" % smooth_input, - "doc": "Smooth time %s \n Initial value %s \n Smooth order %s" % ( - smooth_time, initial_value, order), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - }) - - return "%s()" % py_name, new_structure - - -def add_n_trend(identifier, trend_input, average_time, initial_trend, - subs, merge_subs, deps): - """ - Constructs Trend object. - - Parameters - ---------- - identifier: str - The python-safe name of the trend. - - trend_input: str - Input of the trend. - - average_time: str - Average time of the trend. - - trend_initial: str - This is used to initialize the trend. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the trend object `__call__` method, which will return the - output of the trend process. - - new_structure: list - List of element construction dictionaries for the builder to assemble. 
- - """ - Imports.add("statefuls", "Trend") - - deps = build_dependencies( - deps, - { - "initial": [initial_trend, trend_input, average_time], - "step": [trend_input, average_time] - }) - - new_structure = [] - py_name = "_trend_%s" % identifier - - if len(subs) == 0: - stateful_py_expr = "Trend(lambda: %s, lambda: %s,"\ - " lambda: %s, '%s')" % ( - trend_input, average_time, - initial_trend, py_name) - - else: - # only need to re-dimension init as xarray will take care of other - stateful_py_expr = "Trend(lambda: %s, lambda: %s,"\ - " _trend_init_%s, '%s')" % ( - trend_input, average_time, - identifier, py_name) - - # following elements not specified in the model file, but must exist - # create the delay initialization element - new_structure.append({ - "py_name": "_trend_init_%s" % identifier, - "parent_name": identifier, - "real_name": "Implicit", - "kind": "setup", # not specified in the model file, but must exist - "py_expr": initial_trend, - "subs": subs, - "merge_subs": merge_subs, - "doc": "Provides initial conditions for %s function" - % identifier, - "unit": "See docs for %s" % identifier, - "lims": "None", - "eqn": "None", - "arguments": "", - }) - - new_structure.append({ - "py_name": py_name, - "parent_name": identifier, - "real_name": "trend of %s" % trend_input, - "doc": "Trend average time: %s \n Trend initial value %s" - % (average_time, initial_trend), - "py_expr": stateful_py_expr, - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - }) - - return "%s()" % py_name, new_structure - - -def add_initial(identifier, value, deps): - """ - Constructs a stateful object for handling vensim's 'Initial' functionality. - - Parameters - ---------- - identifier: str - The python-safe name of the initial. - - value: str - The expression which will be evaluated, and the first value of - which returned. - - deps: dict - The dictionary with all the denpendencies in the expression. - - Returns - ------- - reference: str - Reference to the Initial object `__call__` method, - which will return the first calculated value of `identifier`. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("statefuls", "Initial") - - deps = build_dependencies( - deps, - { - "initial": [value] - }) - - py_name = "_initial_%s" % identifier - - stateful = { - "py_name": py_name, - "parent_name": identifier, - "real_name": "Initial %s" % identifier, - "doc": "Returns the value taken on during the initialization phase", - "py_expr": "Initial(lambda: %s, '%s')" % (value, py_name), - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - } - - return "%s()" % stateful["py_name"], [stateful] - - -def add_tab_data(identifier, real_name, subs, - subscript_dict, merge_subs, keyword): - """ - Constructs an object for handling Vensim's regular DATA components. - - Parameters - ---------- - identifier: str - The python-safe name of the external values. - - real_name: str - The real name of the variable. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - subscript_dict: dict - Dictionary describing the possible dimensions of the stock's - subscripts. 
- - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - keyword: str - Data retrieval method ('interpolate', 'look forward', 'hold backward'). - - Returns - ------- - reference: str - Reference to the TabData object `__call__` method, which will - return the retrieved value of data for the current time step. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("data", "TabData") - - coords = utils.simplify_subscript_input( - utils.make_coord_dict(subs, subscript_dict, terse=False), - subscript_dict, return_full=False, merge_subs=merge_subs) - keyword = ( - "'%s'" % keyword.strip(":").lower() if isinstance(keyword, str) else - keyword) - name = "_data_%s" % identifier - - data = { - "py_name": name, - "parent_name": identifier, - "real_name": "Data for %s" % identifier, - "doc": "Provides data for data variable %s" % identifier, - "py_expr": "TabData('%s', '%s', %s, %s)" % ( - real_name, identifier, coords, keyword), - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": subs, - "merge_subs": merge_subs, - "kind": "tab_data", - "arguments": "", - } - - return "%s(time())" % data["py_name"], [data] - - -def add_ext_data(identifier, file_name, tab, time_row_or_col, cell, subs, - subscript_dict, merge_subs, keyword): - """ - Constructs a external object for handling Vensim's GET XLS DATA and - GET DIRECT DATA functionality. - - Parameters - ---------- - identifier: str - The python-safe name of the external values. - - file_name: str - Filepath to the data. - - tab: str - Tab where the data is. - - time_row_or_col: str - Identifier to the starting point of the time dimension. - - cell: str - Cell identifier where the data starts. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - subscript_dict: dict - Dictionary describing the possible dimensions of the stock's - subscripts. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - keyword: str - Data retrieval method ('interpolate', 'look forward', 'hold backward'). - - Returns - ------- - reference: str - Reference to the ExtData object `__call__` method, which will - return the retrieved value of data for the current time step. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("external", "ExtData") - - coords = utils.simplify_subscript_input( - utils.make_coord_dict(subs, subscript_dict, terse=False), - subscript_dict, return_full=False, merge_subs=merge_subs) - keyword = ( - "'%s'" % keyword.strip(":").lower() if isinstance(keyword, str) else - keyword) - name = "_ext_data_%s" % identifier - - # Check if the object already exists - if name in build_names: - # Create a new py_name with ADD_# ending - # This object name will not be used in the model as - # the information is added to the existing object - # with add method. - kind = "external_add" - name = utils.make_add_identifier(name, build_names) - py_expr = ".add(%s, %s, %s, %s, %s, %s)" - else: - # Regular name will be used and a new object will be created - # in the model file. 
- build_names.add(name) - kind = "external" - py_expr = "ExtData(%s, %s, %s, %s, %s, %s,\n"\ - " _root, '{}')".format(name) - - external = { - "py_name": name, - "parent_name": identifier, - "real_name": "External data for %s" % identifier, - "doc": "Provides data for data variable %s" % identifier, - "py_expr": py_expr % (file_name, tab, time_row_or_col, cell, keyword, - coords), - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": subs, - "merge_subs": merge_subs, - "kind": kind, - "arguments": "", - } - - return "%s(time())" % external["py_name"], [external] - - -def add_ext_constant(identifier, file_name, tab, cell, - subs, subscript_dict, merge_subs): - """ - Constructs a external object for handling Vensim's GET XLS CONSTANT and - GET DIRECT CONSTANT functionality. - - Parameters - ---------- - identifier: str - The python-safe name of the external values. - - file_name: str - Filepath to the data. - - tab: str - Tab where the data is. - - cell: str - Cell identifier where the data starts. - - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - subscript_dict: dict - Dictionary describing the possible dimensions of the stock's - subscripts. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - Returns - ------- - reference: str - Reference to the ExtConstant object `__call__` method, - which will return the read value of the data. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("external", "ExtConstant") - - coords = utils.simplify_subscript_input( - utils.make_coord_dict(subs, subscript_dict, terse=False), - subscript_dict, return_full=False, merge_subs=merge_subs) - name = "_ext_constant_%s" % identifier - - # Check if the object already exists - if name in build_names: - # Create a new py_name with ADD_# ending - # This object name will not be used in the model as - # the information is added to the existing object - # with add method. - kind = "external_add" - name = utils.make_add_identifier(name, build_names) - py_expr = ".add(%s, %s, %s, %s)" - else: - # Regular name will be used and a new object will be created - # in the model file. - kind = "external" - py_expr = "ExtConstant(%s, %s, %s, %s,\n"\ - " _root, '{}')".format(name) - build_names.add(name) - - external = { - "py_name": name, - "parent_name": identifier, - "real_name": "External constant for %s" % identifier, - "doc": "Provides data for constant data variable %s" % identifier, - "py_expr": py_expr % (file_name, tab, cell, coords), - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": subs, - "merge_subs": merge_subs, - "kind": kind, - "arguments": "", - } - - return "%s()" % external["py_name"], [external] - - -def add_ext_lookup(identifier, file_name, tab, x_row_or_col, cell, - subs, subscript_dict, merge_subs): - """ - Constructs a external object for handling Vensim's GET XLS LOOKUPS and - GET DIRECT LOOKUPS functionality. - - Parameters - ---------- - identifier: str - The python-safe name of the external values. - - file_name: str - Filepath to the data. - - tab: str - Tab where the data is. - - x_row_or_col: str - Identifier to the starting point of the lookup dimension. - - cell: str - Cell identifier where the data starts. 
- - subs: list of strings - List of strings of subscript indices that correspond to the - list of expressions, and collectively define the shape of the output. - - subscript_dict: dict - Dictionary describing the possible dimensions of the stock's - subscripts. - - merge_subs: list of strings - List of the final subscript range of the python array after - merging with other objects. - - Returns - ------- - reference: str - Reference to the ExtLookup object `__call__` method, - which will return the retrieved value of data after interpolating it. - - new_structure: list - List of element construction dictionaries for the builder to assemble. - - """ - Imports.add("external", "ExtLookup") - - coords = utils.simplify_subscript_input( - utils.make_coord_dict(subs, subscript_dict, terse=False), - subscript_dict, return_full=False, merge_subs=merge_subs) - name = "_ext_lookup_%s" % identifier - - # Check if the object already exists - if name in build_names: - # Create a new py_name with ADD_# ending - # This object name will not be used in the model as - # the information is added to the existing object - # with add method. - kind = "external_add" - name = utils.make_add_identifier(name, build_names) - py_expr = ".add(%s, %s, %s, %s, %s)" - else: - # Regular name will be used and a new object will be created - # in the model file. - kind = "external" - py_expr = "ExtLookup(%s, %s, %s, %s, %s,\n"\ - " _root, '{}')".format(name) - build_names.add(name) - - external = { - "py_name": name, - "parent_name": identifier, - "real_name": "External lookup data for %s" % identifier, - "doc": "Provides data for external lookup variable %s" % identifier, - "py_expr": py_expr % (file_name, tab, x_row_or_col, cell, coords), - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": subs, - "merge_subs": merge_subs, - "kind": kind, - "arguments": "x", - } - - return "%s(x)" % external["py_name"], [external] - - -def add_macro(identifier, macro_name, filename, arg_names, arg_vals, deps): - """ - Constructs a stateful object instantiating a 'Macro'. - - Parameters - ---------- - identifier: str - The python-safe name of the element that calls the macro. - - macro_name: str - Python safe name for macro. - - filename: str - Filepath to macro definition. - - func_args: dict - Dictionary of values to be passed to macro, {key: function}. - - Returns - ------- - reference: str - Reference to the Initial object `__call__` method, - which will return the first calculated value of `initial_input`. - - new_structure: list - List of element construction dictionaries for the builder to assemble. 
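For context on the GET XLS / GET DIRECT builders above, a minimal sketch of the repeated-object naming they rely on (object names are hypothetical):

```python
# A second definition of the same external object does not create a new one:
# it receives a unique '...ADD_#' name and its data is appended with .add().
build_names = {"_ext_constant_data"}

name = "_ext_constant_data"
if name in build_names:
    suffix = 1
    while "%sADD_%d" % (name, suffix) in build_names:
        suffix += 1
    name = "%sADD_%d" % (name, suffix)
build_names.add(name)

print(name)  # _ext_constant_dataADD_1
```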
- - """ - Imports.add("statefuls", "Macro") - - deps = build_dependencies( - deps, - { - "initial": arg_vals, - "step": arg_vals - }) - - py_name = "_macro_" + macro_name + "_" + identifier - - func_args = "{ %s }" % ", ".join( - ["'%s': lambda: %s" % (key, val) for key, val in zip(arg_names, - arg_vals)]) - - stateful = { - "py_name": py_name, - "parent_name": identifier, - "real_name": "Macro Instantiation of " + macro_name, - "doc": "Instantiates the Macro", - "py_expr": "Macro(_root.joinpath('%s'), %s, '%s'," - " time_initialization=lambda: __data['time']," - " py_name='%s')" % (filename, func_args, macro_name, py_name), - "unit": "None", - "lims": "None", - "eqn": "None", - "subs": "", - "merge_subs": None, - "dependencies": deps, - "kind": "stateful", - "arguments": "", - } - - return "%s()" % stateful["py_name"], [stateful] - - -def add_incomplete(var_name, dependencies): - """ - Incomplete functions don't really need to be 'builders' as they - add no new real structure, but it's helpful to have a function - in which we can raise a warning about the incomplete equation - at translate time. - - parameters - ---------- - var_name: str - The python-safe name of the incomplete variable. - - dependencies: list - The list of the dependencies in the variable. - - Returns - ------- - str: - Inclompete funcion call. - - """ - Imports.add("functions", "incomplete") - - warnings.warn( - "%s has no equation specified" % var_name, SyntaxWarning, stacklevel=2 - ) - - # first arg is `self` reference - return "incomplete(%s)" % ", ".join(dependencies), [] - - -def build_dependencies(deps, exps): - # TODO document - - deps_dict = {"initial": {}, "step": {}} - - for target, exprs in exps.items(): - expr = ".".join(exprs) - for dep in deps: - n_calls = len( - re.findall( - "(? 0: - deps_dict[target][dep] = n_calls - - return deps_dict - - -def build_function_call(function_def, user_arguments, dependencies=set()): - """ - Build a function call using the arguments from the original model. - - Parameters - ---------- - function_def: dict - Function definition map with following keys: - - name: name of the function. - - parameters: list with description of all parameters of this function - - name - - optional? - - type: [ - "expression", - provide converted expression as parameter for - runtime evaluating before the method call - "lambda", - provide lambda expression as parameter for - delayed runtime evaluation in the method call - "time", - provide access to current instance of - time object - "scope", - provide access to current instance of - scope object (instance of Macro object) - "predef" - provide an invariant argument. Argument not - given in Vensim/Xmile but needed for python. - "ignore" - ignore an user argument. Argument given in - Vensim/Xmile but not needed for python. - "subs_range_to_list" - - provides the list of subscripts in a given - subscript range - ] - - user_arguments: list - Arguments provided from model. - - dependencies: set (optional) - Set to update dependencies if needed. - - Returns - ------- - str: - Function call. - - """ - if isinstance(function_def, str): - return function_def + "(" + ",".join(user_arguments) + ")" - - if function_def["name"] == "not_implemented_function": - user_arguments = ["'" + function_def["original_name"] + "'"] + \ - user_arguments - warnings.warn( - "\n\nTrying to translate " - + function_def["original_name"] - + " which it is not implemented on PySD. The translated " - + "model will crash... 
" - ) - - if "module" in function_def: - if function_def["module"] in ["numpy", "xarray"]: - # import external modules - Imports.add(function_def["module"]) - else: - # import method from PySD module - Imports.add(function_def["module"], function_def["name"]) - - if "parameters" in function_def: - parameters = function_def["parameters"] - user_argument = "" - arguments = [] - argument_idx = 0 - for parameter_idx in range(len(parameters)): - parameter_def = parameters[parameter_idx] - is_optional = ( - parameter_def["optional"] if "optional" in parameter_def else - False - ) - if argument_idx >= len(user_arguments) and is_optional: - break - - parameter_type = ( - parameter_def["type"] if "type" in parameter_def else - "expression") - - if parameter_type in ["expression", - "lambda", - "subs_range_to_list"]: - user_argument = user_arguments[argument_idx] - argument_idx += 1 - elif parameter_type == "time": - if "time" in dependencies: - dependencies["time"] += 1 - else: - dependencies["time"] = 1 - elif parameter_type == "ignore": - argument_idx += 1 - continue - - arguments.append( - { - "expression": user_argument, - "lambda": "lambda: " + user_argument, - "time": "__data['time']", - "scope": "__data['scope']", - "predef": parameter_def["name"], - "subs_range_to_list": f"_subscript_dict['{user_argument}']" - }[parameter_type] - ) - - return function_def["name"] + "(" + ", ".join(arguments) + ")" - - return function_def["name"] + "(" + ",".join(user_arguments) + ")" diff --git a/pysd/translation/utils.py b/pysd/translation/utils.py deleted file mode 100644 index d967c14d..00000000 --- a/pysd/translation/utils.py +++ /dev/null @@ -1,522 +0,0 @@ -""" -These are general utilities used by the builder.py, functions.py or the -model file. Vensim's function equivalents should not go here but in -functions.py -""" - -import warnings -from collections.abc import Mapping - -import regex as re -import numpy as np - -# used to create python safe names with the variable reserved_words -from keyword import kwlist -from builtins import __dir__ as bidir -from ..py_backend.components import __dir__ as cdir -from ..py_backend.data import __dir__ as ddir -from ..py_backend.decorators import __dir__ as dedir -from ..py_backend.external import __dir__ as edir -from ..py_backend.functions import __dir__ as fdir -from ..py_backend.statefuls import __dir__ as sdir -from ..py_backend.utils import __dir__ as udir - - -reserved_words = set( - dir() + bidir() + cdir() + ddir() + dedir() + edir() + fdir() - + sdir() + udir()) -reserved_words = reserved_words.union(kwlist) - - -def find_subscript_name(subscript_dict, element, avoid=[]): - """ - Given a subscript dictionary, and a member of a subscript family, - return the first key of which the member is within the value list. - If element is already a subscript name, return that. - - Parameters - ---------- - subscript_dict: dict - Follows the {'subscript name':['list','of','subscript','elements']} - format. - - element: str - - avoid: list (optional) - List of subscripts to avoid. Default is an empty list. - - Returns - ------- - - Examples - -------- - >>> find_subscript_name({'Dim1': ['A', 'B'], - ... 'Dim2': ['C', 'D', 'E'], - ... 'Dim3': ['F', 'G', 'H', 'I']}, - ... 'D') - 'Dim2' - >>> find_subscript_name({'Dim1': ['A', 'B'], - ... 'Dim2': ['A', 'B'], - ... 'Dim3': ['A', 'B']}, - ... 'B') - 'Dim1' - >>> find_subscript_name({'Dim1': ['A', 'B'], - ... 'Dim2': ['A', 'B'], - ... 'Dim3': ['A', 'B']}, - ... 'B', - ... 
avoid=['Dim1']) - 'Dim2' - """ - if element in subscript_dict.keys(): - return element - - for name, elements in subscript_dict.items(): - if element in elements and name not in avoid: - return name - - -def make_coord_dict(subs, subscript_dict, terse=True): - """ - This is for assisting with the lookup of a particular element, such that - the output of this function would take the place of %s in this expression. - - `variable.loc[%s]` - - Parameters - ---------- - subs: list of strings - coordinates, either as names of dimensions, or positions within - a dimension. - - subscript_dict: dict - the full dictionary of subscript names and values. - - terse: bool (optional) - If True, includes only elements that do not cover the full range of - values in their respective dimension.If False, returns all dimensions. - Default is True. - - Returns - ------- - coordinates: dict - Coordinates needed to access the xarray quantities we're interested in. - - Examples - -------- - >>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], - ... 'Dim2': ['D', 'E', 'F']}) - {'Dim2': ['D']} - >>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], - ... 'Dim2':['D', 'E', 'F']}, terse=False) - {'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']} - - """ - sub_elems_list = [y for x in subscript_dict.values() for y in x] - coordinates = {} - for sub in subs: - if sub in sub_elems_list: - name = find_subscript_name(subscript_dict, sub, avoid=subs) - coordinates[name] = [sub] - elif not terse: - coordinates[sub] = subscript_dict[sub] - return coordinates - - -def make_merge_list(subs_list, subscript_dict, element=""): - """ - This is for assisting when building xrmerge. From a list of subscript - lists returns the final subscript list after mergin. Necessary when - merging variables with subscripts comming from different definitions. - - Parameters - ---------- - subs_list: list of lists of strings - Coordinates, either as names of dimensions, or positions within - a dimension. - - subscript_dict: dict - The full dictionary of subscript names and values. - - element: str (optional) - Element name, if given it will be printed with any error or - warning message. Default is "". - - Returns - ------- - dims: list - Final subscripts after merging. - - Examples - -------- - >>> make_merge_list([['upper'], ['C']], {'all': ['A', 'B', 'C'], - ... 
'upper': ['A', 'B']}) - ['all'] - - """ - coords_set = [set() for i in range(len(subs_list[0]))] - coords_list = [ - make_coord_dict(subs, subscript_dict, terse=False) - for subs in subs_list - ] - - # update coords set - [[coords_set[i].update(coords[dim]) for i, dim in enumerate(coords)] - for coords in coords_list] - - dims = [None] * len(coords_set) - # create an array with the name of the subranges for all merging elements - dims_list = np.array([list(coords) for coords in coords_list]).transpose() - indexes = np.arange(len(dims)) - - for i, coord2 in enumerate(coords_set): - dims1 = [ - dim for dim in dims_list[i] - if dim is not None and set(subscript_dict[dim]) == coord2 - ] - if dims1: - # if the given coordinate already matches return it - dims[i] = dims1[0] - else: - # find a suitable coordinate - other_dims = dims_list[indexes != i] - for name, elements in subscript_dict.items(): - if coord2 == set(elements) and name not in other_dims: - dims[i] = name - break - - if not dims[i]: - # the dimension is incomplete use the smaller - # dimension that completes it - for name, elements in subscript_dict.items(): - if coord2.issubset(set(elements))\ - and name not in other_dims: - dims[i] = name - warnings.warn( - element - + "\nDimension given by subscripts:" - + "\n\t{}\nis incomplete ".format(coord2) - + "using {} instead.".format(name) - + "\nSubscript_dict:" - + "\n\t{}".format(subscript_dict) - ) - break - - if not dims[i]: - for name, elements in subscript_dict.items(): - if coord2 == set(elements): - j = 1 - while name + str(j) in subscript_dict.keys(): - j += 1 - subscript_dict[name + str(j)] = elements - dims[i] = name + str(j) - warnings.warn( - element - + "\nAdding new subscript range to" - + " subscript_dict:\n" - + name + str(j) + ": " + ', '.join(elements)) - break - - if not dims[i]: - # not able to find the correct dimension - raise ValueError( - element - + "\nImpossible to find the dimension that contains:" - + "\n\t{}\nFor subscript_dict:".format(coord2) - + "\n\t{}".format(subscript_dict) - ) - - return dims - - -def make_python_identifier(string, namespace=None): - """ - Takes an arbitrary string and creates a valid Python identifier. - - If the input string is in the namespace, return its value. - - If the python identifier created is already in the namespace, - but the input string is not (ie, two similar strings resolve to - the same python identifier) - - or if the identifier is a reserved word in the reserved_words - list, or is a python default reserved word, - adds _1, or if _1 is in the namespace, _2, etc. - - Parameters - ---------- - string: str - The text to be converted into a valid python identifier. - - namespace: dict - Map of existing translations into python safe identifiers. - This is to ensure that two strings are not translated into - the same python identifier. If string is already in the namespace - its value will be returned. Otherwise, namespace will be mutated - adding string as a new key and its value. - - Returns - ------- - identifier: str - A vaild python identifier based on the input string. 
-
-    Examples
-    --------
-    >>> make_python_identifier('Capital')
-    'capital'
-
-    >>> make_python_identifier('multiple words')
-    'multiple_words'
-
-    >>> make_python_identifier('multiple spaces')
-    'multiple_spaces'
-
-    When the name is a python keyword, add '_1' to differentiate it
-    >>> make_python_identifier('for')
-    'for_1'
-
-    Remove leading and trailing whitespace
-    >>> make_python_identifier(' whitespace ')
-    'whitespace'
-
-    Remove most special characters outright:
-    >>> make_python_identifier('H@t tr!ck')
-    'ht_trck'
-
-    Remove leading digits
-    >>> make_python_identifier('123abc')
-    'nvs_123abc'
-
-    Already in namespace
-    >>> make_python_identifier('Var$', namespace={'Var$': 'var'})
-    'var'
-
-    Namespace conflicts
-    >>> make_python_identifier('Var@', namespace={'Var$': 'var'})
-    'var_1'
-
-    >>> make_python_identifier('Var$', namespace={'Var@': 'var',
-    ...                                           'Var%': 'var_1'})
-    'var_2'
-
-    References
-    ----------
-    Identifiers must follow the convention outlined here:
-        https://docs.python.org/2/reference/lexical_analysis.html#identifiers
-
-    """
-    if namespace is None:
-        namespace = dict()
-
-    if string in namespace:
-        return namespace[string]
-
-    # create a working copy (and make it lowercase, while we're at it)
-    s = string.lower()
-
-    # remove leading and trailing whitespace
-    s = s.strip()
-
-    # Make spaces into underscores
-    s = re.sub(r"[\s\t\n]+", "_", s)
-
-    # Remove invalid characters
-    s = re.sub(r"[^\p{l}\p{m}\p{n}_]", "", s)
-
-    # If the leading character is not a letter, add nvs_.
-    # Only letters can be leading characters.
-    if re.findall(r"^[^\p{l}_]", s):
-        s = "nvs_" + s
-    elif re.findall(r"^_", s):
-        s = "nvs" + s
-
-    # reserve the names of PySD functions and methods and other vars
-    # in the namespace
-    used_words = reserved_words.union(namespace.values())
-
-    # if the identifier is already used, append a number to make it unique
-    identifier = s
-    i = 1
-    while identifier in used_words:
-        identifier = s + '_' + str(i)
-        i += 1
-
-    namespace[string] = identifier
-
-    return identifier
-
-
-def make_add_identifier(identifier, build_names):
-    """
-    Takes an existing used Python identifier and attaches a unique
-    identifier with an ADD_# ending.
-
-    Used to add new information to an existing external object.
-    build_names will be updated inside this function, as sets
-    are mutable.
-
-    Parameters
-    ----------
-    identifier: str
-        Existing python identifier.
-
-    build_names: set
-        Set of the already used identifiers for external objects.
-
-    Returns
-    -------
-    identifier: str
-        A valid python identifier based on the input identifier
-        and the existing ones.
-
-    """
-    identifier += "ADD_"
-    number = 1
-    # iterate until finding an unused identifier
-    while identifier + str(number) in build_names:
-        number += 1
-
-    # update identifier
-    identifier += str(number)
-
-    # update the build names
-    build_names.add(identifier)
-
-    return identifier
-
-
-def simplify_subscript_input(coords, subscript_dict, return_full, merge_subs):
-    """
-    Simplifies the coordinates to be written in the model file.
-
-    Parameters
-    ----------
-    coords: dict
-        Coordinates to write in the model file.
-
-    subscript_dict: dict
-        The subscript dictionary of the model file.
-
-    return_full: bool
-        If True, when coords == subscript_dict, '_subscript_dict'
-        will be returned.
-
-    merge_subs: list of strings
-        List of the final subscript ranges of the python array after
-        merging with other objects.
-
-    Returns
-    -------
-    coords: str
-        The expression to generate the coordinates dictionary in the
-        model file.
-
-    """
-
-    if coords == subscript_dict and return_full:
-        # variable defined with all the subscripts
-        return "_subscript_dict"
-
-    coordsp = []
-    for ndim, (dim, coord) in zip(merge_subs, coords.items()):
-        # find which dimensions can be retrieved from _subscript_dict
-        if coord == subscript_dict[dim]:
-            # use _subscript_dict
-            coordsp.append(f"'{ndim}': _subscript_dict['{dim}']")
-        else:
-            # write whole dict
-            coordsp.append(f"'{ndim}': {coord}")
-
-    return "{" + ", ".join(coordsp) + "}"
-
-
-def add_entries_underscore(*dictionaries):
-    """
-    Expands dictionaries by adding new keys in which the white spaces of the
-    old keys are replaced with underscores. As dictionaries are mutable
-    objects, this function adds the new entries to the existing dictionaries,
-    so there is no need to return a new one.
-
-    Parameters
-    ----------
-    *dictionaries: dict(s)
-        The dictionary or dictionaries to which the underscored entries
-        are added.
-
-    Returns
-    -------
-    None
-
-    """
-    for dictionary in dictionaries:
-        keys = list(dictionary)
-        for name in keys:
-            dictionary[re.sub(" ", "_", name)] = dictionary[name]
-    return
-
-
-def clean_file_names(*args):
-    """
-    Removes special characters and makes clean file names.
-
-    Parameters
-    ----------
-    *args: tuple
-        Any number of strings to clean.
-
-    Returns
-    -------
-    clean: list
-        List containing the clean strings.
-    """
-    clean = []
-    for name in args:
-        clean.append(re.sub(
-            r"[\W]+", "", name.replace(" ", "_")
-            ).lstrip("0123456789")
-        )
-    return clean
-
-
-def merge_nested_dicts(original_dict, dict_to_merge):
-    """
-    Merge dictionaries recursively, preserving common keys.
-
-    Parameters
-    ----------
-    original_dict: dict
-        Dictionary onto which the merge is executed.
-
-    dict_to_merge: dict
-        Dictionary to be merged into the original_dict.
-
-    Returns
-    -------
-    None
-
-    """
-    for k, v in dict_to_merge.items():
-        if (k in original_dict and isinstance(original_dict[k], dict)
-                and isinstance(dict_to_merge[k], Mapping)):
-            merge_nested_dicts(original_dict[k], dict_to_merge[k])
-        else:
-            original_dict[k] = dict_to_merge[k]
-
-
-def update_dependency(dependency, deps_dict):
-    """
-    Update a dependency in the dependencies dict.
-
-    Parameters
-    ----------
-    dependency: str
-        The dependency to add to the dependency dict.
-
-    deps_dict: dict
-        The dictionary of dependencies. If dependency is in deps_dict, add 1
-        to its value. Otherwise, add dependency to deps_dict with value 1.
-
-    Returns
-    -------
-    None
-
-    """
-    if dependency in deps_dict:
-        deps_dict[dependency] += 1
-    else:
-        deps_dict[dependency] = 1
diff --git a/pysd/translation/vensim/parsing_grammars/common_grammar.peg b/pysd/translation/vensim/parsing_grammars/common_grammar.peg
index 329bba92..7517cad5 100644
--- a/pysd/translation/vensim/parsing_grammars/common_grammar.peg
+++ b/pysd/translation/vensim/parsing_grammars/common_grammar.peg
@@ -16,4 +16,4 @@ raw_number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?"
string = "\'" (~r"[^\']"IU)* "\'" range = _ "[" ~r"[^\]]*" "]" _ "," -_ = ~r"[\s\\]*" \ No newline at end of file +_ = ~r"[\s\\]*" diff --git a/pysd/translation/vensim/parsing_grammars/components.peg b/pysd/translation/vensim/parsing_grammars/components.peg index 27592de3..2a8b6ae4 100644 --- a/pysd/translation/vensim/parsing_grammars/components.peg +++ b/pysd/translation/vensim/parsing_grammars/components.peg @@ -33,4 +33,4 @@ prod_oper = ~r"(%(prod_ops)s)"IU exp_oper = ~r"(%(exp_ops)s)"IU pre_oper = ~r"(%(pre_ops)s)"IU -empty = "" # empty string \ No newline at end of file +empty = "" # empty string diff --git a/pysd/translation/vensim/parsing_grammars/element_object.peg b/pysd/translation/vensim/parsing_grammars/element_object.peg index 58fb8310..298a7415 100644 --- a/pysd/translation/vensim/parsing_grammars/element_object.peg +++ b/pysd/translation/vensim/parsing_grammars/element_object.peg @@ -44,4 +44,4 @@ subscript = basic_id / escape_group # Other definitions subscript_component = subscript_list _ subscript_list_except? expression = ~r".*" # expression could be anything, at this point. -keyword = ":" _ basic_id _ ":" \ No newline at end of file +keyword = ":" _ basic_id _ ":" diff --git a/pysd/translation/vensim/vensim2py.py b/pysd/translation/vensim/vensim2py.py deleted file mode 100644 index 8bcec591..00000000 --- a/pysd/translation/vensim/vensim2py.py +++ /dev/null @@ -1,1972 +0,0 @@ -""" -These functions translate vensim .mdl file to pieces needed by the builder -module to write a python version of the model. Everything that requires -knowledge of vensim syntax should be here. -""" - -import pathlib -import re -import warnings -from io import open -from chardet import detect - -import numpy as np -import parsimonious -from parsimonious.exceptions import IncompleteParseError,\ - VisitationError,\ - ParseError - -from .. import builder, utils -from ...py_backend.external import ExtSubscript -from ...py_backend.utils import compute_shape - - -def get_file_sections(file_str): - """ - This is where we separate out the macros from the rest of the model file. - Working based upon documentation at: - https://www.vensim.com/documentation/index.html?macros.htm - - Macros will probably wind up in their own python modules eventually. - - Parameters - ---------- - file_str: str - File content to parse. - - Returns - ------- - entries: list of dictionaries - Each dictionary represents a different section of the model file, - either a macro, or the main body of the model file. The - dictionaries contain various elements: - - returns: list of strings - represents what is returned from a macro (for macros) or - empty for main model - - params: list of strings - represents what is passed into a macro (for macros) or - empty for main model - - name: string - the name of the macro, or 'main' for main body of model - - string: string - string representing the model section - - Examples - -------- - >>> get_file_sections(r'a~b~c| d~e~f| g~h~i|') - [{'returns': [], 'params': [], 'name': 'main', 'string': 'a~b~c| d~e~f| g~h~i|'}] - - """ - - # the leading 'r' for 'raw' in this string is important for - # handling backslashes properly - file_structure_grammar = _include_common_grammar( - r""" - file = encoding? (macro / main)+ - macro = ":MACRO:" _ name _ "(" _ (name _ ","? _)+ _ ":"? _ (name _ ","? 
_)* _ ")" ~r".+?(?=:END OF MACRO:)" ":END OF MACRO:" - main = !":MACRO:" ~r".+(?!:MACRO:)" - encoding = ~r"\{[^\}]*\}" - """ - ) - parser = parsimonious.Grammar(file_structure_grammar) - tree = parser.parse(file_str) - - class FileParser(parsimonious.NodeVisitor): - def __init__(self, ast): - self.entries = [] - self.visit(ast) - - def visit_main(self, n, vc): - self.entries.append( - { - "name": "_main_", - "params": [], - "returns": [], - "string": n.text.strip(), - } - ) - - def visit_macro(self, n, vc): - name = vc[2] - params = vc[6] - returns = vc[10] - text = vc[13] - self.entries.append( - { - "name": name, - "params": [x.strip() for x in params.split(",") - ] if params else [], - "returns": [x.strip() for x in returns.split(",")] - if returns - else [], - "string": text.strip(), - } - ) - - def generic_visit(self, n, vc): - return "".join(filter(None, vc)) or n.text or "" - - return FileParser(tree).entries - - -def get_model_elements(model_str): - """ - Takes in a string representing model text and splits it into elements - - All newline characters were alreeady removed in a previous step. - - Parameters - ---------- - model_str : str - Model file content to read. - - Returns - ------- - entries : array of dictionaries - Each dictionary contains the components of a different model element, - separated into the equation, units, and docstring. - - Examples - -------- - # Basic Parsing: - >>> get_model_elements(r'a~b~c| d~e~f| g~h~i|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - - # Special characters are escaped within double-quotes: - >>> get_model_elements(r'a~b~c| d~e"~"~f| g~h~i|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - >>> get_model_elements(r'a~b~c| d~e~"|"f| g~h~i|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - - # Double-quotes within escape groups are themselves escaped with - # backslashes: - >>> get_model_elements(r'a~b~c| d~e"\\\"~"~f| g~h~i|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"\\\\"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - >>> get_model_elements(r'a~b~c| d~e~"\\\"|"f| g~h~i|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"\\\\"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - >>> get_model_elements(r'a~b~c| d~e"x\\nx"~f| g~h~|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"x\\\\nx"', 'eqn': 'd'}, {'doc': '', 'unit': 'h', 'eqn': 'g'}] - - # Todo: Handle model-level or section-level documentation - >>> get_model_elements(r'*** .model doc ***~ Docstring!| d~e~f| g~h~i|') - [{'doc': 'Docstring!', 'unit': '', 'eqn': ''}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - - # Handle control sections, returning appropriate docstring pieces - >>> get_model_elements(r'a~b~c| ****.Control***~ Simulation Control Parameters | g~h~i|') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}] - - # Handle the model display elements (ignore them) - >>> get_model_elements(r'a~b~c| d~e~f| \\\---///junk|junk~junk') - [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}] - - - Notes - ----- - - Tildes and pipes are not allowed in element docstrings, but we should - still handle them there - - """ - - model_structure_grammar = _include_common_grammar( - r""" - model = (entry 
/ section)+ sketch? - entry = element "~" element "~" doc ("~" element)? "|" - section = element "~" element "|" - sketch = ~r".*" #anything - - # Either an escape group, or a character that is not tilde or pipe - element = ( escape_group / ~r"[^~|]")* - # Anything other that is not a tilde or pipe - doc = (~r"[^~|]")* - """ - ) - - parser = parsimonious.Grammar(model_structure_grammar) - tree = parser.parse(model_str) - - class ModelParser(parsimonious.NodeVisitor): - def __init__(self, ast): - self.entries = [] - self.visit(ast) - - def visit_entry(self, n, vc): - units, lims = parse_units(vc[2].strip()) - self.entries.append( - { - "eqn": vc[0].strip(), - "unit": units, - "lims": str(lims), - "doc": vc[4].strip(), - "kind": "entry", - } - ) - - def visit_section(self, n, vc): - if vc[2].strip() != "Simulation Control Parameters": - self.entries.append( - { - "eqn": "", - "unit": "", - "lims": "", - "doc": vc[2].strip(), - "kind": "section", - } - ) - - def generic_visit(self, n, vc): - return "".join(filter(None, vc)) or n.text or "" - - return ModelParser(tree).entries - - -def _include_common_grammar(source_grammar): - common_grammar = r""" - name = basic_id / escape_group - - # This takes care of models with Unicode variable names - basic_id = id_start id_continue* - - id_start = ~r"[\w]"IU - id_continue = id_start / ~r"[0-9\'\$\s\_]" - - # between quotes, either escaped quote or character that is not a quote - escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" - - _ = ~r"[\s\\]*" # whitespace character - """ - - return r""" - {source_grammar} - - {common_grammar} - """.format( - source_grammar=source_grammar, common_grammar=common_grammar - ) - - -def get_equation_components(equation_str, root_path=None): - """ - Breaks down a string representing only the equation part of a model - element. Recognizes the various types of model elements that may exist, - and identifies them. - - Parameters - ---------- - equation_str : basestring - the first section in each model element - the full equation. - - root_path: basestring - the root path of the vensim file (necessary to resolve external - data file paths) - - Returns - ------- - Returns a dictionary containing the following: - - real_name: basestring - The name of the element as given in the original vensim file - - subs: list of strings - list of subscripts or subscript elements - - expr: basestring - - kind: basestring - What type of equation have we found? - - *component* - normal model expression or constant - - *lookup* - a lookup table - - *subdef* - a subscript definition - - *data* - a data variable - - keyword: basestring or None - - Examples - -------- - >>> get_equation_components(r'constant = 25') - {'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'} - - Notes - ----- - in this function we don't create python identifiers, we use real names. - This is so that when everything comes back together, we can manage - any potential namespace conflicts properly - """ - - imp_subs_func_list = [ - "get xls subscript", - "get direct subscript", - "get_xls_subscript", - "get_direct_subscript", - ] - - component_structure_grammar = _include_common_grammar( - r""" - entry = component / ext_data_definition / data_definition / test_definition / subscript_definition / lookup_definition / subscript_copy - component = name _ subscriptlist? _ "=" "="? _ expression - subscript_definition = name _ ":" _ (imported_subscript / literal_subscript / numeric_range) _ subscript_mapping_list? - ext_data_definition = name _ subscriptlist? 
_ keyword? _ ":=" _ expression - data_definition = name _ subscriptlist? _ keyword - lookup_definition = name _ subscriptlist? &"(" _ expression # uses - # lookahead assertion to capture whole group - test_definition = name _ subscriptlist? _ &keyword _ expression - subscript_copy = name _ "<->" _ name_mapping - - name = basic_id / escape_group - - literal_subscript = index_list - imported_subscript = imp_subs_func _ "(" _ (string _ ","? _)* ")" - numeric_range = _ (range / value) _ ("," _ (range / value) _)* - value = _ sequence_id _ - range = "(" _ sequence_id _ "-" _ sequence_id _ ")" - subscriptlist = '[' _ index_list _ ']' - subscript_mapping_list = "->" _ subscript_mapping _ ("," _ subscript_mapping _)* - subscript_mapping = (_ name_mapping _) / (_ "(" _ name_mapping _ ":" _ index_list _")" ) - - expression = ~r".*" # expression could be anything, at this point. - keyword = ":" _ basic_id _ ":" - index_list = subscript _ ("," _ subscript _)* - name_mapping = basic_id / escape_group - sequence_id = _ basic_id _ - subscript = basic_id / escape_group - imp_subs_func = ~r"(%(imp_subs)s)"IU - string = "\'" ( "\\\'" / ~r"[^\']"IU )* "\'" - """ - % {"imp_subs": "|".join(imp_subs_func_list)} - ) - - # replace any amount of whitespace with a single space - equation_str = equation_str.replace("\\t", " ") - equation_str = re.sub(r"\s+", " ", equation_str) - - parser = parsimonious.Grammar(component_structure_grammar) - - class ComponentParser(parsimonious.NodeVisitor): - def __init__(self, ast): - self.subscripts = [] - self.subscripts_compatibility = {} - self.real_name = None - self.expression = None - self.kind = None - self.keyword = None - self.visit(ast) - - def visit_subscript_definition(self, n, vc): - self.kind = "subdef" - - def visit_lookup_definition(self, n, vc): - self.kind = "lookup" - - def visit_component(self, n, vc): - self.kind = "component" - - def visit_ext_data_definition(self, n, vc): - self.kind = "component" - - def visit_data_definition(self, n, vc): - self.kind = "data" - - def visit_test_definition(self, n, vc): - # TODO: add test for test - self.kind = "test" - - def visit_keyword(self, n, vc): - self.keyword = n.text.strip() - - def visit_imported_subscript(self, n, vc): - # TODO: make this less fragile - # TODO: allow reading the subscripts from Excel - # once the model has been translated - args = [x.strip().strip("'") for x in vc[4].split(",")] - self.subscripts += ExtSubscript(*args, root=root_path).subscript - - def visit_subscript_copy(self, n, vc): - self.kind = "subdef" - subs_copy1 = vc[4].strip() - subs_copy2 = vc[0].strip() - - if subs_copy1 not in self.subscripts_compatibility: - self.subscripts_compatibility[subs_copy1] = [] - - if subs_copy2 not in self.subscripts_compatibility: - self.subscripts_compatibility[subs_copy2] = [] - - self.subscripts_compatibility[subs_copy1].append(subs_copy2) - self.subscripts_compatibility[subs_copy2].append(subs_copy1) - - def visit_subscript_mapping(self, n, vc): - - warnings.warn( - "\n Subscript mapping detected." - + "This feature works only in some simple cases." 
- ) - - if ":" in str(vc): - # TODO: add test for this condition - # Obtain subscript name and split by : and ( - name_mapped = str(vc).split(":")[0].split("(")[1] - else: - (name_mapped,) = vc - - if self.real_name not in self.subscripts_compatibility: - self.subscripts_compatibility[self.real_name] = [] - self.subscripts_compatibility[self.real_name].append( - name_mapped.strip()) - - def visit_range(self, n, vc): - subs_start = vc[2].strip() - subs_end = vc[6].strip() - - # get the common prefix and the starting and - # ending number of the numeric range - subs_start = re.findall(r"\d+|\D+", subs_start) - subs_end = re.findall(r"\d+|\D+", subs_end) - prefix_start = "".join(subs_start[:-1]) - prefix_end = "".join(subs_end[:-1]) - num_start = int(subs_start[-1]) - num_end = int(subs_end[-1]) - - if not prefix_start or not prefix_end: - raise ValueError( - "\nA numeric range must contain at least one letter.") - elif num_start >= num_end: - raise ValueError( - "\nThe number of the first subscript value must be " - "lower than the second subscript value in a " - "subscript numeric range.") - elif (prefix_start != prefix_end - or subs_start[0].isdigit() - or subs_end[0].isdigit()): - raise ValueError( - "\nOnly matching names ending in numbers are valid.") - - for i in range(num_start, num_end + 1): - s = prefix_start + str(i) - self.subscripts.append(s.strip()) - - def visit_value(self, n, vc): - self.subscripts.append(vc[1].strip()) - - def visit_name(self, n, vc): - (name,) = vc - self.real_name = name.strip() - return self.real_name - - def visit_subscript(self, n, vc): - (subscript,) = vc - self.subscripts.append(subscript.strip()) - return subscript.strip() - - def visit_expression(self, n, vc): - self.expression = n.text.strip() - - def generic_visit(self, n, vc): - return "".join(filter(None, vc)) or n.text - - def visit__(self, n, vc): - return " " - - try: - tree = parser.parse(equation_str) - parse_object = ComponentParser(tree) - except (IncompleteParseError, VisitationError, ParseError) as err: - # this way we get the element name and equation and is easier - # to detect the error in the model file - raise ValueError( - err.args[0] + "\n\n" - "\nError when parsing definition:\n\t %s\n\n" - "probably used definition is invalid or not integrated..." - "\nSee parsimonious output above." % (equation_str) - ) - - return { - "real_name": parse_object.real_name, - "subs": parse_object.subscripts, - "subs_compatibility": parse_object.subscripts_compatibility, - "expr": parse_object.expression, - "kind": parse_object.kind, - "keyword": parse_object.keyword, - } - - -def parse_sketch_line(sketch_line, namespace): - """ - This syntax parses a single line of the Vensim sketch at a time. - - Not all possibilities can be tested, so this gammar may be considered - experimental for now - - """ - - sketch_grammar = _include_common_grammar( - r""" - line = var_definition / view_intro / view_title / view_definition / arrow / flow / other_objects / anything - view_intro = ~r"\s*Sketch.*?names$" / ~r"^V300.*?ignored$" - view_title = "*" view_name - view_name = ~r"(?<=\*)[^\n]+$" - view_definition = "$" color "," digit "," font_properties "|" ( ( color / ones_and_dashes ) "|")* view_code - var_definition = var_code "," var_number "," var_name "," position "," var_box_type "," arrows_in_allowed "," hide_level "," var_face "," var_word_position "," var_thickness "," var_rest_conf ","? ( ( ones_and_dashes / color) ",")* font_properties? ","? extra_bytes? 
- # elements used in a line defining the properties of a variable or stock - var_name = element - var_name = ~r"(?<=,)[^,]+(?=,)" - var_number = digit - var_box_type = ~r"(?<=,)\d+,\d+,\d+(?=,)" # improve this regex - arrows_in_allowed = ~r"(?<=,)\d+(?=,)" # if this is an even number, - # it's a shadow variable - hide_level = digit - var_face = digit - var_word_position = ~r"(?<=,)\-*\d+(?=,)" - var_thickness = digit - var_rest_conf = digit "," ~r"\d+" - extra_bytes = ~r"\d+,\d+,\d+,\d+,\d+,\d+" # required since Vensim 8.2.1 - arrow = arrow_code "," digit "," origin_var "," destination_var "," (digit ",")+ (ones_and_dashes ",")? ((color ",") / ("," ~r"\d+") / (font_properties "," ~r"\d+"))* "|(" position ")|" - # arrow origin and destination (this may be useful if further - # parsing is required) - origin_var = digit - destination_var = digit - # flow arrows - flow = source_or_sink_or_plot / flow_arrow - # if you want to extend the parsing, these three would be a good - # starting point (they are followed by "anything") - source_or_sink_or_plot = multipurpose_code "," anything - flow_arrow = flow_arrow_code "," anything - other_objects = other_objects_code "," anything - # fonts - font_properties = font_name? "|" font_size "|" font_style? "|" color - font_style = "B" / "I" / "U" / "S" / "V" # italics, bold, underline, etc - font_size = ~r"\d+" # this needs to be made a regex to match any font - font_name = ~r"(?<=,)[^\|\d]+(?=\|)" - # x and y within the view layout. This may be useful if further - # parsing is required - position = ~r"-*\d+,-*\d+" - # rgb color (e.g. 255-255-255) - color = ~r"((?>> parse_units('Widgets/Month [-10,10,1]') - ('Widgets/Month', (-10,10,1)) - - >>> parse_units('Month [0,?]') - ('Month', [-10, None]) - - >>> parse_units('Widgets [0,100]') - ('Widgets', (0, 100)) - - >>> parse_units('Widgets') - ('Widgets', (None, None)) - - >>> parse_units('[0, 100]') - ('', (0, 100)) - - """ - if not len(units_str): - return units_str, (None, None) - - if units_str[-1] == "]": - units, lims = units_str.rsplit("[") # types: str, str - else: - units = units_str - lims = "?, ?]" - - lims = tuple( - [float(x) if x.strip() != "?" 
else None for x in lims.strip("]").split( - ",")] - ) - - return units.strip(), lims - - -functions = { - # element-wise functions - "abs": {"name": "np.abs", "module": "numpy"}, - "min": {"name": "np.minimum", "module": "numpy"}, - "max": {"name": "np.maximum", "module": "numpy"}, - "exp": {"name": "np.exp", "module": "numpy"}, - "sin": {"name": "np.sin", "module": "numpy"}, - "cos": {"name": "np.cos", "module": "numpy"}, - "tan": {"name": "np.tan", "module": "numpy"}, - "arcsin": {"name": "np.arcsin", "module": "numpy"}, - "arccos": {"name": "np.arccos", "module": "numpy"}, - "arctan": {"name": "np.arctan", "module": "numpy"}, - "sinh": {"name": "np.sinh", "module": "numpy"}, - "cosh": {"name": "np.cosh", "module": "numpy"}, - "tanh": {"name": "np.tanh", "module": "numpy"}, - "sqrt": {"name": "np.sqrt", "module": "numpy"}, - "integer": {"name": "integer", "module": "functions"}, - "quantum": {"name": "quantum", "module": "functions"}, - "modulo": {"name": "modulo", "module": "functions"}, - "xidz": {"name": "xidz", "module": "functions"}, - "zidz": {"name": "zidz", "module": "functions"}, - "ln": {"name": "np.log", "module": "numpy"}, - "log": {"name": "log", "module": "functions"}, - "lognormal": {"name": "np.random.lognormal", "module": "numpy"}, - "random normal": {"name": "bounded_normal", "module": "functions"}, - "poisson": {"name": "np.random.poisson", "module": "numpy"}, - "exprnd": {"name": "np.random.exponential", "module": "numpy"}, - "random 0 1": { - "name": "np.random.uniform", - "parameters": [ - {"name": "0", "type": "predef"}, - {"name": "1", "type": "predef"} - ], - "module": "numpy"}, - "random uniform": { - "name": "np.random.uniform", - "parameters": [ - {"name": "m"}, - {"name": "x"}, - {"name": "s", "type": "ignore"} - ], - "module": "numpy"}, - "elmcount": { - "name": "len", - "parameters": [ - {"name": "subs_range", "type": "subs_range_to_list"}, - ] - }, - "if then else": { - "name": "if_then_else", - "parameters": [ - {"name": "condition"}, - {"name": "val_if_true", "type": "lambda"}, - {"name": "val_if_false", "type": "lambda"}, - ], - "module": "functions", - }, - "step": { - "name": "step", - "parameters": [ - {"name": "time", "type": "time"}, - {"name": "value"}, - {"name": "tstep"}, - ], - "module": "functions", - }, - "pulse": { - "name": "pulse", - "parameters": [ - {"name": "time", "type": "time"}, - {"name": "start"}, - {"name": "duration"}, - ], - "module": "functions", - }, - # time, start, duration, repeat_time, end - "pulse train": { - "name": "pulse_train", - "parameters": [ - {"name": "time", "type": "time"}, - {"name": "start"}, - {"name": "duration"}, - {"name": "repeat_time"}, - {"name": "end"}, - ], - "module": "functions", - }, - "ramp": { - "name": "ramp", - "parameters": [ - {"name": "time", "type": "time"}, - {"name": "slope"}, - {"name": "start"}, - {"name": "finish"}, - ], - "module": "functions", - }, - "active initial": { - "name": "active_initial", - "parameters": [ - {"name": "time", "type": "time"}, - {"name": "expr", "type": "lambda"}, - {"name": "init_val"}, - ], - "module": "functions", - }, - "game": "", # In the future, may have an actual `functions.game` to pass - # vector functions - "sum": {"name": "sum", "module": "functions"}, - "prod": {"name": "prod", "module": "functions"}, - "vmin": {"name": "vmin", "module": "functions"}, - "vmax": {"name": "vmax", "module": "functions"}, - # matricial functions - "invert matrix": { - "name": "invert_matrix", - "parameters": [ - {"name": "mat"}, - {"name": "n", "type": "ignore"} - # 
we can safely ignore VENSIM's n parameter - ], - "module": "functions"}, - # TODO functions/stateful objects to be added - "get time value": { - "name": "not_implemented_function", - "module": "functions", - "original_name": "GET TIME VALUE", - }, - # https://github.com/JamesPHoughton/pysd/issues/263 - "allocate by priority": { - "name": "not_implemented_function", - "module": "functions", - "original_name": "ALLOCATE BY PRIORITY", - }, - # https://github.com/JamesPHoughton/pysd/issues/266 - "vector select": { - "name": "not_implemented_function", - "module": "functions", - "original_name": "VECTOR SELECT", - }, - # https://github.com/JamesPHoughton/pysd/issues/265 - "shift if true": { - "name": "not_implemented_function", - "module": "functions", - "original_name": "SHIFT IF TRUE", - }, -} - - -# list of fuctions that accept a dimension to apply over -vectorial_funcs = ["sum", "prod", "vmax", "vmin"] - -# other functions -functions_utils = { - "lookup": {"name": "lookup", "module": "functions"}, - "rearrange": {"name": "rearrange", "module": "utils"}, - "DataArray": {"name": "xr.DataArray", "module": "xarray"}, -} - -# logical operators (bool? operator bool) -in_logical_ops = { - ":and:": { - "name": "logical_and", - "module": "functions" - }, - ":or:": { - "name": "logical_or", - "module": "functions" - } -} - -pre_logical_ops = { - ":not:": { - "name": "np.logical_not", - "module": "numpy" - } -} - -data_ops = { - "get data at time": "", - "get data between times": "", - "get data last time": "", - "get data max": "", - "get data min": "", - "get data median": "", - "get data mean": "", - "get data stdv": "", - "get data total points": "", -} - -builders = { - "integ": lambda element, subscript_dict, args: - builder.add_stock( - identifier=element["py_name"], - expression=args[0], - initial_condition=args[1], - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "delay1": lambda element, subscript_dict, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[0], - order="1", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "delay1i": lambda element, subscript_dict, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - order="1", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "delay3": lambda element, subscript_dict, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[0], - order="3", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "delay3i": lambda element, subscript_dict, args: - builder.add_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - order="3", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "delay fixed": lambda element, subscript_dict, args: - builder.add_delay_f( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - deps=element["dependencies"] - ), - "delay n": lambda element, subscript_dict, args: - builder.add_n_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2], - order=args[3], - subs=element["subs"], - merge_subs=element["merge_subs"], - 
deps=element["dependencies"] - ), - "forecast": lambda element, subscript_dict, args: - builder.add_forecast( - identifier=element["py_name"], - forecast_input=args[0], - average_time=args[1], - horizon=args[2], - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "sample if true": lambda element, subscript_dict, args: - builder.add_sample_if_true( - identifier=element["py_name"], - condition=args[0], - actual_value=args[1], - initial_value=args[2], - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "smooth": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[0], - order="1", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "smoothi": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2], - order="1", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "smooth3": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[0], - order="3", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "smooth3i": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2], - order="3", - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "smooth n": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2], - order=args[3], - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "trend": lambda element, subscript_dict, args: - builder.add_n_trend( - identifier=element["py_name"], - trend_input=args[0], - average_time=args[1], - initial_trend=args[2], - subs=element["subs"], - merge_subs=element["merge_subs"], - deps=element["dependencies"] - ), - "get xls data": lambda element, subscript_dict, args: - builder.add_ext_data( - identifier=element["py_name"], - file_name=args[0], - tab=args[1], - time_row_or_col=args[2], - cell=args[3], - subs=element["subs"], - subscript_dict=subscript_dict, - merge_subs=element["merge_subs"], - keyword=element["keyword"], - ), - "get xls constants": lambda element, subscript_dict, args: - builder.add_ext_constant( - identifier=element["py_name"], - file_name=args[0], - tab=args[1], - cell=args[2], - subs=element["subs"], - subscript_dict=subscript_dict, - merge_subs=element["merge_subs"], - ), - "get xls lookups": lambda element, subscript_dict, args: - builder.add_ext_lookup( - identifier=element["py_name"], - file_name=args[0], - tab=args[1], - x_row_or_col=args[2], - cell=args[3], - subs=element["subs"], - subscript_dict=subscript_dict, - merge_subs=element["merge_subs"], - ), - "initial": lambda element, subscript_dict, args: - builder.add_initial( - identifier=element["py_name"], - value=args[0], - deps=element["dependencies"] - ), - "a function of": lambda element, subscript_dict, args: - builder.add_incomplete( - element["real_name"], args - ), -} - -# direct and xls methods are identically implemented in PySD 
-builders["get direct data"] = builders["get xls data"] -builders["get direct lookups"] = builders["get xls lookups"] -builders["get direct constants"] = builders["get xls constants"] - -# expand dictionaries to detect _ in Vensim def -utils.add_entries_underscore(functions, data_ops, builders) - - -def parse_general_expression(element, namespace={}, subscript_dict={}, - macro_list=None, elements_subs_dict={}, - subs_compatibility={}): - """ - Parses a normal expression - # its annoying that we have to construct and compile the grammar every - # time... - - Parameters - ---------- - element: dictionary - - namespace : dictionary - - subscript_dict : dictionary - - macro_list: list of dictionaries - [{'name': 'M', 'py_name':'m', 'filename':'path/to/file', 'args': - ['arg1', 'arg2']}] - - elements_subs_dict : dictionary - The dictionary with element python names as keys and their merged - subscripts as values. - - subs_compatibility : dictionary - The dictionary storing the mapped subscripts - - Returns - ------- - translation - - new_elements: list of dictionaries - If the expression contains builder functions, those builders will - create new elements to add to our running list (that will eventually - be output to a file) such as stock initialization and derivative - funcs, etc. - - - Examples - -------- - >>> parse_general_expression({'expr': 'INTEG (FlowA, -10)', - ... 'py_name':'test_stock', - ... 'subs':None}, - ... {'FlowA': 'flowa'}), - ({'kind': 'component', 'py_expr': "_state['test_stock']"}, - [{'kind': 'implicit', - 'subs': None, - 'doc': 'Provides initial conditions for test_stock function', - 'py_name': 'init_test_stock', - 'real_name': None, - 'unit': 'See docs for test_stock', - 'py_expr': '-10'}, - {'py_name': 'dtest_stock_dt', - 'kind': 'implicit', - 'py_expr': 'flowa', - 'real_name': None}]) - - """ - - element["dependencies"] = dict() - # spaces important for word-based operators - in_ops = { - "+": "+", "-": "-", "*": "*", "/": "/", "^": "**", "=": "==", - "<=": "<=", "<>": "!=", "<": "<", ">=": ">=", ">": ">"} - - pre_ops = { - "-": "-", - "+": " " # space is important, so that and empty string doesn't - # slip through generic - } - - pre_ops = {"-": "-", "+": " ", ":not:": " not "} - - # in the following, if lists are empty use non-printable character - # everything needs to be escaped before going into the grammar, - # in case it includes quotes - sub_names_list = [re.escape(x) for x in subscript_dict.keys()] or ["\\a"] - sub_elems_list = [ - re.escape(y).replace('"', "") for x in subscript_dict.values() for y - in x] or ["\\a"] - in_ops_list = [re.escape(x) for x in in_ops.keys()] - pre_ops_list = [re.escape(x) for x in pre_ops.keys()] - if macro_list is not None and len(macro_list) > 0: - macro_names_list = [re.escape(x["name"]) for x in macro_list] - else: - macro_names_list = ["\\a"] - - expression_grammar = _include_common_grammar( - r""" - expr_type = array / expr / empty - expr = _ pre_oper? _ (lookup_with_def / build_call / macro_call / call / lookup_call / parens / number / string / reference / nan) _ (in_oper _ expr)? - subs_expr = subs _ in_oper _ subs - - logical_expr = logical_in_expr / logical_pre_expr / logical_parens / subs_expr - logical_in_expr = (logical_pre_expr / logical_parens / subs_expr / expr) (_ in_logical_oper _ (logical_pre_expr / logical_parens / subs_expr / expr))+ - logical_pre_expr = pre_logical_oper _ (logical_parens / subs_expr / expr) - - lookup_with_def = ~r"(WITH\ LOOKUP)"I _ "(" _ expr _ "," _ "(" _ ("[" ~r"[^\]]*" "]" _ ",")? 
( "(" _ expr _ "," _ expr _ ")" _ ","? _ )+ _ ")" _ ")" - - lookup_call = lookup_call_subs _ parens - lookup_call_subs = (id _ subscript_list) / id # check first for subscript - - nan = ":NA:" - number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?" - range = _ "[" ~r"[^\]]*" "]" _ "," - - arguments = ((logical_expr / (subs_range !(_ id)) / expr) _ ","? _)* - parens = "(" _ expr _ ")" - logical_parens = "(" _ logical_expr _ ")" - - call = func _ "(" _ arguments _ ")" - build_call = builder _ "(" _ arguments _ ")" - macro_call = macro _ "(" _ arguments _ ")" - - reference = (id _ subscript_list) / id # check first for subscript - subscript_list = "[" _ ~"\""? _ (subs _ ~"\""? _ "!"? _ ","? _)+ _ "]" - - array = (number _ ("," / ";")? _ "\\"? _)+ !~r"." # negative lookahead for - # anything other than an array - string = "\'" ( "\\\'" / ~r"[^\']"IU )* "\'" - - id = ( basic_id / escape_group ) - - subs = ~r"(%(subs)s)"IU # subscript names and elements (if none, use - # non-printable character) - subs_range = ~r"(%(subs_range)s)"IU # subscript names - func = ~r"(%(funcs)s)"IU # functions (case insensitive) - in_oper = ~r"(%(in_ops)s)"IU # infix operators (case insensitive) - pre_oper = ~r"(%(pre_ops)s)"IU # prefix operators (case insensitive) - in_logical_oper = ~r"(%(in_logical_ops)s)"IU # infix operators (case - # insensitive) - pre_logical_oper = ~r"(%(pre_logical_ops)s)"IU # prefix operators (case - # insensitive) - builder = ~r"(%(builders)s)"IU # builder functions (case insensitive) - macro = ~r"(%(macros)s)"IU # macros from model file (if none, use - # non-printable character) - - empty = "" # empty string - """ % { - # In the following, we have to sort keywords in decreasing order - # of length so that the peg parser doesn't quit early when - # finding a partial keyword - 'subs': '|'.join(reversed(sorted(sub_names_list + sub_elems_list, - key=len))), - 'subs_range': '|'.join(reversed(sorted(sub_names_list, key=len))), - 'funcs': '|'.join(reversed(sorted(functions.keys(), key=len))), - 'in_ops': '|'.join(reversed(sorted(in_ops_list, key=len))), - 'pre_ops': '|'.join(reversed(sorted(pre_ops_list, key=len))), - 'in_logical_ops': '|'.join(reversed(sorted(in_logical_ops.keys(), - key=len))), - 'pre_logical_ops': '|'.join(reversed(sorted(pre_logical_ops.keys(), - key=len))), - 'builders': '|'.join(reversed(sorted(builders.keys(), key=len))), - 'macros': '|'.join(reversed(sorted(macro_names_list, key=len))) - }) - - parser = parsimonious.Grammar(expression_grammar) - - class ExpressionParser(parsimonious.NodeVisitor): - # TODO: at some point, we could make the 'kind' identification - # recursive on expression, so that if an expression is passed into - # a builder function, the information about whether it is a constant, - # or calls another function, goes with it. - - def __init__(self, ast): - self.translation = "" - self.subs = None # the subscript list if given - self.lookup_subs = [] - self.apply_dim = set() # the dimensions with ! 
if given - self.kind = "constant" # change if we reference anything else - self.new_structure = [] - self.append = "" - self.lookup_append = [] - self.arguments = None - self.in_oper = None - self.args = [] - self.logical_op = None - self.to_float = False # convert subseted reference to float - self.visit(ast) - - def visit_expr_type(self, n, vc): - s = "".join(filter(None, vc)).strip() - self.translation = s - - def visit_expr(self, n, vc): - s = "".join(filter(None, vc)).strip() - self.translation = s - return s - - def visit_call(self, n, vc): - self.kind = "component" - - function_name = vc[0].lower() - arguments = vc[4] - - # add dimensions as last argument - if self.apply_dim and function_name in vectorial_funcs: - arguments += ["dim=" + str(tuple(self.apply_dim))] - self.apply_dim = set() - - if re.match(r"active(_|\s)initial", function_name): - ghost_name, new_structure = builder.build_active_initial_deps( - element["py_name"], arguments, element["dependencies"]) - element["dependencies"] = {ghost_name: 1} - self.new_structure += new_structure - - return builder.build_function_call( - functions[function_name], - arguments, element["dependencies"]) - - def visit_in_oper(self, n, vc): - return in_ops[n.text.lower()] - - def visit_pre_oper(self, n, vc): - return pre_ops[n.text.lower()] - - def visit_logical_in_expr(self, n, vc): - # build logical in expression (or, and) - expr = "".join(vc) - expr_low = expr.lower() - - if ":and:" in expr_low and ":or:" in expr_low: - raise ValueError( - "\nError when parsing %s with equation\n\t %s\n\n" - "mixed definition of logical operators :OR: and :AND:" - "\n Use parethesis to avoid confusions." % ( - element['real_name'], element['eqn']) - ) - elif ":and:" in expr_low: - expr = re.split(":and:", expr, flags=re.IGNORECASE) - op = ':and:' - elif ":or:" in expr_low: - expr = re.split(":or:", expr, flags=re.IGNORECASE) - op = ':or:' - - return builder.build_function_call(in_logical_ops[op], expr) - - def visit_logical_pre_expr(self, n, vc): - # build logical pre expression (not) - return builder.build_function_call(pre_logical_ops[vc[0].lower()], - [vc[-1]]) - - def visit_logical_parens(self, n, vc): - # we can forget about the parenthesis in logical expressions - # as we pass them as arguments to other functions: - # (A or B) and C -> logical_and(logical_or(A, B), C) - return vc[2] - - def visit_reference(self, n, vc): - self.kind = "component" - - py_expr = vc[0] + "()" + self.append - self.append = "" - - if self.to_float: - # convert element to float after subscript subsetting - self.to_float = False - return "float(" + py_expr.replace(".reset_coords(drop=True", - "") - elif self.subs: - if elements_subs_dict[vc[0]] != self.subs: - py_expr = builder.build_function_call( - functions_utils["rearrange"], - [py_expr, repr(self.subs), "_subscript_dict"], - ) - - mapping = self.subs.copy() - for i, sub in enumerate(self.subs): - if sub in subs_compatibility: - for compatible in subs_compatibility[sub]: - if compatible in element["subs"]: - mapping[i] = compatible - - if self.subs != mapping: - py_expr = builder.build_function_call( - functions_utils["rearrange"], - [py_expr, repr(mapping), "_subscript_dict"], - ) - - self.subs = None - - return py_expr - - def visit_lookup_call_subs(self, n, vc): - # necessary if a lookup dimension is subselected but we have - # other reference objects as arguments - self.lookup_append.append(self.append) - self.to_float = False # argument may have dims, cannot convert - self.append = "" - - # recover subs for 
lookup to avoid using them for arguments - if self.subs: - self.lookup_subs.append(self.subs) - self.subs = None - else: - self.lookup_subs.append(None) - - return vc[0] - - def visit_lookup_call(self, n, vc): - lookup_append = self.lookup_append.pop() - lookup_subs = self.lookup_subs.pop() - py_expr = "".join([x.strip(",") for x in vc]) + lookup_append - - if lookup_subs and elements_subs_dict[vc[0]] != lookup_subs: - dims = [ - utils.find_subscript_name(subscript_dict, sub) - for sub in lookup_subs - ] - return builder.build_function_call( - functions_utils["rearrange"], - [py_expr, repr(dims), "_subscript_dict"], - ) - - return py_expr - - def visit_id(self, n, vc): - subelement = namespace[n.text.strip()] - utils.update_dependency(subelement, element["dependencies"]) - return subelement - - def visit_lookup_with_def(self, n, vc): - """This exists because vensim has multiple ways of doing lookups. - Which is frustrating.""" - x_val = vc[4] - pairs = vc[11] - mixed_list = pairs.replace("(", "").replace(")", "").split(",") - xs = mixed_list[::2] - ys = mixed_list[1::2] - arguments = [x_val, "[" + ",".join(xs) + "]", "[" + ",".join(ys) + - "]"] - return builder.build_function_call(functions_utils["lookup"], - arguments) - - def visit_array(self, n, vc): - # first test handles when subs is not defined - if "subs" in element and element["subs"]: - coords = utils.make_coord_dict( - element["subs"], subscript_dict, terse=False - ) - if ";" in n.text or "," in n.text: - text = n.text.strip(";").replace(" ", "").replace( - ";", ",").replace("\\", "") - data = np.array([float(s) for s in text.split(",")]) - data = data.reshape(compute_shape(coords)) - datastr = ( - np.array2string(data, separator=",") - .replace("\n", "") - .replace(" ", "") - ) - else: - datastr = n.text - - return builder.build_function_call( - functions_utils["DataArray"], - [datastr, - utils.simplify_subscript_input( - coords, subscript_dict, - return_full=True, - merge_subs=element["merge_subs"]), - repr(element["merge_subs"])] - ) - else: - return n.text.replace(" ", "") - - def visit_subs_expr(self, n, vc): - # visit a logical comparation between subscripts - return builder.build_function_call( - functions_utils["DataArray"], [ - f"_subscript_dict['{vc[0]}']", - "{"+f"'{vc[0]}': _subscript_dict['{vc[0]}']"+"}", - f"'{vc[0]}'"] - ) + vc[2] + builder.build_function_call( - functions_utils["DataArray"], [ - f"_subscript_dict['{vc[4]}']", - "{"+f"'{vc[4]}': _subscript_dict['{vc[4]}']"+"}", - f"'{vc[4]}'"] - ) - - def visit_subscript_list(self, n, vc): - refs = vc[4] - subs = [x.strip() for x in refs.split(",")] - coordinates = [ - sub if sub not in subscript_dict and sub[-1] != "!" else False - for sub in subs - ] - - # Implements basic "!" subscript functionality in Vensim. 
- # Does NOT work for matrix diagonals in - # FUNC(variable[sub1!,sub1!]) functions - self.apply_dim.update(["%s" % s.strip("!") for s in subs if s[-1] - == "!"]) - - if any(coordinates): - coords, subs2 = [], [] - for coord, sub in zip(coordinates, subs): - if coord: - # subset coord - coords.append("'%s'" % coord) - else: - # do not subset coord - coords.append(":") - subs2.append(sub.strip("!")) - - if subs2: - self.subs = subs2 - else: - # convert subseted element to float (avoid using 0D xarray) - self.to_float = True - - self.append = ".loc[%s].reset_coords(drop=True)" % (", ".join( - coords)) - - else: - self.subs = ["%s" % s.strip("!") for s in subs] - - return "" - - def visit_build_call(self, n, vc): - # use only the dict with the final subscripts - # needed for the good working of externals - subs_dict = { - k: subscript_dict[k] for k in - element["merge_subs"] - } - # add subscript ranges given in expr - subs_dict.update({ - sub: subscript_dict[sub] for sub in element['subs'] - if sub in subscript_dict - }) - - self.kind = "component" - builder_name = vc[0].strip().lower() - - name, structure = builders[builder_name]( - element, subs_dict, vc[4]) - - self.new_structure += structure - - if "lookups" in builder_name: - self.arguments = "x" - self.kind = "lookup" - element["dependencies"].update({ - "__external__": None, "__lookup__": None}) - elif "constant" in builder_name: - # External constants - self.kind = "constant" - element["dependencies"]["__external__"] = None - elif "data" in builder_name: - # External data - self.kind = "component_ext_data" - element["dependencies"]["__external__"] = None - element["dependencies"]["time"] = 1 - elif "a function of" not in builder_name: - element["dependencies"] = {structure[-1]["py_name"]: 1} - - return name - - def visit_macro_call(self, n, vc): - call = vc[0] - arglist = vc[4] - self.kind = "component" - py_name = utils.make_python_identifier(call) - macro = [x for x in macro_list if x["py_name"] == py_name][ - 0 - ] # should match once - name, structure = builder.add_macro( - element["py_name"], - macro["py_name"], macro["file_name"], - macro["params"], arglist, element["dependencies"] - ) - element["dependencies"] = {structure[-1]["py_name"]: 1} - self.new_structure += structure - return name - - def visit_arguments(self, n, vc): - arglist = [x.strip(",") for x in vc] - return arglist - - def visit__(self, n, vc): - """Handles whitespace characters""" - return "" - - def visit_nan(self, n, vc): - builder.Imports.add("numpy") - return "np.nan" - - def visit_empty(self, n, vc): - warnings.warn(f"Empty expression for '{element['real_name']}''.") - return "None" - - def generic_visit(self, n, vc): - return "".join(filter(None, vc)) or n.text - - try: - tree = parser.parse(element["expr"]) - parse_object = ExpressionParser(tree) - except (IncompleteParseError, VisitationError, ParseError) as err: - # this way we get the element name and equation and is easier - # to detect the error in the model file - raise ValueError( - err.args[0] + "\n\n" - "\nError when parsing %s with equation\n\t %s\n\n" - "probably a used function is not integrated..." - "\nSee parsimonious output above." 
% (element["real_name"], - element["eqn"]) - ) - - return ( - { - "py_expr": parse_object.translation, - "kind": parse_object.kind, - "arguments": parse_object.arguments or "", - }, - parse_object.new_structure, - ) - - -def parse_lookup_expression(element, subscript_dict): - """This syntax parses lookups that are defined with their own element""" - - element["dependencies"] = dict() - - lookup_grammar = r""" - lookup = _ "(" _ (regularLookup / excelLookup) _ ")" - regularLookup = range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ - excelLookup = ~"GET( |_)(XLS|DIRECT)( |_)LOOKUPS"I _ "(" (args _ ","? _)+ ")" - args = ~r"[^,()]*" - number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?" - _ = ~r"[\s\\]*" #~r"[\ \t\n]*" #~r"[\s\\]*" # whitespace character - range = _ "[" ~r"[^\]]*" "]" _ "," - """ - parser = parsimonious.Grammar(lookup_grammar) - tree = parser.parse(element["expr"]) - - class LookupParser(parsimonious.NodeVisitor): - def __init__(self, ast): - self.translation = "" - self.new_structure = [] - self.visit(ast) - - def visit__(self, n, vc): - # remove whitespace - return "" - - def visit_regularLookup(self, n, vc): - - pairs = max(vc, key=len) - mixed_list = pairs.replace("(", "").replace(")", "").split(",") - xs = mixed_list[::2] - ys = mixed_list[1::2] - arguments = ["x", "[" + ",".join(xs) + "]", "[" + ",".join(ys) + - "]"] - self.translation = builder.build_function_call( - functions_utils["lookup"], arguments - ) - - def visit_excelLookup(self, n, vc): - arglist = vc[3].split(",") - arglist = [arg.replace("\\ ", "") for arg in arglist] - # use only the dict with the final subscripts - # needed for the good working of externals - subs_dict = { - k: subscript_dict[k] for k in - element["merge_subs"] - } - # add subscript ranges given in expr - subs_dict.update({ - sub: subscript_dict[sub] for sub in element['subs'] - if sub in subscript_dict - }) - trans, structure = builders["get xls lookups"]( - element, subs_dict, arglist - ) - element["dependencies"]["__external__"] = None - - self.translation = trans - self.new_structure += structure - - def generic_visit(self, n, vc): - return "".join(filter(None, vc)) or n.text - - parse_object = LookupParser(tree) - return ( - {"py_expr": parse_object.translation, "arguments": "x"}, - parse_object.new_structure, - ) - - -def translate_section(section, macro_list, sketch, root_path, subview_sep=""): - - model_elements = get_model_elements(section["string"]) - - # extract equation components - model_docstring = "" - for entry in model_elements: - if entry["kind"] == "entry": - entry.update(get_equation_components(entry["eqn"], root_path)) - elif entry["kind"] == "section": - model_docstring += entry["doc"] - - # make python identifiers and track for namespace conflicts - namespace = {"TIME": "time", "Time": "time"} # Initialize with builtins - - # add macro parameters when parsing a macro section - for param in section["params"]: - utils.make_python_identifier(param, namespace) - - # add macro functions to namespace - for macro in macro_list: - if macro["name"] != "_main_": - utils.make_python_identifier(macro["name"], namespace) - - # Create a namespace for the subscripts as these aren't used to - # create actual python functions, but are just labels on arrays, - # they don't actually need to be python-safe - # Also creates a dictionary with all the subscript that are mapped - - subscript_dict = {} - subs_compatibility_dict = {} - for e in model_elements: - if e["kind"] == "subdef": - subscript_dict[e["real_name"]] = e["subs"] - for 
compatible in e["subs_compatibility"]: - subs_compatibility_dict[compatible] =\ - set(e["subs_compatibility"][compatible]) - # check if copy - if not subscript_dict[compatible]: - # copy subscript to subscript_dict - subscript_dict[compatible] =\ - subscript_dict[e["subs_compatibility"][compatible][0]] - - elements_subs_dict = {} - # add model elements - for element in model_elements: - if element["kind"] not in ["subdef", "section"]: - element["py_name"] = utils.make_python_identifier( - element["real_name"], namespace) - # dictionary to save the subscripts of each element so we can avoid - # using utils.rearrange when calling them with the same dimensions - if element["py_name"] in elements_subs_dict: - elements_subs_dict[element["py_name"]].append(element["subs"]) - else: - elements_subs_dict[element["py_name"]] = [element["subs"]] - - elements_subs_dict = { - el: utils.make_merge_list(elements_subs_dict[el], subscript_dict, el) - for el in elements_subs_dict - } - - for element in model_elements: - if "py_name" in element and element["py_name"] in elements_subs_dict: - element["merge_subs"] =\ - elements_subs_dict[element["py_name"]] - else: - element["merge_subs"] = None - - # Parse components to python syntax. - for element in model_elements: - if element["kind"] == "component" and "py_expr" not in element: - # TODO: if there is new structure, - # it should be added to the namespace... - translation, new_structure = parse_general_expression( - element, - namespace=namespace, - subscript_dict=subscript_dict, - macro_list=macro_list, - subs_compatibility=subs_compatibility_dict, - elements_subs_dict=elements_subs_dict - ) - element.update(translation) - model_elements += new_structure - - elif element["kind"] == "data": - element["eqn"] = element["expr"] = element["arguments"] = "" - element["py_expr"], new_structure = builder.add_tab_data( - element["py_name"], element["real_name"], - element["subs"], subscript_dict, element["merge_subs"], - element["keyword"]) - - element["dependencies"] = {"time": 1, "__data__": None} - model_elements += new_structure - - elif element["kind"] == "lookup": - translation, new_structure = parse_lookup_expression( - element, - subscript_dict=subscript_dict - ) - element.update(translation) - model_elements += new_structure - - element["dependencies"]["__lookup__"] = None - - # send the pieces to be built - build_elements = builder.merge_partial_elements([ - e for e in model_elements if e["kind"] not in ["subdef", "test", - "section"] - ]) - - dependencies = { - element["py_name"]: element["dependencies"] - - for element in build_elements - if element["dependencies"] is not None - } - - # macros are built in their own separate files, and their inputs and - # outputs are put in views/subviews - if sketch and (section["name"] == "_main_"): - module_elements = _classify_elements_by_module(sketch, namespace, - subview_sep) - if (len(module_elements.keys()) == 1) \ - and (isinstance(module_elements[list(module_elements)[0]], list)): - warnings.warn( - "Only a single view with no subviews was detected. 
The model" - " will be built in a single file.") - else: - builder.build_modular_model( - build_elements, - subscript_dict, - namespace, - dependencies, - section["file_path"], - module_elements, - ) - return section["file_path"] - - builder.build(build_elements, subscript_dict, namespace, dependencies, - section["file_path"]) - - return section["file_path"] - - -def _classify_elements_by_module(sketch, namespace, subview_sep): - """ - Takes the Vensim sketch as a string, parses it (line by line) and - returns a dictionary containing the views/subviews as keys and the model - elements that belong to each view/subview inside a list as values. - - Parameters - ---------- - sketch: string - Representation of the Vensim Sketch as a string. - - namespace: dict - Translation from original model element names (keys) to python - safe function identifiers (values). - - subview_sep: list - Characters used to split view names into view + subview - (e.g. if a view is named ENERGY.Demand and suview_sep is set to ".", - then the Demand subview would be placed inside the ENERGY directory) - - Returns - ------- - views_dict: dict - Dictionary containing view names as keys and a list of the - corresponding variables as values. If the subview_sep is defined, - then the dictionary will have a nested dict containing the subviews. - - """ - # split the sketch in different views - sketch = list(map(lambda x: x.strip(), sketch.split("\\\\\\---/// "))) - - view_elements = {} - for module in sketch: - for sketch_line in module.split("\n"): - # line is a dict with keys "variable_name" and "view_name" - line = parse_sketch_line(sketch_line.strip(), namespace) - - if line["view_name"]: - view_name = line["view_name"] - view_elements[view_name] = [] - - if line["variable_name"]: - if line["variable_name"] not in view_elements[view_name]: - view_elements[view_name].append(line["variable_name"]) - - # removes views that do not include any variable in them - non_empty_views = { - key.lower(): value for key, value in view_elements.items() if value - } - - # split into subviews, if subview_sep is provided - views_dict = {} - if subview_sep and any( - sep in view for sep in subview_sep for view in non_empty_views): - escaped_separators = list(map(lambda x: re.escape(x), subview_sep)) - for full_name, values in non_empty_views.items(): - # split the full view name using the separator and make the - # individual parts safe file or directory names - clean_view_parts = utils.clean_file_names( - *re.split( - "|".join(escaped_separators), - full_name)) - # creating a nested dict for each view.subview - # (e.g. {view_name: {subview_name: [values]}}) - nested_dict = values - - for item in reversed(clean_view_parts): - - nested_dict = {item: nested_dict} - # merging the new nested_dict into the views_dict, preserving - # repeated keys - utils.merge_nested_dicts(views_dict, nested_dict) - - # view names do not have separators or separator characters not provided - else: - if subview_sep and not any( - sep in view for sep in subview_sep for view in non_empty_views): - warnings.warn("The given subview separators were not matched in " - + "any view name.") - - for view_name, elements in non_empty_views.items(): - views_dict[utils.clean_file_names(view_name)[0]] = elements - - return views_dict - - -def _split_sketch(text): - """ - Splits the model file between the main section and the sketch - - Parameters - ---------- - text : string - Full model as a string. - - Returns - ------- - text: string - Model file without sketch. 
- - sketch: string - Model sketch. - - """ - split_model = text.split("\\\\\\---///", 1) - text = split_model[0] - - try: - sketch = split_model[1] - # remove plots section, if it exists - sketch = sketch.split("///---\\\\\\")[0] - except LookupError: - sketch = "" - warnings.warn("Your model does not have a sketch.") - - return text, sketch - - -def translate_vensim(mdl_file, split_views, encoding=None, **kwargs): - """ - Translate a vensim file. - - Parameters - ---------- - mdl_file: str or pathlib.PosixPath - File path of a vensim model file to translate to python. - - split_views: bool - If True, the sketch is parsed to detect model elements in each - model view, and then translate each view in a separate python - file. Setting this argument to True is recommended for large - models that are split in many different views. - - encoding: str or None (optional) - Encoding of the source model file. If None, the encoding will be - read from the model, if the encoding is not defined in the model - file it will be set to 'UTF-8'. Default is None. - - **kwargs: (optional) - Additional parameters passed to the translate_vensim function - - Returns - ------- - outfile_name: str - Name of the output file. - - Examples - -------- - >>> translate_vensim('teacup.mdl') - - """ - # character used to place subviews in the parent view folder - subview_sep = kwargs.get("subview_sep", "") - - if isinstance(mdl_file, str): - mdl_file = pathlib.Path(mdl_file) - - # check for model extension - if mdl_file.suffix.lower() != ".mdl": - raise ValueError( - "The file to translate, " - + str(mdl_file) - + " is not a vensim model. It must end with mdl extension." - ) - - root_path = mdl_file.parent - - if encoding is None: - encoding = _detect_encoding_from_file(mdl_file) - - with open(mdl_file, "r", encoding=encoding, errors="ignore") as in_file: - text = in_file.read() - - outfile_name = mdl_file.with_suffix(".py") - - if split_views: - text, sketch = _split_sketch(text) - else: - sketch = "" - - file_sections = get_file_sections(text.replace("\n", "")) - - for section in file_sections: - if section["name"] == "_main_": - section["file_path"] = outfile_name - else: # separate macro elements into their own files - section["py_name"] = utils.make_python_identifier( - section["name"]) - section["file_name"] = section["py_name"] + ".py" - section["file_path"] = root_path.joinpath(section["file_name"]) - - macro_list = [s for s in file_sections if s["name"] != "_main_"] - - for section in file_sections: - translate_section(section, macro_list, sketch, root_path, subview_sep) - - return outfile_name - - -def _detect_encoding_from_file(mdl_file): - - try: - with open(mdl_file, "rb") as in_file: - f_line = in_file.readline() - f_line = f_line.decode(detect(f_line)['encoding']) - return re.search(r"(?<={)(.*)(?=})", f_line).group() - except (AttributeError, UnicodeDecodeError): - warnings.warn( - "No encoding specified or detected to translate the model " - "file. 
'UTF-8' encoding will be used.") - return "UTF-8" diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index f2b9d7be..74017e95 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -1,4 +1,5 @@ import re +from typing import Union, Tuple, List import warnings import parsimonious import numpy as np @@ -12,10 +13,11 @@ class Element(): + """Model element parsed definition""" - def __init__(self, equation, units, documentation): + def __init__(self, equation: str, units: str, documentation: str): self.equation = equation - self.units, self.limits = self._parse_units(units) + self.units, self.range = self._parse_units(units) self.documentation = documentation def __str__(self): @@ -23,14 +25,17 @@ def __str__(self): self.equation, self.units, self.documentation) @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" return self.__str__() @property def verbose(self): + """Print model information""" print(self._verbose) - def _parse_units(self, units_str): + def _parse_units(self, units_str: str) -> Tuple[str, tuple]: + """Split the range from the units""" # TODO improve units parsing: move to _parse_section_elements if not units_str: return "", (None, None) @@ -49,16 +54,19 @@ def _parse_units(self, units_str): ) return units, lims - def _parse(self): + def _parse(self) -> object: + """Parse model element to get the component object""" tree = vu.Grammar.get("element_object").parse(self.equation) self.component = ElementsComponentParser(tree).component self.component.units = self.units - self.component.limits = self.limits + self.component.range = self.range self.component.documentation = self.documentation return self.component class ElementsComponentParser(parsimonious.NodeVisitor): + """Visit model element definition to get the component object""" + def __init__(self, ast): self.mapping = [] self.subscripts = [] @@ -176,7 +184,8 @@ def visit__(self, n, vc): class SubscriptRange(): """Subscript range definition, defined by ":" or "<->" in Vensim.""" - def __init__(self, name, definition, mapping=[]): + def __init__(self, name: str, definition: Union[List[str], str, dict], + mapping: List[str] = []): self.name = name self.definition = definition self.mapping = mapping @@ -188,11 +197,13 @@ def __str__(self): if self.mapping else self.definition) @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" return self.__str__() @property def verbose(self): + """Print model information""" print(self._verbose) @@ -200,7 +211,8 @@ class Component(): """Model component defined by "name = expr" in Vensim.""" kind = "Model component" - def __init__(self, name, subscripts, expression): + def __init__(self, name: str, subscripts: Tuple[list, list], + expression: str): self.name = name self.subscripts = subscripts self.expression = expression @@ -230,16 +242,20 @@ def _verbose(self): def verbose(self): print(self._verbose) - def _parse(self): + def _parse(self) -> None: + """Parse model component to get the AST""" tree = vu.Grammar.get("components", parsing_ops).parse(self.expression) - self.ast = ComponentsParser(tree).translation + self.ast = EquationParser(tree).translation if isinstance(self.ast, structures["get_xls_lookups"]): self.lookup = True else: self.lookup = False - def get_abstract_component(self): + def get_abstract_component(self) -> Union[AbstractComponent, + AbstractLookup]: + """Get Abstract Component used for building""" if 
self.lookup: + # get lookups equations return AbstractLookup(subscripts=self.subscripts, ast=self.ast) else: return AbstractComponent(subscripts=self.subscripts, ast=self.ast) @@ -249,10 +265,12 @@ class UnchangeableConstant(Component): """Unchangeable constant defined by "name == expr" in Vensim.""" kind = "Unchangeable constant component" - def __init__(self, name, subscripts, expression): + def __init__(self, name: str, subscripts: Tuple[list, list], + expression: str): super().__init__(name, subscripts, expression) - def get_abstract_component(self): + def get_abstract_component(self) -> AbstractUnchangeableConstant: + """Get Abstract Component used for building""" return AbstractUnchangeableConstant( subscripts=self.subscripts, ast=self.ast) @@ -261,14 +279,17 @@ class Lookup(Component): """Lookup variable, defined by "name(expr)" in Vensim.""" kind = "Lookup component" - def __init__(self, name, subscripts, expression): + def __init__(self, name: str, subscripts: Tuple[list, list], + expression: str): super().__init__(name, subscripts, expression) - def _parse(self): + def _parse(self) -> None: + """Parse model component to get the AST""" tree = vu.Grammar.get("lookups").parse(self.expression) self.ast = LookupsParser(tree).translation - def get_abstract_component(self): + def get_abstract_component(self) -> AbstractLookup: + """Get Abstract Component used for building""" return AbstractLookup(subscripts=self.subscripts, ast=self.ast) @@ -276,7 +297,8 @@ class Data(Component): """Data variable, defined by "name := expr" in Vensim.""" kind = "Data component" - def __init__(self, name, subscripts, keyword, expression): + def __init__(self, name: str, subscripts: Tuple[list, list], + keyword: str, expression: str): super().__init__(name, subscripts, expression) self.keyword = keyword @@ -290,19 +312,22 @@ def __str__(self): text += "\n\t%s" % self._expression return text - def _parse(self): + def _parse(self) -> None: + """Parse model component to get the AST""" if not self.expression: # empty data vars, read from vdf file self.ast = structures["data"]() else: super()._parse() - def get_abstract_component(self): + def get_abstract_component(self) -> AbstractData: + """Get Abstract Component used for building""" return AbstractData( subscripts=self.subscripts, ast=self.ast, keyword=self.keyword) class LookupsParser(parsimonious.NodeVisitor): + """Visit the elements of a lookups to get the AST""" def __init__(self, ast): self.translation = None self.visit(ast) @@ -341,7 +366,8 @@ def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text -class ComponentsParser(parsimonious.NodeVisitor): +class EquationParser(parsimonious.NodeVisitor): + """Visit the elements of a equation to get the AST""" def __init__(self, ast): self.translation = None self.elements = {} @@ -353,11 +379,13 @@ def visit_expr_type(self, n, vc): self.translation = self.elements[vc[0]] def visit_final_expr(self, n, vc): + # expressions with logical binary operators (:AND:, :OR:) return vu.split_arithmetic( structures["logic"], parsing_ops["logic_ops"], "".join(vc).strip(), self.elements) def visit_logic_expr(self, n, vc): + # expressions with logical unitary operators (:NOT:) id = vc[2] if vc[0].lower() == ":not:": id = self.add_element(structures["logic"]( @@ -367,21 +395,25 @@ def visit_logic_expr(self, n, vc): return id def visit_comp_expr(self, n, vc): + # expressions with comparisons (=, <>, <, <=, >, >=) return vu.split_arithmetic( structures["logic"], parsing_ops["comp_ops"], "".join(vc).strip(), 
self.elements) def visit_add_expr(self, n, vc): + # expressions with additions (+, -) return vu.split_arithmetic( structures["arithmetic"], parsing_ops["add_ops"], "".join(vc).strip(), self.elements) def visit_prod_expr(self, n, vc): + # expressions with products (*, /) return vu.split_arithmetic( structures["arithmetic"], parsing_ops["prod_ops"], "".join(vc).strip(), self.elements) def visit_exp_expr(self, n, vc): + # expressions with exponentials (^) return vu.split_arithmetic( structures["arithmetic"], parsing_ops["exp_ops"], "".join(vc).strip(), self.elements, self.negatives) @@ -467,14 +499,13 @@ def visit_parens(self, n, vc): return vc[2] def visit__(self, n, vc): - """Handles whitespace characters""" + # handles whitespace characters return "" def visit_nan(self, n, vc): return "np.nan" def visit_empty(self, n, vc): - #warnings.warn(f"Empty expression for '{element['real_name']}''.") return self.add_element(None) def generic_visit(self, n, vc): diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translation/vensim/vensim_file.py index 579c214a..ca7d7b59 100644 --- a/pysd/translation/vensim/vensim_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -1,4 +1,5 @@ import re +from typing import Union, List from pathlib import Path import warnings import parsimonious @@ -25,7 +26,8 @@ class VensimFile(): file it will be set to 'UTF-8'. Default is None. """ - def __init__(self, mdl_path, encoding=None): + def __init__(self, mdl_path: Union[str, Path], + encoding: Union[None, str] = None): self.mdl_path = Path(mdl_path) self.root_path = self.mdl_path.parent self.model_text = self._read(encoding) @@ -37,7 +39,8 @@ def __str__(self): return "\nVensim model file, loaded from:\n\t%s\n" % self.mdl_path @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" text = self.__str__() for section in self.sections: text += section._verbose @@ -46,10 +49,18 @@ def _verbose(self): @property def verbose(self): + """Print model information""" print(self._verbose) - def _read(self, encoding): - """Read a Vensim file and assign its content to self.model_text""" + def _read(self, encoding: Union[None, str]) -> str: + """ + Read a Vensim file and assign its content to self.model_text + + Returns + ------- + str: model file content + + """ # check for model extension if self.mdl_path.suffix.lower() != ".mdl": raise ValueError( @@ -58,6 +69,7 @@ def _read(self, encoding): ) if encoding is None: + # Try detecting the encoding from the file encoding = vu._detect_encoding_from_file(self.mdl_path) with self.mdl_path.open("r", encoding=encoding, @@ -66,7 +78,7 @@ def _read(self, encoding): return model_text - def _split_sketch(self): + def _split_sketch(self) -> None: """Split model from the sketch""" try: split_model = self.model_text.split("\\\\\\---///", 1) @@ -76,22 +88,31 @@ def _split_sketch(self): except LookupError: pass - def _clean(self, text): + def _clean(self, text: str) -> str: + """Remove unnecessary characters""" return re.sub(r"[\n\t\s]+", " ", re.sub(r"\\\n\t", " ", text)) - def parse(self): + def parse(self) -> None: + """Parse model file""" + # get model sections (__main__ + macros) tree = vu.Grammar.get("file_sections").parse(self.model_text) self.sections = FileSectionsParser(tree).entries + + # main section path (Python model file) self.sections[0].path = self.mdl_path.with_suffix(".py") + for section in self.sections[1:]: + # macrots paths section.path = self.mdl_path.parent.joinpath( self.clean_file_names(section.name)[0] ).with_suffix(".py") for 
section in self.sections: + # parse each section section._parse() - def parse_sketch(self, subview_sep): + def parse_sketch(self, subview_sep: List[str]) -> None: + """Parse the sketch of the models to classify the variables""" if self.sketch: sketch = list(map( lambda x: x.strip(), @@ -164,7 +185,14 @@ def parse_sketch(self, subview_sep): self.sections[0].split = True self.sections[0].views_dict = views_dict - def get_abstract_model(self): + def get_abstract_model(self) -> AbstractModel: + """ + Get Abstract Model used for building + + Returns + ------- + AbstractModel + """ return AbstractModel( original_path=self.mdl_path, sections=tuple(section.get_abstract_section() @@ -230,7 +258,7 @@ def visit_main(self, n, vc): self.entries[0] = FileSection( name="__main__", path=Path("."), - type="main", + section_type="main", params=[], returns=[], content=n.text.strip(), @@ -246,9 +274,11 @@ def visit_macro(self, n, vc): FileSection( name=vc[2].strip().lower().replace(" ", "_"), path=Path("."), - type="macro", - params=[x.strip() for x in vc[6].split(",")] if vc[6] else [], - returns=[x.strip() for x in vc[10].split(",")] if vc[10] else [], + section_type="macro", + params=[ + x.strip() for x in vc[6].split(",")] if vc[6] else [], + returns=[ + x.strip() for x in vc[10].split(",")] if vc[10] else [], content=vc[13].strip(), split=False, views_dict=None @@ -260,6 +290,7 @@ def generic_visit(self, n, vc): class SketchParser(parsimonious.NodeVisitor): + """Sketch visitor to save the view names and the variables in each""" def __init__(self, ast): self.variable_name = None self.view_name = None diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translation/vensim/vensim_section.py index 4cdee6b0..98e49d54 100644 --- a/pysd/translation/vensim/vensim_section.py +++ b/pysd/translation/vensim/vensim_section.py @@ -3,7 +3,7 @@ import parsimonious from ..structures.abstract_model import\ - AbstractElement, AbstractSubscriptRange, AbstractSection + AbstractElement, AbstractSubscriptRange, AbstractSection from . 
import vensim_utils as vu from .vensim_element import Element, SubscriptRange, Component @@ -11,13 +11,13 @@ class FileSection(): # File section dataclass - def __init__(self, name: str, path: Path, type: str, + def __init__(self, name: str, path: Path, section_type: str, params: List[str], returns: List[str], content: str, split: bool, views_dict: Union[dict, None] - ) -> object: + ): self.name = name self.path = path - self.type = type + self.type = section_type self.params = params self.returns = returns self.content = content @@ -29,7 +29,8 @@ def __str__(self): return "\nFile section: %s\n" % self.name @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" text = self.__str__() if self.elements: for element in self.elements: @@ -41,12 +42,16 @@ def _verbose(self): @property def verbose(self): + """Print model information""" print(self._verbose) - def _parse(self): + def _parse(self) -> None: + """Parse the section""" + # parse the section to get the elements tree = vu.Grammar.get("section_elements").parse(self.content) self.elements = SectionElementsParser(tree).entries self.elements = [element._parse() for element in self.elements] + # split subscript from other components self.subscripts = [ element for element in self.elements @@ -56,12 +61,20 @@ def _parse(self): element for element in self.elements if isinstance(element, Component) ] + # reorder element list for better printing self.elements = self.subscripts + self.components [component._parse() for component in self.components] - def get_abstract_section(self): + def get_abstract_section(self) -> AbstractSection: + """ + Get Abstract Section used for building + + Returns + ------- + AbstractSection + """ return AbstractSection( name=self.name, path=self.path, @@ -74,36 +87,46 @@ def get_abstract_section(self): views_dict=self.views_dict ) - def solve_subscripts(self): + def solve_subscripts(self) -> List[AbstractSubscriptRange]: + """Convert the subscript ranges to Abstract Subscript Ranges""" return [AbstractSubscriptRange( name=subs_range.name, subscripts=subs_range.definition, mapping=subs_range.mapping ) for subs_range in self.subscripts] - def merge_components(self): + def merge_components(self) -> List[AbstractElement]: + """Merge model components by their name""" merged = {} for component in self.components: + # get a safe name to merge (case and white/underscore sensitivity) name = component.name.lower().replace(" ", "_") if name not in merged: + # create new element if it is the first component merged[name] = AbstractElement( name=component.name, components=[]) if component.units: + # add units to element data merged[name].units = component.units - if component.limits[0] is not None\ - or component.limits[1] is not None: - merged[name].range = component.limits + if component.range != (None, None): + # add range to element data + merged[name].range = component.range if component.documentation: + # add documentation to element data merged[name].documentation = component.documentation + # add AbstractComponent to the list of components merged[name].components.append(component.get_abstract_component()) return list(merged.values()) class SectionElementsParser(parsimonious.NodeVisitor): + """ + Visit section elements to get their equation units and documentation. 
+ """ # TODO include units parsing def __init__(self, ast): self.entries = [] diff --git a/pysd/translation/xmile/SMILE2Py.py b/pysd/translation/xmile/SMILE2Py.py deleted file mode 100644 index 8a789af6..00000000 --- a/pysd/translation/xmile/SMILE2Py.py +++ /dev/null @@ -1,357 +0,0 @@ -""" -Created August 14 2014 -James Houghton - -Changed May 03 2017 -Alexey Prey Mulyukin from sdCloud.io developement team - Changes: - - [May 03 2017] Alexey Prey Mulyukin: Integrate support to - logical operators like 'AND', 'OR' and 'NOT'. - Fix support the whitespaces in expressions between - operators and operands. - Add support to modulo operator - 'MOD'. - Fix support for case insensitive in function names. - -This module converts a string of SMILE syntax into Python - -""" -import parsimonious -from parsimonious.nodes import NodeVisitor -import pkg_resources -import re -from .. import builder, utils - -# Here we define which python function each XMILE keyword corresponds to -functions = { - # === - # 3.5.1 Mathematical Functions - # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039980 - # === - - "abs": "abs", - "int": "int", - "inf": {"name": "np.inf", "module": "numpy"}, - "exp": {"name": "np.exp", "module": "numpy"}, - "sin": {"name": "np.sin", "module": "numpy"}, - "cos": {"name": "np.cos", "module": "numpy"}, - "tan": {"name": "np.tan", "module": "numpy"}, - "arcsin": {"name": "np.arcsin", "module": "numpy"}, - "arccos": {"name": "np.arccos", "module": "numpy"}, - "arctan": {"name": "np.arctan", "module": "numpy"}, - "sqrt": {"name": "np.sqrt", "module": "numpy"}, - "ln": {"name": "np.log", "module": "numpy"}, - "log10": {"name": "np.log10", "module": "numpy"}, - "max": "max", - "min": "min", - - # === - # 3.5.2 Statistical Functions - # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039981 - # === - - "exprnd": {"name": "np.random.exponential", "module": "numpy"}, - "lognormal": {"name": "np.random.lognormal", "module": "numpy"}, - "normal": {"name": "np.random.normal", "module": "numpy"}, - "poisson": {"name": "np.random.poisson", "module": "numpy"}, - "random": {"name": "np.random.rand", "module": "numpy"}, - - # === - # 3.5.4 Test Input Functions - # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039983 - # === - - "pulse": { - "name": "pulse_magnitude", - "parameters": [ - {"name": 'time', "type": "time"}, - {"name": 'magnitude'}, - {"name": 'start'}, - {"name": "repeat_time", "optional": True} - ], - "module": "functions" - }, - "step": { - "name": "step", - "parameters": [ - {"name": 'time', "type": 'time'}, - {"name": 'value'}, - {"name": 'tstep'} - ], - "module": "functions" - }, - # time, slope, start, finish=0 - "ramp": { - "name": "ramp", - "parameters": [ - {"name": 'time', "type": 'time'}, - {"name": 'slope'}, - {"name": 'start'}, - {"name": 'finish', "optional": True} - ], - "module": "functions" - }, - - # === - # 3.5.6 Miscellaneous Functions - # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039985 - # === - "if then else": { - "name": "if_then_else", - "parameters": [ - {"name": 'condition'}, - {"name": 'val_if_true', "type": 'lambda'}, - {"name": 'val_if_false', "type": 'lambda'} - ], - "module": "functions" - }, - - # TODO functions/stateful objects to be added - # https://github.com/JamesPHoughton/pysd/issues/154 - "forecast": {"name": "not_implemented_function", "module": "functions", - "original_name": "forecast"}, - "previous": 
{"name": "not_implemented_function", "module": "functions", - "original_name": "previous"}, - "self": {"name": "not_implemented_function", "module": "functions", - "original_name": "self"} -} - -prefix_operators = { - "not": " not ", - "-": "-", - "+": " ", -} - -infix_operators = { - "and": " and ", - "or": " or ", - "=": "==", - "<=": "<=", - "<": "<", - ">=": ">=", - ">": ">", - "<>": "!=", - "^": "**", - "+": "+", - "-": "-", - "*": "*", - "/": "/", - "mod": "%", -} - -# ==== -# 3.5.3 Delay Functions -# http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039982 -# ==== - -builders = { - # "delay" !TODO! How to add the infinity delay? - - "delay1": lambda element, subscript_dict, args: - builder.add_n_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2] if len(args) > 2 else args[0], - order="1", - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - "delay3": lambda element, subscript_dict, args: - builder.add_n_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2] if len(args) > 2 else args[0], - order="3", - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - "delayn": lambda element, subscript_dict, args: - builder.add_n_delay( - identifier=element["py_name"], - delay_input=args[0], - delay_time=args[1], - initial_value=args[2] if len(args) > 3 else args[0], - order=args[2], - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - "smth1": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2] if len(args) > 2 else args[0], - order="1", - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - "smth3": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2] if len(args) > 2 else args[0], - order="3", - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - "smthn": lambda element, subscript_dict, args: - builder.add_n_smooth( - identifier=element["py_name"], - smooth_input=args[0], - smooth_time=args[1], - initial_value=args[2] if len(args) > 3 else args[0], - order=args[2], - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - # "forcst" !TODO! 
- - "trend": lambda element, subscript_dict, args: - builder.add_n_trend( - identifier=element["py_name"], - trend_input=args[0], - average_time=args[1], - initial_trend=args[2] if len(args) > 2 else 0, - subs=element["subs"], - merge_subs=None, - deps=element["dependencies"] - ), - - "init": lambda element, subscript_dict, args: - builder.add_initial( - identifier=element["py_name"], - value=args[0], - deps=element["dependencies"]), -} - - -def format_word_list(word_list): - return '|'.join( - [re.escape(k) for k in reversed(sorted(word_list, key=len))]) - - -class SMILEParser(NodeVisitor): - def __init__(self, model_namespace={}, subscript_dict={}): - - self.model_namespace = model_namespace - self.subscript_dict = subscript_dict - self.extended_model_namespace = { - key.replace(' ', '_'): value - for key, value in self.model_namespace.items()} - self.extended_model_namespace.update(self.model_namespace) - - # === - # 3.5.5 Time Functions - # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039984 - # === - self.extended_model_namespace.update({'dt': 'time_step'}) - self.extended_model_namespace.update({'starttime': 'initial_time'}) - self.extended_model_namespace.update({'endtime': 'final_time'}) - - grammar = pkg_resources.resource_string( - "pysd", "translation/xmile/smile.grammar") - grammar = grammar.decode('ascii').format( - funcs=format_word_list(functions.keys()), - in_ops=format_word_list(infix_operators.keys()), - pre_ops=format_word_list(prefix_operators.keys()), - identifiers=format_word_list(self.extended_model_namespace.keys()), - build_keywords=format_word_list(builders.keys()) - ) - - self.grammar = parsimonious.Grammar(grammar) - - def parse(self, text, element, context='eqn'): - """ - context : 'eqn', 'defn' - If context is set to equation, lone identifiers will be - parsed as calls to elements. If context is set to definition, - lone identifiers will be cleaned and returned. 
- """ - - # Remove the inline comments from `text` before parsing the grammar - # http://docs.oasis-open.org/xmile/xmile/v1.0/csprd01/xmile-v1.0-csprd01.html#_Toc398039973 - text = re.sub(r"\{[^}]*\}", "", text) - if "dependencies" not in element: - element["dependencies"] = dict() - - self.ast = self.grammar.parse(text) - self.context = context - self.element = element - self.new_structure = [] - - py_expr = self.visit(self.ast) - - return ({ - 'py_expr': py_expr - }, self.new_structure) - - def visit_conditional_statement(self, n, vc): - return builder.build_function_call(functions["if then else"], vc[2::4]) - - def visit_user_call_identifier(self, n, vc): - return self.extended_model_namespace[n.text] - - def visit_user_call_quoted_identifier(self, n, vc): - return self.extended_model_namespace[vc[1]] - - def visit_identifier(self, n, vc): - subelement = self.extended_model_namespace[n.text] - utils.update_dependency(subelement, self.element["dependencies"]) - return subelement + '()' - - def visit_quoted_identifier(self, n, vc): - subelement = self.extended_model_namespace[vc[1]] - utils.update_dependency(subelement, self.element["dependencies"]) - return subelement + '()' - - def visit_call(self, n, vc): - function_name = vc[0].lower() - arguments = [e.strip() for e in vc[4].split(",")] - return builder.build_function_call( - functions[function_name], arguments, self.element["dependencies"]) - - def visit_user_call(self, n, vc): - return vc[0] + '(' + vc[4] + ')' - - def visit_build_call(self, n, vc): - builder_name = vc[0].lower() - arguments = [e.strip() for e in vc[4].split(",")] - name, structure = builders[builder_name]( - self.element, self.subscript_dict, arguments) - self.new_structure += structure - self.element["dependencies"] = {structure[-1]["py_name"]: 1} - return name - - def visit_pre_oper(self, n, vc): - return prefix_operators[n.text.lower()] - - def visit_in_oper(self, n, vc): - return infix_operators[n.text.lower()] - - def generic_visit(self, n, vc): - """ - Replace childbearing nodes with a list of their children; - for leaves, return the node text; - for empty nodes, return an empty string. - - Handles: - - call - - parens - - - """ - return ''.join(filter(None, vc)) or n.text or '' diff --git a/pysd/translation/xmile/smile.grammar b/pysd/translation/xmile/smile.grammar deleted file mode 100644 index c6250b3b..00000000 --- a/pysd/translation/xmile/smile.grammar +++ /dev/null @@ -1,25 +0,0 @@ -expr = conditional_statement / (_ pre_oper? _ primary _ (in_oper _ expr)?) - -conditional_statement = "IF" _ expr _ "THEN" _ expr _ "ELSE" _ expr - -primary = call / build_call / user_call / parens / number / identifier / quoted_identifier -parens = "(" _ expr _ ")" -call = func _ "(" _ arguments _ ")" -build_call = build_keyword _ "(" _ arguments _ ")" -user_call = user_call_identifiers _ "(" _ arguments _ ")" -arguments = (expr _ ","? _)* - -number = ((~"[0-9]"+ "."? ~"[0-9]"*) / ("." ~"[0-9]"+)) (("e"/"E") ("-"/"+") ~"[0-9]"+)? 
- -_ = spacechar* -spacechar = " "* ~"\t"* - -func = ~r"{funcs}"i -build_keyword = ~r"{build_keywords}"i -pre_oper = ~r"{pre_ops}"i -in_oper = ~r"{in_ops}"i -user_call_identifiers = user_call_identifier / user_call_quoted_identifier -user_call_identifier = ~r"{identifiers}"i -user_call_quoted_identifier = "\"" ~r"{identifiers}" "\"" -identifier = ~r"{identifiers}"i -quoted_identifier = "\"" ~r"{identifiers}" "\"" \ No newline at end of file diff --git a/pysd/translation/xmile/xmile2py.py b/pysd/translation/xmile/xmile2py.py deleted file mode 100644 index e2b973fd..00000000 --- a/pysd/translation/xmile/xmile2py.py +++ /dev/null @@ -1,405 +0,0 @@ -""" -Deals with accessing the components of the xmile file, and -formatting them for the builder - -James Houghton -Alexey Prey Mulyukin from sdCloud.io development team. - -""" -import re -import os.path - -from .SMILE2Py import SMILEParser -from lxml import etree -from .. import builder, utils - -import numpy as np - - -def translate_xmile(xmile_file): - """ Translate an xmile model file into a python class. - Functionality is currently limited. - - """ - if not isinstance(xmile_file, str): - xmile_file = str(xmile_file) - # process xml file - xml_parser = etree.XMLParser(encoding="utf-8", recover=True) - root = etree.parse(xmile_file, parser=xml_parser).getroot() - NS = root.nsmap[None] # namespace of the xmile document - - def get_xpath_text(node, path, ns=None, default=''): - """ Safe access of occassionally missing elements """ - # defined here to take advantage of NS in default - if ns is None: - ns = {'ns': NS} - try: - return node.xpath(path, namespaces=ns)[0].text - except IndexError: - return default - - def get_xpath_attrib(node, path, attrib, ns=None, default=None): - """ Safe access of occassionally missing elements """ - # defined here to take advantage of NS in default - if ns is None: - ns = {'ns': NS} - try: - return node.xpath(path, namespaces=ns)[0].attrib[attrib] - except IndexError: - return default - - def is_constant_expression(py_expr): - try: - float(py_expr) - return True - except ValueError: - return False - - def parse_lookup_xml_node(node): - ys_node = node.xpath('ns:ypts', namespaces={'ns': NS})[0] - ys = np.fromstring( - ys_node.text, - dtype=float, - sep=ys_node.attrib['sep'] if 'sep' in ys_node.attrib else ',' - ) - xscale_node = node.xpath('ns:xscale', namespaces={'ns': NS}) - if len(xscale_node) > 0: - xmin = xscale_node[0].attrib['min'] - xmax = xscale_node[0].attrib['max'] - xs = np.linspace(float(xmin), float(xmax), len(ys)) - else: - xs_node = node.xpath('ns:xpts', namespaces={'ns': NS})[0] - xs = np.fromstring( - xs_node.text, - dtype=float, - sep=xs_node.attrib['sep'] if 'sep' in xs_node.attrib else ',' - ) - - type = node.attrib['type'] if 'type' in node.attrib else 'continuous' - - functions_map = { - "continuous": { - "name": "lookup", - "module": "functions" - }, - 'extrapolation': { - "name": "lookup_extrapolation", - "module": "functions" - }, - 'discrete': { - "name": "lookup_discrete", - "module": "functions" - } - } - lookup_function = functions_map[type] if type in functions_map\ - else functions_map['continuous'] - - return { - 'name': node.attrib['name'] if 'name' in node.attrib else '', - 'xs': xs, - 'ys': ys, - 'type': type, - 'function': lookup_function - } - - # build model namespace - namespace = { - 'TIME': 'time', - 'Time': 'time', - 'time': 'time' - } # namespace of the python model - names_xpath = '//ns:model/ns:variables/ns:aux|' \ - '//ns:model/ns:variables/ns:flow|' \ - 
'//ns:model/ns:variables/ns:stock|' \ - '//ns:model/ns:variables/ns:gf' - - for node in root.xpath(names_xpath, namespaces={'ns': NS}): - name = node.attrib['name'] - utils.make_python_identifier(name, namespace) - - model_elements = [] - smile_parser = SMILEParser(namespace) - - # add aux and flow elements - flaux_xpath =\ - '//ns:model/ns:variables/ns:aux|//ns:model/ns:variables/ns:flow' - for node in root.xpath(flaux_xpath, namespaces={'ns': NS}): - name = node.attrib['name'] - units = get_xpath_text(node, 'ns:units') - lims = ( - get_xpath_attrib(node, 'ns:range', 'min'), - get_xpath_attrib(node, 'ns:range', 'max') - ) - lims = str(tuple(float(x) if x is not None else x for x in lims)) - doc = get_xpath_text(node, 'ns:doc') - py_name = namespace[name] - eqn = get_xpath_text(node, 'ns:eqn') - - # Replace new lines with space, and replace 2 or more spaces with - # single space. Then ensure there is no space at start or end of - # equation - eqn = (re.sub(r"(\s{2,})", " ", eqn.replace("\n", ' ')).strip()) - - element = { - 'kind': 'component', - 'real_name': name, - 'unit': units, - 'doc': doc, - 'eqn': eqn, - 'lims': lims, - 'py_name': py_name, - 'subs': [], # Todo later - 'arguments': '', - } - - tranlation, new_structure = smile_parser.parse(eqn, element) - element.update(tranlation) - if is_constant_expression(element['py_expr']): - element['kind'] = 'constant' - - model_elements += new_structure - - gf_node = node.xpath("ns:gf", namespaces={'ns': NS}) - if len(gf_node) > 0: - gf_data = parse_lookup_xml_node(gf_node[0]) - xs = '[' + ','.join("%10.3f" % x for x in gf_data['xs']) + ']' - ys = '[' + ','.join("%10.3f" % x for x in gf_data['ys']) + ']' - py_expr =\ - builder.build_function_call(gf_data['function'], - [element['py_expr'], xs, ys])\ - + ' if x is None else '\ - + builder.build_function_call(gf_data['function'], - ['x', xs, ys]) - element.update({ - 'kind': 'lookup', - # This lookup declared as inline, so we should implement - # inline mode for flow and aux - 'arguments': "x = None", - 'py_expr': py_expr - }) - - model_elements.append(element) - - # add gf elements - gf_xpath = '//ns:model/ns:variables/ns:gf' - for node in root.xpath(gf_xpath, namespaces={'ns': NS}): - name = node.attrib['name'] - py_name = namespace[name] - - units = get_xpath_text(node, 'ns:units') - doc = get_xpath_text(node, 'ns:doc') - - gf_data = parse_lookup_xml_node(node) - xs = '[' + ','.join("%10.3f" % x for x in gf_data['xs']) + ']' - ys = '[' + ','.join("%10.3f" % x for x in gf_data['ys']) + ']' - py_expr = builder.build_function_call(gf_data['function'], - ['x', xs, ys]) - element = { - 'kind': 'lookup', - 'real_name': name, - 'unit': units, - 'lims': None, - 'doc': doc, - 'eqn': '', - 'py_name': py_name, - 'py_expr': py_expr, - 'arguments': 'x', - 'dependencies': {"__lookup__": None}, - 'subs': [], # Todo later - } - model_elements.append(element) - - # add stock elements - stock_xpath = '//ns:model/ns:variables/ns:stock' - for node in root.xpath(stock_xpath, namespaces={'ns': NS}): - name = node.attrib['name'] - units = get_xpath_text(node, 'ns:units') - lims = ( - get_xpath_attrib(node, 'ns:range', 'min'), - get_xpath_attrib(node, 'ns:range', 'max') - ) - lims = str(tuple(float(x) if x is not None else x for x in lims)) - doc = get_xpath_text(node, 'ns:doc') - py_name = namespace[name] - - # Extract input and output flows equations - inflows = [ - n.text for n in node.xpath('ns:inflow', namespaces={'ns': NS})] - outflows = [ - n.text for n in node.xpath('ns:outflow', namespaces={'ns': NS})] - 
- eqn = ' + '.join(inflows) if inflows else '' - eqn += (' - ' + ' - '.join(outflows)) if outflows else '' - - element = { - 'kind': 'component' if inflows or outflows else 'constant', - 'real_name': name, - 'unit': units, - 'doc': doc, - 'eqn': eqn, - 'lims': lims, - 'py_name': py_name, - 'subs': [], # Todo later - 'arguments': '' - } - - # Parse each flow equations - py_inflows = [] - for inputFlow in inflows: - translation, new_structure = smile_parser.parse( - inputFlow, element) - py_inflows.append(translation['py_expr']) - model_elements += new_structure - - # Parse each flow equations - py_outflows = [] - for outputFlow in outflows: - translation, new_structure = smile_parser.parse( - outputFlow, element) - py_outflows.append(translation['py_expr']) - model_elements += new_structure - - py_ddt = ' + '.join(py_inflows) if py_inflows else '' - py_ddt += (' - ' + ' - '.join(py_outflows)) if py_outflows else '' - - # Read the initial value equation for stock element - initial_value_eqn = get_xpath_text(node, 'ns:eqn') - translation, new_structure = smile_parser.parse( - initial_value_eqn, element) - py_initial_value = translation['py_expr'] - model_elements += new_structure - - py_expr, new_structure = builder.add_stock( - identifier=py_name, - subs=[], # Todo later - merge_subs=[], - expression=py_ddt, - initial_condition=py_initial_value, - deps=element["dependencies"]) - element['py_expr'] = py_expr - element["dependencies"] = {new_structure[-1]["py_name"]: 1} - model_elements.append(element) - model_elements += new_structure - - # remove timestamp pieces so as not to double-count - model_elements_parsed = [] - for element in model_elements: - if element['real_name'].lower() not in ['initial time', - 'final time', - 'time step', - 'saveper']: - model_elements_parsed.append(element) - model_elements = model_elements_parsed - - # Add timeseries information - - # Read the start time of simulation - sim_spec_node = root.xpath('//ns:sim_specs', namespaces={'ns': NS}) - time_units = sim_spec_node[0].attrib['time_units']\ - if len(sim_spec_node) > 0 and 'time_units' in sim_spec_node[0].attrib\ - else "" - - tstart = root.xpath( - '//ns:sim_specs/ns:start', - namespaces={'ns': NS})[0].text - element = { - 'kind': 'constant', - 'real_name': 'INITIAL TIME', - 'unit': time_units, - 'lims': None, - 'doc': 'The initial time for the simulation.', - 'eqn': tstart, - 'py_name': 'initial_time', - 'subs': None, - 'arguments': '', - } - translation, new_structure = smile_parser.parse(tstart, element) - element.update(translation) - model_elements.append(element) - model_elements += new_structure - - # Read the final time of simulation - tstop = root.xpath('//ns:sim_specs/ns:stop', namespaces={'ns': NS})[0].text - element = { - 'kind': 'constant', - 'real_name': 'FINAL TIME', - 'unit': time_units, - 'lims': None, - 'doc': 'The final time for the simulation.', - 'eqn': tstart, - 'py_name': 'final_time', - 'subs': None, - 'arguments': '', - } - - translation, new_structure = smile_parser.parse(tstop, element) - element.update(translation) - model_elements.append(element) - model_elements += new_structure - - # Read the time step of simulation - dt_node = root.xpath('//ns:sim_specs/ns:dt', namespaces={'ns': NS}) - - # Use default value for time step if `dt` is not specified in model - dt_eqn = "1.0" - if len(dt_node) > 0: - dt_node = dt_node[0] - dt_eqn = dt_node.text - # If reciprocal mode are defined for `dt`, we should inverse value - if "reciprocal" in dt_node.attrib\ - and 
dt_node.attrib["reciprocal"].lower() == "true": - dt_eqn = "1/" + dt_eqn - - element = { - 'kind': 'constant', - 'real_name': 'TIME STEP', - 'unit': time_units, - 'lims': None, - 'doc': 'The time step for the simulation.', - 'eqn': dt_eqn, - 'py_name': 'time_step', - 'subs': None, - 'arguments': '', - } - translation, new_structure = smile_parser.parse(dt_eqn, element) - element.update(translation) - model_elements.append(element) - model_elements += new_structure - - # Add the SAVEPER attribute to the model - model_elements.append({ - 'kind': 'constant', - 'real_name': 'SAVEPER', - 'unit': time_units, - 'lims': None, - 'doc': 'The time step for the simulation.', - 'eqn': dt_eqn, - 'py_name': 'saveper', - 'py_expr': 'time_step()', - 'subs': None, - 'dependencies': {'time_step': 1}, - 'arguments': '', - }) - - # send the pieces to be built - build_elements = builder.merge_partial_elements([ - e for e in model_elements if e["kind"] not in ["subdef", "test", - "section"] - ]) - - dependencies = { - element["py_name"]: element["dependencies"] - - for element in build_elements - if element["dependencies"] is not None - } - file_name, file_extension = os.path.splitext(xmile_file) - outfile_name = file_name + '.py' - - builder.build(elements=build_elements, - subscript_dict={}, - namespace=namespace, - dependencies=dependencies, - outfile_name=outfile_name) - - return outfile_name diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index 4b5741dc..3d148cbb 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -1,4 +1,6 @@ import re +from typing import Tuple, Union, List +from lxml import etree import parsimonious import numpy as np @@ -17,13 +19,15 @@ class Element(): "discrete": "hold_backward" } - def __init__(self, node, ns): + kind = "Element" + + def __init__(self, node: etree._Element, ns: dict): self.node = node self.ns = ns self.name = node.attrib["name"] self.units = self.get_xpath_text(node, "ns:units") or "" self.documentation = self.get_xpath_text(node, "ns:doc") or "" - self.limits = (None, None) + self.range = (None, None) self.components = [] def __str__(self): @@ -42,36 +46,49 @@ def _expression(self): return self.node.text.replace("\n", "\n\t") @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" return self.__str__() @property def verbose(self): + """Print model information""" print(self._verbose) - def get_xpath_text(self, node, xpath): - """ Safe access of occassionally missing text""" + def get_xpath_text(self, node: etree._Element, + xpath: str) -> Union[str, None]: + """Safe access of occassionally missing text""" try: return node.xpath(xpath, namespaces=self.ns)[0].text except IndexError: return None - def get_xpath_attrib(self, node, xpath, attrib): - """ Safe access of occassionally missing attributes""" + def get_xpath_attrib(self, node: etree._Element, + xpath: str, attrib: str) -> Union[str, None]: + """Safe access of occassionally missing attributes""" # defined here to take advantage of NS in default try: return node.xpath(xpath, namespaces=self.ns)[0].attrib[attrib] except IndexError: return None - def get_lims(self): + def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: + """Get the range of the element""" lims = ( self.get_xpath_attrib(self.node, 'ns:range', 'min'), self.get_xpath_attrib(self.node, 'ns:range', 'max') ) return tuple(float(x) if x is not None else x for x in lims) - def parse_lookup_xml_node(self, node): + 
def parse_lookup_xml_node(self, node: etree._Element) -> object: + """ + Parse lookup definition + + Returns + ------- + AST: AbstractSyntaxTree + + """ ys_node = node.xpath('ns:ypts', namespaces=self.ns)[0] ys = np.fromstring( ys_node.text, @@ -101,7 +118,8 @@ def parse_lookup_xml_node(self, node): type=self.interp_methods[interp] ) - def _parse(self): + def _parse(self) -> None: + """Parse all the components of an element""" if self.node.xpath("ns:element", namespaces=self.ns): for subnode in self.node.xpath("ns:element", namespaces=self.ns): self.components.append( @@ -109,42 +127,62 @@ def _parse(self): self._parse_component(subnode)) ) else: - subscripts = [] - for subnode in self.node.xpath("ns:dimensions/ns:dim", namespaces=self.ns): - subscripts.append(subnode.attrib["name"]) + subscripts = [ + subnode.attrib["name"] + for subnode + in self.node.xpath("ns:dimensions/ns:dim", namespaces=self.ns) + ] self.components = [ ((subscripts, []), self._parse_component(self.node)) ] - def smile_parser(self, expression): + def smile_parser(self, expression: str) -> object: + """ + Parse expression with parsimonious. + + Returns + ------- + AST: AbstractSyntaxTree + + """ tree = vu.Grammar.get("equations", parsing_ops).parse(expression) return EquationParser(tree).translation - def get_abstract_element(self): + def get_empty_abstract_element(self) -> AbstractElement: + """ + Get empty Abstract used for building + + Returns + ------- + AbstractElement + """ return AbstractElement( name=self.name, units=self.units, - range=self.limits, + range=self.range, documentation=self.documentation, components=[]) class Flaux(Element): """Flow or auxiliary variable""" + + kind = "Flaux" + def __init__(self, node, ns): super().__init__(node, ns) - self.limits = self.get_lims() + self.range = self.get_range() - @property - def _verbose(self): - return self.__str__() + def _parse_component(self, node) -> object: + """ + Parse one Flaux component - @property - def verbose(self): - print(self._verbose) + Returns + ------- + AST: AbstractSyntaxTree - def _parse_component(self, node): + """ eqn = self.get_xpath_text(node, 'ns:eqn') # Replace new lines with space, and replace 2 or more spaces with @@ -160,8 +198,15 @@ def _parse_component(self, node): return ast - def get_abstract_component(self): - ae = self.get_abstract_element() + def get_abstract_element(self) -> AbstractElement: + """ + Get Abstract Element with components used for building + + Returns + ------- + AbstractElement + """ + ae = self.get_empty_abstract_element() for component in self.components: ae.components.append(AbstractComponent( subscripts=component[0], @@ -171,24 +216,41 @@ def get_abstract_component(self): class Gf(Element): """Gf variable (lookup)""" + kind = "Gf component" def __init__(self, node, ns): super().__init__(node, ns) - self.limits = self.get_lims() + self.range = self.get_range() - def get_lims(self): + def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: + """Get the range of the Gf element""" lims = ( self.get_xpath_attrib(self.node, 'ns:yscale', 'min'), self.get_xpath_attrib(self.node, 'ns:yscale', 'max') ) return tuple(float(x) if x is not None else x for x in lims) - def _parse_component(self, node): + def _parse_component(self, node) -> object: + """ + Parse one Gf component + + Returns + ------- + AST: AbstractSyntaxTree + + """ return self.parse_lookup_xml_node(self.node) - def get_abstract_component(self): - ae = self.get_abstract_element() + def get_abstract_element(self) -> AbstractElement: + """ + 
Get Abstract Element with components used for building + + Returns + ------- + AbstractElement + """ + ae = self.get_empty_abstract_element() for component in self.components: ae.components.append(AbstractLookup( subscripts=component[0], @@ -198,13 +260,22 @@ def get_abstract_component(self): class Stock(Element): """Stock component (Integ)""" + kind = "Stock component" def __init__(self, node, ns): super().__init__(node, ns) - self.limits = self.get_lims() + self.range = self.get_range() - def _parse_component(self, node): + def _parse_component(self, node) -> object: + """ + Parse one Stock component + + Returns + ------- + AST: AbstractSyntaxTree + + """ # Parse each flow equations inflows = [ self.smile_parser(inflow.text) @@ -237,8 +308,15 @@ def _parse_component(self, node): return structures["stock"](flows, initial) - def get_abstract_component(self): - ae = self.get_abstract_element() + def get_abstract_element(self) -> AbstractElement: + """ + Get Abstract Element with components used for building + + Returns + ------- + AbstractElement + """ + ae = self.get_empty_abstract_element() for component in self.components: ae.components.append(AbstractComponent( subscripts=component[0], @@ -254,14 +332,29 @@ def __init__(self, name, units, documentation, eqn): self.name = name self.units = units self.documentation = documentation - self.limits = (None, None) + self.range = (None, None) self.eqn = eqn - def _parse(self): + def _parse(self) -> None: + """ + Parse control elment. + + Returns + ------- + AST: AbstractSyntaxTree + + """ self.ast = self.smile_parser(self.eqn) - def get_abstract_component(self): - ae = self.get_abstract_element() + def get_abstract_element(self) -> AbstractElement: + """ + Get Abstract Element with components used for building + + Returns + ------- + AbstractElement + """ + ae = self.get_empty_abstract_element() ae.components.append(AbstractComponent( subscripts=([], []), ast=self.ast)) @@ -271,7 +364,8 @@ def get_abstract_component(self): class SubscriptRange(): """Subscript range definition.""" - def __init__(self, name, definition, mapping=[]): + def __init__(self, name: str, definition: List[str], + mapping: List[str] = []): self.name = name self.definition = definition self.mapping = mapping @@ -282,15 +376,18 @@ def __str__(self): self.definition) @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" return self.__str__() @property def verbose(self): + """Print model information""" print(self._verbose) class EquationParser(parsimonious.NodeVisitor): + """Visit the elements of a equation to get the AST""" def __init__(self, ast): self.translation = None self.elements = {} @@ -302,11 +399,13 @@ def visit_expr_type(self, n, vc): self.translation = self.elements[vc[0]] def visit_logic2_expr(self, n, vc): + # expressions with logical binary operators (and, or) return vu.split_arithmetic( structures["logic"], parsing_ops["logic_ops"], "".join(vc).strip(), self.elements) def visit_logic_expr(self, n, vc): + # expressions with logical unitary operators (not) id = vc[2] if vc[0].lower() == "not": id = self.add_element(structures["logic"]( @@ -316,21 +415,25 @@ def visit_logic_expr(self, n, vc): return id def visit_comp_expr(self, n, vc): + # expressions with comparisons (=, <>, <, <=, >, >=) return vu.split_arithmetic( structures["logic"], parsing_ops["comp_ops"], "".join(vc).strip(), self.elements) def visit_add_expr(self, n, vc): + # expressions with additions (+, -) return vu.split_arithmetic( structures["arithmetic"], 
parsing_ops["add_ops"], "".join(vc).strip(), self.elements) def visit_prod_expr(self, n, vc): + # expressions with products (*, /) return vu.split_arithmetic( structures["arithmetic"], parsing_ops["prod_ops"], "".join(vc).strip(), self.elements) def visit_exp_expr(self, n, vc): + # expressions with exponentials (^) return vu.split_arithmetic( structures["arithmetic"], parsing_ops["exp_ops"], "".join(vc).strip(), self.elements, self.negatives) @@ -403,14 +506,13 @@ def visit_parens(self, n, vc): return vc[2] def visit__(self, n, vc): - """Handles whitespace characters""" + # handles whitespace characters return "" def visit_nan(self, n, vc): return "np.nan" def visit_empty(self, n, vc): - #warnings.warn(f"Empty expression for '{element['real_name']}''.") return self.add_element(None) def generic_visit(self, n, vc): diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translation/xmile/xmile_file.py index 8e232ef6..ea360c9b 100644 --- a/pysd/translation/xmile/xmile_file.py +++ b/pysd/translation/xmile/xmile_file.py @@ -1,3 +1,4 @@ +from typing import Union from pathlib import Path from lxml import etree @@ -15,13 +16,8 @@ class XmileFile(): xmile_path: str or pathlib.Path Path to the Xmile model. - encoding: str or None (optional) - Encoding of the source model file. If None, the encoding will be - read from the model, if the encoding is not defined in the model - file it will be set to 'UTF-8'. Default is None. - """ - def __init__(self, xmile_path, encoding=None): + def __init__(self, xmile_path: Union[str, Path]): self.xmile_path = Path(xmile_path) self.root_path = self.xmile_path.parent self.xmile_root = self.get_root() @@ -32,7 +28,8 @@ def __str__(self): return "\nXmile model file, loaded from:\n\t%s\n" % self.xmile_path @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" text = self.__str__() for section in self.sections: text += section._verbose @@ -41,10 +38,18 @@ def _verbose(self): @property def verbose(self): + """Print model information""" print(self._verbose) - def get_root(self): - """Read a Xmile file and assign its content to self.model_text""" + def get_root(self) -> etree._Element: + """ + Read a Xmile file and assign its content to self.model_text + + Returns + ------- + lxml.etree._Element: parsed xml object + + """ # check for model extension if self.xmile_path.suffix.lower() != ".xmile": raise ValueError( @@ -57,14 +62,14 @@ def get_root(self): parser=etree.XMLParser(encoding="utf-8", recover=True) ).getroot() - def parse(self): + def parse(self) -> None: # We keep everything in a single section # TODO: in order to make macros work we need to split them here in # several sections self.sections = [FileSection( name="__main__", path=self.xmile_path.with_suffix(".py"), - type="main", + section_type="main", params=[], returns=[], content_root=self.xmile_root, @@ -75,7 +80,14 @@ def parse(self): for section in self.sections: section._parse() - def get_abstract_model(self): + def get_abstract_model(self) -> AbstractModel: + """ + Get Abstract Model used for building + + Returns + ------- + AbstractModel + """ return AbstractModel( original_path=self.xmile_path, sections=tuple(section.get_abstract_section() diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translation/xmile/xmile_section.py index 3cf63bc0..e188cb1b 100644 --- a/pysd/translation/xmile/xmile_section.py +++ b/pysd/translation/xmile/xmile_section.py @@ -1,8 +1,9 @@ from typing import List, Union +from lxml import etree from pathlib import Path from 
..structures.abstract_model import\ - AbstractElement, AbstractSubscriptRange, AbstractSection + AbstractSubscriptRange, AbstractSection from .xmile_element import ControlElement, SubscriptRange, Flaux, Gf, Stock @@ -11,14 +12,14 @@ class FileSection(): # File section dataclass control_vars = ["initial_time", "final_time", "time_step", "saveper"] - def __init__(self, name: str, path: Path, type: str, + def __init__(self, name: str, path: Path, section_type: str, params: List[str], returns: List[str], - content_root: str, namespace: str, split: bool, + content_root: etree._Element, namespace: str, split: bool, views_dict: Union[dict, None] - ) -> object: + ): self.name = name self.path = path - self.type = type + self.type = section_type self.params = params self.returns = returns self.content = content_root @@ -31,7 +32,8 @@ def __str__(self): return "\nFile section: %s\n" % self.name @property - def _verbose(self): + def _verbose(self) -> str: + """Get model information""" text = self.__str__() if self.elements: for element in self.elements: @@ -43,36 +45,47 @@ def _verbose(self): @property def verbose(self): + """Print model information""" print(self._verbose) - def _parse(self): + def _parse(self) -> None: + """Parse the section""" + # parse subscripts and components self.subscripts = self._parse_subscripts() self.components = self._parse_components() + if self.name == "__main__": + # parse control variables self.components += self._parse_control_vars() + + # define elements for printing information self.elements = self.subscripts + self.components - def _parse_subscripts(self): + def _parse_subscripts(self) -> List[SubscriptRange]: """Parse the subscripts of the section""" - subscripts_ranges = [] - path = "ns:dimensions/ns:dim" - for node in self.content.xpath(path, namespaces=self.ns): - name = node.attrib["name"] - subscripts = [ - sub.attrib["name"] - for sub in node.xpath("ns:elem", namespaces=self.ns) - ] - subscripts_ranges.append(SubscriptRange(name, subscripts, [])) - return subscripts_ranges - - def _parse_control_vars(self): + return [ + SubscriptRange( + node.attrib["name"], + [ + sub.attrib["name"] + for sub in node.xpath("ns:elem", namespaces=self.ns) + ], + []) # no subscript mapping implemented + for node + in self.content.xpath("ns:dimensions/ns:dim", namespaces=self.ns) + ] + + def _parse_control_vars(self) -> List[ControlElement]: + """Parse control vars and rename them following the Vensim standard""" # Read the start time of simulation node = self.content.xpath('ns:sim_specs', namespaces=self.ns)[0] - time_units = node.attrib['time_units'] if 'time_units' in node.attrib else "" + time_units = node.attrib['time_units']\ + if 'time_units' in node.attrib else "" control_vars = [] + # initial time of the simulation control_vars.append(ControlElement( name="INITIAL TIME", units=time_units, @@ -80,6 +93,7 @@ def _parse_control_vars(self): eqn=node.xpath("ns:start", namespaces=self.ns)[0].text )) + # final time of the simulation control_vars.append(ControlElement( name="FINAL TIME", units=time_units, @@ -87,18 +101,11 @@ def _parse_control_vars(self): eqn=node.xpath("ns:stop", namespaces=self.ns)[0].text )) - # Read the time step of simulation - dt_node = node.xpath("ns:dt", namespaces=self.ns) - - # Use default value for time step if `dt` is not specified in model - dt_eqn = "1" - if len(dt_node) > 0: - dt_node = dt_node[0] - dt_eqn = dt_node.text - # If reciprocal mode are defined for `dt`, we should inverse value - if "reciprocal" in dt_node.attrib\ - and 
dt_node.attrib["reciprocal"].lower() == "true": - dt_eqn = "1/(" + dt_eqn + ")" + # time step of simulation + dt_node = node.xpath("ns:dt", namespaces=self.ns)[0] + dt_eqn = "1/(" + dt_node.text + ")" if "reciprocal" in dt_node.attrib\ + and dt_node.attrib["reciprocal"].lower() == "true"\ + else dt_node.text control_vars.append(ControlElement( name="TIME STEP", @@ -107,6 +114,7 @@ def _parse_control_vars(self): eqn=dt_eqn )) + # saving time of the simulation = time step control_vars.append(ControlElement( name="SAVEPER", units=time_units, @@ -117,7 +125,13 @@ def _parse_control_vars(self): [component._parse() for component in control_vars] return control_vars - def _parse_components(self): + def _parse_components(self) -> List[Union[Flaux, Gf, Stock]]: + """ + Parse model components. Three groups defined: + Flaux: flows and auxiliary variables + Gf: lookups + Stock: integs + """ # Add flows and auxiliary variables components = [ @@ -147,7 +161,14 @@ def _parse_components(self): [component._parse() for component in components] return components - def get_abstract_section(self): + def get_abstract_section(self) -> AbstractSection: + """ + Get Abstract Section used for building + + Returns + ------- + AbstractSection + """ return AbstractSection( name=self.name, path=self.path, @@ -156,14 +177,15 @@ def get_abstract_section(self): returns=self.returns, subscripts=self.solve_subscripts(), elements=[ - component.get_abstract_component() - for component in self.components + element.get_abstract_element() + for element in self.components ], split=self.split, views_dict=self.views_dict ) - def solve_subscripts(self): + def solve_subscripts(self) -> List[AbstractSubscriptRange]: + """Convert the subscript ranges to Abstract Subscript Ranges""" return [AbstractSubscriptRange( name=subs_range.name, subscripts=subs_range.definition, diff --git a/tests/integration_test_vensim_pathway.py b/tests/integration_test_vensim_pathway.py deleted file mode 100644 index fb564e1c..00000000 --- a/tests/integration_test_vensim_pathway.py +++ /dev/null @@ -1,543 +0,0 @@ - -""" -Note that this file is autogenerated by `integration_test_factory.py` -and changes are likely to be overwritten. 
-""" -import os -import warnings -import unittest -from pysd.tools.benchmarking import runner, assert_frames_close - -rtol = .05 - -_root = os.path.dirname(__file__) -test_models = os.path.join(_root, "test-models/tests") - - -class TestIntegrationExamples(unittest.TestCase): - - def test_abs(self): - output, canon = runner(test_models + '/abs/test_abs.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_active_initial(self): - output, canon = runner(test_models + '/active_initial/test_active_initial.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_active_initial_circular(self): - output, canon = runner(test_models + '/active_initial_circular/test_active_initial_circular.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_arguments(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner(test_models + '/arguments/test_arguments.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_array_with_line_break(self): - output, canon = runner(test_models + '/array_with_line_break/test_array_with_line_break.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_builtin_max(self): - output, canon = runner(test_models + '/builtin_max/builtin_max.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_builtin_min(self): - output, canon = runner(test_models + '/builtin_min/builtin_min.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_chained_initialization(self): - output, canon = runner(test_models + '/chained_initialization/test_chained_initialization.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip("Working on it") - def test_conditional_subscripts(self): - output, canon = runner(test_models + '/conditional_subscripts/test_conditional_subscripts.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_control_vars(self): - output, canon = runner(test_models + '/control_vars/test_control_vars.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_constant_expressions(self): - output, canon = runner(test_models + '/constant_expressions/test_constant_expressions.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_data_from_other_model(self): - output, canon = runner( - test_models + '/data_from_other_model/test_data_from_other_model.mdl', - data_files=test_models + '/data_from_other_model/data.tab', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_delay_fixed(self): - # issue https://github.com/JamesPHoughton/pysd/issues/147 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner(test_models + '/delay_fixed/test_delay_fixed.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_delay_numeric_error(self): - # issue https://github.com/JamesPHoughton/pysd/issues/225 - output, canon = runner(test_models + '/delay_numeric_error/test_delay_numeric_error.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_delay_parentheses(self): - output, canon = runner(test_models + '/delay_parentheses/test_delay_parentheses.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_delay_pipeline(self): - # issue https://github.com/JamesPHoughton/pysd/issues/147 - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner(test_models + 
'/delay_pipeline/test_pipeline_delays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_delays(self): - # issue https://github.com/JamesPHoughton/pysd/issues/147 - output, canon = runner(test_models + '/delays/test_delays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_dynamic_final_time(self): - # issue https://github.com/JamesPHoughton/pysd/issues/278 - output, canon = runner(test_models + '/dynamic_final_time/test_dynamic_final_time.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_euler_step_vs_saveper(self): - output, canon = runner(test_models + '/euler_step_vs_saveper/test_euler_step_vs_saveper.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_exp(self): - output, canon = runner(test_models + '/exp/test_exp.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_exponentiation(self): - output, canon = runner(test_models + '/exponentiation/exponentiation.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_forecast(self): - output, canon = runner(test_models + '/forecast/test_forecast.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_function_capitalization(self): - output, canon = runner(test_models + '/function_capitalization/test_function_capitalization.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_game(self): - output, canon = runner(test_models + '/game/test_game.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_constants_subrange(self): - output, canon = runner( - test_models + '/get_constants_subranges/' - + 'test_get_constants_subranges.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_data_args_3d_xls(self): - """ - Test for usage of GET DIRECT/XLS DATA with arguments from a Excel file - All the possible combinations of lentgh-wise and different dimensions - are tested in unit_test_external.py, this test want to test only the - good working of the builder - """ - output, canon = runner( - test_models + '/get_data_args_3d_xls/' - + 'test_get_data_args_3d_xls.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_lookups_data_3d_xls(self): - """ - Test for usage of GET DIRECT/XLS LOOKUPS/DATA from a Excel file - All the possible combinations of lentgh-wise and different dimensions - are tested in unit_test_external.py, this test want to test only the - good working of the builder - """ - output, canon = runner( - test_models + '/get_lookups_data_3d_xls/' - + 'test_get_lookups_data_3d_xls.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_lookups_subscripted_args(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner( - test_models + '/get_lookups_subscripted_args/' - + 'test_get_lookups_subscripted_args.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_lookups_subset(self): - output, canon = runner( - test_models + '/get_lookups_subset/' - + 'test_get_lookups_subset.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_with_missing_values_xlsx(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner( - test_models + '/get_with_missing_values_xlsx/' - + 'test_get_with_missing_values_xlsx.mdl', old=True - ) - - assert_frames_close(output, canon, rtol=rtol) - - def 
test_get_mixed_definitions(self): - output, canon = runner( - test_models + '/get_mixed_definitions/' - + 'test_get_mixed_definitions.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_subscript_3d_arrays_xls(self): - """ - Test for usage of GET DIRECT/XLS SUBSCRIPTS/CONSTANTS from a Excel file - All the possible combinations of lentgh-wise and different dimensions - are tested in unit_test_external.py, this test want to test only the - good working of the builder - """ - output, canon = runner( - test_models + '/get_subscript_3d_arrays_xls/' - + 'test_get_subscript_3d_arrays_xls.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_get_xls_cellrange(self): - output, canon = runner( - test_models + '/get_xls_cellrange/' - + 'test_get_xls_cellrange.mdl', old=True - ) - assert_frames_close(output, canon, rtol=rtol) - - def test_if_stmt(self): - output, canon = runner(test_models + '/if_stmt/if_stmt.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_initial_function(self): - output, canon = runner(test_models + '/initial_function/test_initial.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_input_functions(self): - output, canon = runner(test_models + '/input_functions/test_inputs.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_round(self): - output, canon = runner(test_models + '/subscripted_round/test_subscripted_round.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_invert_matrix(self): - output, canon = runner(test_models + '/invert_matrix/test_invert_matrix.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_limits(self): - output, canon = runner(test_models + '/limits/test_limits.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_line_breaks(self): - output, canon = runner(test_models + '/line_breaks/test_line_breaks.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_line_continuation(self): - output, canon = runner(test_models + '/line_continuation/test_line_continuation.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_ln(self): - output, canon = runner(test_models + '/ln/test_ln.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_log(self): - output, canon = runner(test_models + '/log/test_log.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_logicals(self): - output, canon = runner(test_models + '/logicals/test_logicals.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups(self): - output, canon = runner(test_models + '/lookups/test_lookups.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_without_range(self): - output, canon = runner(test_models + '/lookups_without_range/test_lookups_without_range.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_funcnames(self): - output, canon = runner(test_models + '/lookups_funcnames/test_lookups_funcnames.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_inline(self): - output, canon = runner(test_models + '/lookups_inline/test_lookups_inline.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_inline_bounded(self): - output, canon = runner(test_models + '/lookups_inline_bounded/test_lookups_inline_bounded.mdl', old=True) - 
assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_with_expr(self): - output, canon = runner(test_models + '/lookups_with_expr/test_lookups_with_expr.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_macro_cross_reference(self): - output, canon = runner(test_models + '/macro_cross_reference/test_macro_cross_reference.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_macro_expression(self): - output, canon = runner(test_models + '/macro_expression/test_macro_expression.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_macro_multi_expression(self): - output, canon = runner(test_models + '/macro_multi_expression/test_macro_multi_expression.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_macro_multi_macros(self): - output, canon = runner(test_models + '/macro_multi_macros/test_macro_multi_macros.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('working') - def test_macro_output(self): - output, canon = runner(test_models + '/macro_output/test_macro_output.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_macro_stock(self): - output, canon = runner(test_models + '/macro_stock/test_macro_stock.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip("Working on it") - def test_macro_trailing_definition(self): - output, canon = runner(test_models + '/macro_trailing_definition/test_macro_trailing_definition.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_model_doc(self): - output, canon = runner(test_models + '/model_doc/model_doc.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_nested_functions(self): - output, canon = runner(test_models + '/nested_functions/test_nested_functions.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_number_handling(self): - output, canon = runner(test_models + '/number_handling/test_number_handling.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_parentheses(self): - output, canon = runner(test_models + '/parentheses/test_parens.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - @unittest.skip('low priority') - def test_reference_capitalization(self): - """A properly formatted Vensim model should never create this failure""" - output, canon = runner(test_models + '/reference_capitalization/test_reference_capitalization.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_repeated_subscript(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner(test_models + '/repeated_subscript/test_repeated_subscript.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_rounding(self): - output, canon = runner(test_models + '/rounding/test_rounding.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_sample_if_true(self): - output, canon = runner(test_models + '/sample_if_true/test_sample_if_true.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_smooth(self): - output, canon = runner(test_models + '/smooth/test_smooth.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_smooth_and_stock(self): - output, canon = runner(test_models + '/smooth_and_stock/test_smooth_and_stock.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_special_characters(self): - output, 
canon = runner(test_models + '/special_characters/test_special_variable_names.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_sqrt(self): - output, canon = runner(test_models + '/sqrt/test_sqrt.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subrange_merge(self): - output, canon = runner(test_models + '/subrange_merge/test_subrange_merge.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_logicals(self): - output, canon = runner(test_models + '/subscript_logicals/test_subscript_logicals.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_multiples(self): - output, canon = runner(test_models + '/subscript_multiples/test_multiple_subscripts.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_1d_arrays(self): - output, canon = runner(test_models + '/subscript_1d_arrays/test_subscript_1d_arrays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_2d_arrays(self): - output, canon = runner(test_models + '/subscript_2d_arrays/test_subscript_2d_arrays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_3d_arrays(self): - output, canon = runner(test_models + '/subscript_3d_arrays/test_subscript_3d_arrays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_3d_arrays_lengthwise(self): - output, canon = runner(test_models + '/subscript_3d_arrays_lengthwise/test_subscript_3d_arrays_lengthwise.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_3d_arrays_widthwise(self): - output, canon = runner(test_models + '/subscript_3d_arrays_widthwise/test_subscript_3d_arrays_widthwise.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_aggregation(self): - output, canon = runner(test_models + '/subscript_aggregation/test_subscript_aggregation.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_constant_call(self): - output, canon = runner(test_models + '/subscript_constant_call/test_subscript_constant_call.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_copy(self): - output, canon = runner(test_models + '/subscript_copy/test_subscript_copy.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_docs(self): - output, canon = runner(test_models + '/subscript_docs/subscript_docs.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_element_name(self): - # issue https://github.com/JamesPHoughton/pysd/issues/216 - output, canon = runner(test_models + '/subscript_element_name/test_subscript_element_name.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_individually_defined_1_of_2d_arrays(self): - output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays/subscript_individually_defined_1_of_2d_arrays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_individually_defined_1_of_2d_arrays_from_floats(self): - output, canon = runner(test_models + '/subscript_individually_defined_1_of_2d_arrays_from_floats/subscript_individually_defined_1_of_2d_arrays_from_floats.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_individually_defined_1d_arrays(self): - output, canon = runner(test_models + 
'/subscript_individually_defined_1d_arrays/subscript_individually_defined_1d_arrays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_individually_defined_stocks(self): - output, canon = runner(test_models + '/subscript_individually_defined_stocks/test_subscript_individually_defined_stocks.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_mapping_simple(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner(test_models + '/subscript_mapping_simple/test_subscript_mapping_simple.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_mapping_vensim(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - output, canon = runner(test_models + '/subscript_mapping_vensim/test_subscript_mapping_vensim.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_mixed_assembly(self): - output, canon = runner(test_models + '/subscript_mixed_assembly/test_subscript_mixed_assembly.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_selection(self): - output, canon = runner(test_models + '/subscript_selection/subscript_selection.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_numeric_range(self): - output, canon = runner(test_models + '/subscript_numeric_range/test_subscript_numeric_range.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_subranges(self): - output, canon = runner(test_models + '/subscript_subranges/test_subscript_subrange.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_subranges_equal(self): - output, canon = runner(test_models + '/subscript_subranges_equal/test_subscript_subrange_equal.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_switching(self): - output, canon = runner(test_models + '/subscript_switching/subscript_switching.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_transposition(self): - output, canon = runner(test_models + '/subscript_transposition/test_subscript_transposition.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscript_updimensioning(self): - output, canon = runner(test_models + '/subscript_updimensioning/test_subscript_updimensioning.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_delays(self): - output, canon = runner(test_models + '/subscripted_delays/test_subscripted_delays.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_flows(self): - output, canon = runner(test_models + '/subscripted_flows/test_subscripted_flows.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_if_then_else(self): - output, canon = runner(test_models + '/subscripted_if_then_else/test_subscripted_if_then_else.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_logicals(self): - output, canon = runner(test_models + '/subscripted_logicals/test_subscripted_logicals.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_smooth(self): - # issue https://github.com/JamesPHoughton/pysd/issues/226 - output, canon = runner(test_models + '/subscripted_smooth/test_subscripted_smooth.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def 
test_subscripted_trend(self): - # issue https://github.com/JamesPHoughton/pysd/issues/226 - output, canon = runner(test_models + '/subscripted_trend/test_subscripted_trend.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subscripted_xidz(self): - output, canon = runner(test_models + '/subscripted_xidz/test_subscripted_xidz.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_subset_duplicated_coord(self): - output, canon = runner(test_models + '/subset_duplicated_coord/' - + 'test_subset_duplicated_coord.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_time(self): - output, canon = runner(test_models + '/time/test_time.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_trend(self): - output, canon = runner(test_models + '/trend/test_trend.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_trig(self): - output, canon = runner(test_models + '/trig/test_trig.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_variable_ranges(self): - output, canon = runner(test_models + '/variable_ranges/test_variable_ranges.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_unicode_characters(self): - output, canon = runner(test_models + '/unicode_characters/unicode_test_model.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_xidz_zidz(self): - output, canon = runner(test_models + '/xidz_zidz/xidz_zidz.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_run_uppercase(self): - output, canon = runner(test_models + '/case_sensitive_extension/teacup-upper.MDL', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_odd_number_quotes(self): - output, canon = runner(test_models + '/odd_number_quotes/teacup_3quotes.mdl', old=True) - assert_frames_close(output, canon, rtol=rtol) diff --git a/tests/integration_test_xmile_pathway.py b/tests/integration_test_xmile_pathway.py deleted file mode 100644 index 5e7f8186..00000000 --- a/tests/integration_test_xmile_pathway.py +++ /dev/null @@ -1,135 +0,0 @@ - -import os -import unittest -from pysd.tools.benchmarking import runner, assert_frames_close - -rtol = .05 - -_root = os.path.dirname(__file__) -test_models = os.path.join(_root, "test-models/tests") - - -class TestIntegrationExamples(unittest.TestCase): - - def test_abs(self): - output, canon = runner(test_models + '/abs/test_abs.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_builtin_max(self): - output, canon = runner(test_models + '/builtin_max/builtin_max.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_builtin_min(self): - output, canon = runner(test_models + '/builtin_min/builtin_min.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_chained_initialization(self): - output, canon = runner( - test_models + '/chained_initialization/test_chained_initialization.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_comparisons(self): - output, canon = runner( - test_models + '/comparisons/comparisons.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_constant_expressions(self): - output, canon = runner( - test_models + '/constant_expressions/test_constant_expressions.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_eval_order(self): - output, canon = runner( - test_models + '/eval_order/eval_order.xmile', 
old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_exp(self): - output, canon = runner(test_models + '/exp/test_exp.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_exponentiation(self): - output, canon = runner(test_models + '/exponentiation/exponentiation.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_function_capitalization(self): - output, canon = runner( - test_models + '/function_capitalization/test_function_capitalization.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_if_stmt(self): - output, canon = runner(test_models + '/if_stmt/if_stmt.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_initial_function(self): - output, canon = runner(test_models + '/initial_function/test_initial.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_limits(self): - output, canon = runner(test_models + '/limits/test_limits.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_line_breaks(self): - output, canon = runner(test_models + '/line_breaks/test_line_breaks.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_line_continuation(self): - output, canon = runner(test_models + '/line_continuation/test_line_continuation.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_ln(self): - output, canon = runner(test_models + '/ln/test_ln.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_log(self): - output, canon = runner(test_models + '/log/test_log.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_logicals(self): - output, canon = runner(test_models + '/logicals/test_logicals.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups(self): - output, canon = runner(test_models + '/lookups/test_lookups.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_xscale(self): - output, canon = runner(test_models + '/lookups/test_lookups_xscale.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_xpts_sep(self): - output, canon = runner(test_models + '/lookups/test_lookups_xpts_sep.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_ypts_sep(self): - output, canon = runner(test_models + '/lookups/test_lookups_ypts_sep.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_lookups_inline(self): - output, canon = runner(test_models + '/lookups_inline/test_lookups_inline.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_model_doc(self): - output, canon = runner(test_models + '/model_doc/model_doc.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_number_handling(self): - output, canon = runner(test_models + '/number_handling/test_number_handling.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_parentheses(self): - output, canon = runner(test_models + '/parentheses/test_parens.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_reference_capitalization(self): - """A properly formatted Vensim model should never create this failure""" - output, canon = runner( - test_models + '/reference_capitalization/test_reference_capitalization.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_sqrt(self): - output, canon = runner(test_models 
+ '/sqrt/test_sqrt.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) - - def test_trig(self): - output, canon = runner(test_models + '/trig/test_trig.xmile', old=True) - assert_frames_close(output, canon, rtol=rtol) diff --git a/tests/unit_test_builder.py b/tests/unit_test_builder.py deleted file mode 100644 index 313f0b9b..00000000 --- a/tests/unit_test_builder.py +++ /dev/null @@ -1,430 +0,0 @@ -import textwrap - -from unittest import TestCase - -import numpy as np -from numbers import Number -import xarray as xr - - -def runner(string, ns=None): - code = compile(string, '', 'exec') - if not ns: - ns = dict() - ns.update({'xr': xr, 'np': np}) - exec(code, ns) - return ns - - -class TestBuildElement(TestCase): - def test_no_subs_constant(self): - from pysd.translation.builder import build_element - string = textwrap.dedent( - build_element(element={'kind': 'constant', - 'subs': [[]], - 'merge_subs': [], - 'doc': '', - 'py_name': 'my_variable', - 'real_name': 'My Variable', - 'py_expr': ['0.01'], - 'unit': '', - 'eqn': '', - 'lims': '', - 'arguments': ''}, - subscript_dict={}) - ) - ns = runner(string) - a = ns['my_variable']() - self.assertIsInstance(a, Number) - self.assertEqual(a, .01) - - def test_no_subs_call(self): - from pysd.translation.builder import build_element - string = textwrap.dedent( - build_element(element={'kind': 'constant', - 'subs': [[]], - 'merge_subs': [], - 'doc': '', - 'py_name': 'my_first_variable', - 'real_name': 'My Variable', - 'py_expr': ['other_variable()'], - 'eqn': '', - 'lims': '', - 'unit': '', - 'arguments': ''}, - subscript_dict={}) - ) - ns = {'other_variable': lambda: 3} - ns = runner(string, ns) - a = ns['my_first_variable']() - self.assertIsInstance(a, Number) - self.assertEqual(a, 3) - - -class TestBuildFunctionCall(TestCase): - def test_build_function_not_implemented(self): - from warnings import catch_warnings - from pysd.translation.builder import build_function_call - args = ['a', 'b'] - nif = {"name": "not_implemented_function", - "module": "functions", - "original_name": "NIF"} - with catch_warnings(record=True) as ws: - self.assertEqual(build_function_call(nif, args), - "not_implemented_function('NIF',a,b)") - self.assertEqual(len(ws), 1) - self.assertTrue("Trying to translate NIF which it is " - + "not implemented on PySD." 
- in str(ws[0].message)) - - def test_build_function_with_time_dependency(self): - from pysd.translation.builder import build_function_call - args = ['a', 'b'] - pulse = { - "name": "pulse", - "parameters": [ - {"name": "time", "type": "time"}, - {"name": "start"}, - {"name": "duration"}, - ], - "module": "functions", - } - - dependencies = {'a': 1, 'b': 2} - self.assertNotIn('time', dependencies) - self.assertEqual(build_function_call(pulse, args, dependencies), - "pulse(__data['time'], a, b)") - self.assertIn('time', dependencies) - - def test_build_function_ignore_arguments(self): - from pysd.translation.builder import build_function_call - args = ['a', 'b', 'c'] - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A"}, - {"name": "B", "type": "ignore"}, - {"name": "C"} - ] - } - - self.assertEqual(build_function_call(my_func_conf, args), - "my_func(a, c)") - - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A", "type": "ignore"}, - {"name": "B"}, - {"name": "C", "type": "ignore"} - ] - } - - self.assertEqual(build_function_call(my_func_conf, args), - "my_func(b)") - - def test_build_function_lambda_arguments(self): - from pysd.translation.builder import build_function_call - args = ['a', 'b', 'c'] - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A"}, - {"name": "B", "type": "lambda"}, - {"name": "C"} - ] - } - - self.assertEqual(build_function_call(my_func_conf, args), - "my_func(a, lambda: b, c)") - - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A", "type": "lambda"}, - {"name": "B"}, - {"name": "C", "type": "lambda"} - ] - } - - self.assertEqual(build_function_call(my_func_conf, args), - "my_func(lambda: a, b, lambda: c)") - - def test_build_function_optional_arguments(self): - from pysd.translation.builder import build_function_call - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A"}, - {"name": "B"}, - {"name": "C", "optional": True} - ] - } - - self.assertEqual(build_function_call(my_func_conf, ['a', 'b', 'c']), - "my_func(a, b, c)") - - self.assertEqual(build_function_call(my_func_conf, ['a', 'b']), - "my_func(a, b)") - - # optional lambda argument, check optional + type - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A"}, - {"name": "B"}, - {"name": "C", "type": "lambda", "optional": True} - ] - } - - self.assertEqual(build_function_call(my_func_conf, ['a', 'b', 'c']), - "my_func(a, b, lambda: c)") - - self.assertEqual(build_function_call(my_func_conf, ['a', 'b']), - "my_func(a, b)") - - def test_build_function_predef_arguments(self): - from pysd.translation.builder import build_function_call - args = ['a', 'c'] - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "A"}, - {"name": "0", "type": "predef"}, - {"name": "C"} - ] - } - - self.assertEqual(build_function_call(my_func_conf, args), - "my_func(a, 0, c)") - - my_func_conf = { - "name": "my_func", - "parameters": [ - {"name": "time_step()", "type": "predef"}, - {"name": "B"}, - {"name": "1", "type": "predef"} - ] - } - - self.assertEqual(build_function_call(my_func_conf, ["b"]), - "my_func(time_step(), b, 1)") - - -class TestBuild(TestCase): - def test_build(self): - # Todo: add other builder-specific inclusions to this test - from pysd.translation.builder import build - actual = textwrap.dedent( - build(elements=[{'kind': 'component', - 'subs': [], - 'doc': '', - 'py_name': 'stocka', - 'real_name': 'StockA', - 'py_expr': ["_state['stocka']"], - 'eqn': [''], - 'lims': '', - 'unit': '', 
- 'merge_subs': [], - 'arguments': ''}, - {'kind': 'component', - 'subs': [], - 'doc': 'Provides derivative for stocka', - 'py_name': '_dstocka_dt', - 'real_name': 'Implicit', - 'py_expr': ['flowa()'], - 'unit': 'See docs for stocka', - 'eqn': [''], - 'lims': '', - 'merge_subs': [], - 'arguments': ''}, - {'kind': 'setup', - 'subs': [], - 'doc': 'Provides initial conditions for stocka', - 'py_name': 'init_stocka', - 'real_name': 'Implicit', - 'py_expr': ['-10'], - 'unit': 'See docs for stocka', - 'eqn': [''], - 'lims': '', - 'merge_subs': [], - 'arguments': ''}], - namespace={'StockA': 'stocka'}, - subscript_dict={'Dim1': ['A', 'B', 'C']}, - dependencies={ - "stocka": {"_integ_stocka"}, - "_integ_stocka": { - "initial": None, - "step": {"flowa"} - }, - "flowa": None - }, - outfile_name='return')) - self.assertIn('_subscript_dict = {"Dim1": ["A", "B", "C"]}', actual) - self.assertIn('_namespace = {"StockA": "stocka"}', actual) - self.assertIn( - '_dependencies = {\n "stocka": {"_integ_stocka"},' - + '\n "_integ_stocka": {"initial": None, "step": {"flowa"}},' - + '\n "flowa": None,\n}', actual) - - -class TestMergePartialElements(TestCase): - def test_single_set(self): - from pysd.translation.builder import merge_partial_elements - - self.assertEqual( - merge_partial_elements( - [{'py_name': 'a', 'py_expr': 'ms', - 'subs': ['Name1', 'element1'], - 'merge_subs': ['Name1', 'Elements'], - 'real_name': 'A', 'doc': 'Test', 'unit': None, - 'eqn': 'eq1', 'lims': '', - 'dependencies': {'b': 1, 'time': 3}, - 'kind': 'component', 'arguments': ''}, - {'py_name': 'a', 'py_expr': 'njk', - 'subs': ['Name1', 'element2'], - 'merge_subs': ['Name1', 'Elements'], - 'real_name': 'A', 'doc': None, 'unit': None, - 'eqn': 'eq2', 'lims': '', - 'dependencies': {'c': 1, 'time': 5}, - 'kind': 'component', 'arguments': ''}, - {'py_name': 'a', 'py_expr': 'as', - 'subs': ['Name1', 'element3'], - 'merge_subs': ['Name1', 'Elements'], - 'real_name': 'A', 'doc': '', 'unit': None, - 'eqn': 'eq3', 'lims': '', 'dependencies': {'b': 1}, - 'kind': 'component', 'arguments': ''}]), - [{'py_name': 'a', - 'py_expr': ['ms', 'njk', 'as'], - 'subs': [['Name1', 'element1'], - ['Name1', 'element2'], - ['Name1', 'element3']], - 'merge_subs': ['Name1', 'Elements'], - 'kind': 'component', - 'doc': 'Test', - 'real_name': 'A', - 'unit': None, - 'eqn': ['eq1', 'eq2', 'eq3'], - 'lims': '', - 'parent_name': None, - 'dependencies': {'b': 2, 'c': 1, 'time': 8}, - 'arguments': '' - }]) - - def test_multiple_sets(self): - from pysd.translation.builder import merge_partial_elements - actual = merge_partial_elements( - [{'py_name': 'a', 'py_expr': 'ms', 'subs': ['Name1', 'element1'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': {'b': 1}, - 'real_name': 'A', 'doc': 'Test', 'unit': None, - 'eqn': 'eq1', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': 'a', 'py_expr': 'njk', 'subs': ['Name1', 'element2'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': {'b': 2}, - 'real_name': 'A', 'doc': None, 'unit': None, - 'eqn': 'eq2', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': 'a', 'py_expr': 'as', 'subs': ['Name1', 'element3'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': {'b': 1}, - 'real_name': 'A', 'doc': '', 'unit': None, - 'eqn': 'eq3', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': '_b', 'py_expr': 'bgf', 'subs': ['Name1', 'element1'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': { - 'initial': {'c': 3}, 'step': {}}, - 'real_name': 'B', 'doc': 'Test', 'unit': None, - 
'eqn': 'eq4', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': '_b', 'py_expr': 'r4', 'subs': ['Name1', 'element2'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': { - 'initial': {'d': 1}, 'step': {'time': 2, 'd': 5}}, - 'real_name': 'B', 'doc': None, 'unit': None, - 'eqn': 'eq5', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': '_b', 'py_expr': 'ymt', 'subs': ['Name1', 'element3'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': { - 'initial': {}, 'step': {'time': 3, 'a': 1}}, - 'real_name': 'B', 'doc': '', 'unit': None, - 'eqn': 'eq6', 'lims': '', 'kind': 'component', 'arguments': ''}]) - - expected = [{'py_name': 'a', - 'py_expr': ['ms', 'njk', 'as'], - 'subs': [['Name1', 'element1'], - ['Name1', 'element2'], - ['Name1', 'element3']], - 'merge_subs': ['Name1', 'Elements'], - 'kind': 'component', - 'doc': 'Test', - 'real_name': 'A', - 'unit': None, - 'eqn': ['eq1', 'eq2', 'eq3'], - 'lims': '', - 'parent_name': None, - 'dependencies': {'b': 4}, - 'arguments': '' - }, - {'py_name': '_b', - 'py_expr': ['bgf', 'r4', 'ymt'], - 'subs': [['Name1', 'element1'], - ['Name1', 'element2'], - ['Name1', 'element3']], - 'merge_subs': ['Name1', 'Elements'], - 'kind': 'component', - 'doc': 'Test', - 'real_name': 'B', - 'unit': None, - 'eqn': ['eq4', 'eq5', 'eq6'], - 'lims': '', - 'parent_name': None, - 'dependencies': { - 'initial': {'c': 3, 'd': 1}, - 'step': {'time': 5, 'a': 1, 'd': 5} - }, - 'arguments': '' - }] - self.assertIn(actual[0], expected) - self.assertIn(actual[1], expected) - - def test_non_set(self): - from pysd.translation.builder import merge_partial_elements - actual = merge_partial_elements( - [{'py_name': 'a', 'py_expr': 'ms', 'subs': ['Name1', 'element1'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': {'c': 1}, - 'real_name': 'A', 'doc': 'Test', 'unit': None, - 'eqn': 'eq1', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': 'a', 'py_expr': 'njk', 'subs': ['Name1', 'element2'], - 'merge_subs': ['Name1', 'Elements'], 'dependencies': {'b': 2}, - 'real_name': 'A', 'doc': None, 'unit': None, - 'eqn': 'eq2', 'lims': '', 'kind': 'component', 'arguments': ''}, - {'py_name': 'c', 'py_expr': 'as', 'subs': ['Name1', 'element3'], - 'merge_subs': ['Name1', 'elements3'], 'dependencies': {}, - 'real_name': 'C', 'doc': 'hi', 'unit': None, - 'eqn': 'eq3', 'lims': '', 'kind': 'component', 'arguments': ''}, - ]) - - expected = [{'py_name': 'a', - 'py_expr': ['ms', 'njk'], - 'subs': [['Name1', 'element1'], ['Name1', 'element2']], - 'merge_subs': ['Name1', 'Elements'], - 'kind': 'component', - 'doc': 'Test', - 'real_name': 'A', - 'unit': None, - 'eqn': ['eq1', 'eq2'], - 'lims': '', - 'parent_name': None, - 'dependencies': {'b': 2, 'c': 1}, - 'arguments': '' - }, - {'py_name': 'c', - 'py_expr': ['as'], - 'subs': [['Name1', 'element3']], - 'merge_subs': ['Name1', 'elements3'], - 'kind': 'component', - 'doc': 'hi', - 'real_name': 'C', - 'unit': None, - 'eqn': ['eq3'], - 'lims': '', - 'parent_name': None, - 'dependencies': {}, - 'arguments': '' - }] - - self.assertIn(actual[0], expected) - self.assertIn(actual[1], expected) diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 1502a4a3..9cfcb6b1 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -624,50 +624,6 @@ def test_docs(self): "(32.0, 212.0)", ) - def test_docs_old(self): - """ Test that the model prints some documentation """ - - model = pysd.read_vensim(test_model, old=True) - self.assertIsInstance(str(model), str) # tests string conversion of - 
# model - - doc = model.doc() - self.assertIsInstance(doc, pd.DataFrame) - self.assertSetEqual( - { - "Characteristic Time", - "Teacup Temperature", - "FINAL TIME", - "Heat Loss to Room", - "INITIAL TIME", - "Room Temperature", - "SAVEPER", - "TIME STEP", - }, - set(doc["Real Name"].values), - ) - - self.assertEqual( - doc[doc["Real Name"] == "Heat Loss to Room"]["Unit"].values[0], - "Degrees Fahrenheit/Minute", - ) - self.assertEqual( - doc[doc["Real Name"] == "Teacup Temperature"]["Py Name"].values[0], - "teacup_temperature", - ) - self.assertEqual( - doc[doc["Real Name"] == "INITIAL TIME"]["Comment"].values[0], - "The initial time for the simulation.", - ) - self.assertEqual( - doc[doc["Real Name"] == "Characteristic Time"]["Type"].values[0], - "constant" - ) - self.assertEqual( - doc[doc["Real Name"] == "Teacup Temperature"]["Lims"].values[0], - "(32.0, 212.0)", - ) - def test_docs_multiline_eqn(self): """ Test that the model prints some documentation """ @@ -685,9 +641,6 @@ def test_docs_multiline_eqn(self): self.assertEqual( doc[doc["Real Name"] == "price"]["Subs"].values[0], "['fruits']" ) - # TODO: keep eqn? - #self.assertEqual(doc[doc["Real Name"] == "price"]["Eqn"].values[0], - # "1.2; .; .; .; 1.4") def test_stepwise_cache(self): from pysd.py_backend.decorators import Cache diff --git a/tests/unit_test_translation_utils.py b/tests/unit_test_translation_utils.py deleted file mode 100644 index 10a33434..00000000 --- a/tests/unit_test_translation_utils.py +++ /dev/null @@ -1,245 +0,0 @@ -from unittest import TestCase - - -class TestTranslationUtils(TestCase): - - def test_add_entries_underscore(self): - """" - Test for add_entries_undescore - """ - from pysd.translation.utils import add_entries_underscore - - dict1 = {'CD': 10, 'L F': 5} - dict2 = {'a b': 1, 'C': 2, 'L M H': 4} - - dict1b = dict1.copy() - - add_entries_underscore(dict1b) - - self.assertTrue('L_F' in dict1b) - self.assertEqual(dict1b['L F'], dict1b['L_F']) - - add_entries_underscore(dict1, dict2) - - self.assertTrue('L_F' in dict1) - self.assertEqual(dict1['L F'], dict1['L_F']) - self.assertTrue('a_b' in dict2) - self.assertEqual(dict2['a b'], dict2['a_b']) - self.assertTrue('L_M_H' in dict2) - self.assertEqual(dict2['L M H'], dict2['L_M_H']) - - def test_make_add_identifier(self): - """ - Test make_add_identifier for the .add methods py_name - """ - from pysd.translation.utils import make_add_identifier - - build_names = set() - - name = "values" - build_names.add(name) - - self.assertEqual(make_add_identifier(name, build_names), "valuesADD_1") - self.assertEqual(make_add_identifier(name, build_names), "valuesADD_2") - self.assertEqual(make_add_identifier(name, build_names), "valuesADD_3") - - name2 = "bb_a" - build_names.add(name2) - self.assertEqual(make_add_identifier(name2, build_names), "bb_aADD_1") - self.assertEqual(make_add_identifier(name, build_names), "valuesADD_4") - self.assertEqual(make_add_identifier(name2, build_names), "bb_aADD_2") - - def test_make_python_identifier(self): - from pysd.translation.utils import make_python_identifier - - self.assertEqual( - make_python_identifier('Capital'), 'capital') - - self.assertEqual( - make_python_identifier('multiple words'), 'multiple_words') - - self.assertEqual( - make_python_identifier('multiple spaces'), 'multiple_spaces') - - self.assertEqual( - make_python_identifier('for'), 'for_1') - - self.assertEqual( - make_python_identifier(' whitespace '), 'whitespace') - - self.assertEqual( - make_python_identifier('H@t tr!ck'), 'ht_trck') - - 
self.assertEqual( - make_python_identifier('123abc'), 'nvs_123abc') - - self.assertEqual( - make_python_identifier('Var$', {'Var$': 'var'}), - 'var') - - self.assertEqual( - make_python_identifier('Var@', {'Var$': 'var'}), 'var_1') - - self.assertEqual( - make_python_identifier('Var$', {'Var@': 'var', 'Var%': 'var_1'}), - 'var_2') - - my_vars = ["GDP 2010$", "GDP 2010€", "GDP 2010£"] - namespace = {} - expected = ["gdp_2010", "gdp_2010_1", "gdp_2010_2"] - for var, expect in zip(my_vars, expected): - self.assertEqual( - make_python_identifier(var, namespace), - expect) - - self.assertEqual( - make_python_identifier('1995 value'), - 'nvs_1995_value') - - self.assertEqual( - make_python_identifier('$ value'), - 'nvs_value') - - def test_make_coord_dict(self): - from pysd.translation.utils import make_coord_dict - self.assertEqual( - make_coord_dict(['Dim1', 'D'], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - terse=True), {'Dim2': ['D']}) - self.assertEqual( - make_coord_dict(['Dim1', 'D'], - {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D', 'E', 'F']}, - terse=False), {'Dim1': ['A', 'B', 'C'], - 'Dim2': ['D']}) - - def test_find_subscript_name(self): - from pysd.translation.utils import find_subscript_name - - self.assertEqual( - find_subscript_name({'Dim1': ['A', 'B'], - 'Dim2': ['C', 'D', 'E'], - 'Dim3': ['F', 'G', 'H', 'I']}, - 'D'), 'Dim2') - - self.assertEqual( - find_subscript_name({'Dim1': ['A', 'B'], - 'Dim2': ['C', 'D', 'E'], - 'Dim3': ['F', 'G', 'H', 'I']}, - 'Dim3'), 'Dim3') - - def test_make_merge_list(self): - from warnings import catch_warnings - from pysd.translation.utils import make_merge_list - - subscript_dict = { - "layers": ["l1", "l2", "l3"], - "layers1": ["l1", "l2", "l3"], - "up": ["l2", "l3"], - "down": ["l1", "l2"], - "dim": ["A", "B", "C"], - "dim1": ["A", "B", "C"] - } - - self.assertEqual( - make_merge_list([["l1"], ["up"]], - subscript_dict), - ["layers"]) - - self.assertEqual( - make_merge_list([["l3", "dim1"], ["down", "dim1"]], - subscript_dict), - ["layers", "dim1"]) - - self.assertEqual( - make_merge_list([["l2", "dim1", "dim"], ["l1", "dim1", "dim"]], - subscript_dict), - ["down", "dim1", "dim"]) - - self.assertEqual( - make_merge_list([["layers1", "l2"], ["layers1", "l3"]], - subscript_dict), - ["layers1", "up"]) - - # incomplete dimension - with catch_warnings(record=True) as ws: - self.assertEqual( - make_merge_list([["A"], ["B"]], - subscript_dict), - ["dim"]) - # use only user warnings - wu = [w for w in ws if issubclass(w.category, UserWarning)] - self.assertTrue(len(wu), 1) - self.assertIn("Dimension given by subscripts:" - + "\n\t{}\nis incomplete ".format({"A", "B"}) - + "using {} instead.".format("dim") - + "\nSubscript_dict:" - + "\n\t{}".format(subscript_dict), - str(wu[0].message)) - - # invalid dimension - try: - make_merge_list([["l1"], ["B"]], - subscript_dict) - self.assertFail() - except ValueError as err: - self.assertIn("Impossible to find the dimension that contains:" - + "\n\t{}\nFor subscript_dict:".format({"l1", "B"}) - + "\n\t{}".format(subscript_dict), - err.args[0]) - - # repeated subscript - with catch_warnings(record=True) as ws: - make_merge_list([["dim1", "A", "dim"], - ["dim1", "B", "dim"], - ["dim1", "C", "dim"]], - subscript_dict) - # use only user warnings - wu = [w for w in ws if issubclass(w.category, UserWarning)] - self.assertTrue(len(wu), 1) - self.assertIn( - "Adding new subscript range to subscript_dict:\ndim2: A, B, C", - str(wu[0].message)) - - subscript_dict2 = { - "dim1": ["A", "B", "C", "D"], - "dim1n": 
["A", "B"], - "dim1y": ["C", "D"], - "dim2": ["E", "F", "G", "H"], - "dim2n": ["E", "F"], - "dim2y": ["G", "H"] - } - - # merging two subranges - self.assertEqual( - make_merge_list([["dim1y"], - ["dim1n"]], - subscript_dict2), - ["dim1"]) - - # final subscript in list - self.assertEqual( - make_merge_list([["dim1", "dim2n"], - ["dim1n", "dim2y"], - ["dim1y", "dim2y"]], - subscript_dict2), - ["dim1", "dim2"]) - - def test_update_dependency(self): - from pysd.translation.utils import update_dependency - - deps_dict = {} - - update_dependency("var1", deps_dict) - self.assertEqual(deps_dict, {"var1": 1}) - - update_dependency("var1", deps_dict) - self.assertEqual(deps_dict, {"var1": 2}) - - update_dependency("var2", deps_dict) - self.assertEqual(deps_dict, {"var1": 2, "var2": 1}) - - for i in range(10): - update_dependency("var1", deps_dict) - self.assertEqual(deps_dict, {"var1": 12, "var2": 1}) diff --git a/tests/unit_test_vensim2py.py b/tests/unit_test_vensim2py.py deleted file mode 100644 index 498355f2..00000000 --- a/tests/unit_test_vensim2py.py +++ /dev/null @@ -1,1181 +0,0 @@ -import unittest -import xarray as xr - - -class TestGetFileSections(unittest.TestCase): - def test_normal_load(self): - """normal model file with no macros""" - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections(r"a~b~c| d~e~f| g~h~i|") - expected = [ - { - "returns": [], - "params": [], - "name": "_main_", - "string": "a~b~c| d~e~f| g~h~i|", - } - ] - self.assertEqual(actual, expected) - - def test_macro_only(self): - """ Macro Only """ - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections(":MACRO: MAC(z) a~b~c| :END OF MACRO:") - expected = [{"returns": [], "params": ["z"], "name": "MAC", - "string": "a~b~c|"}] - self.assertEqual(actual, expected) - - def test_macro_and_model(self): - """ basic macro and model """ - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections( - ":MACRO: MAC(z) a~b~c| :END OF MACRO: d~e~f| g~h~i|") - expected = [ - {"returns": [], "params": ["z"], "name": "MAC", - "string": "a~b~c|"}, - {"returns": [], "params": [], "name": "_main_", - "string": "d~e~f| g~h~i|"}, - ] - self.assertEqual(actual, expected) - - def test_macro_multiple_inputs(self): - """ macro with multiple input parameters """ - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections( - ":MACRO: MAC(z, y) a~b~c| :END OF MACRO: d~e~f| g~h~i|" - ) - expected = [ - {"returns": [], "params": ["z", "y"], "name": "MAC", - "string": "a~b~c|"}, - {"returns": [], "params": [], "name": "_main_", - "string": "d~e~f| g~h~i|"}, - ] - self.assertEqual(actual, expected) - - def test_macro_with_returns(self): - """ macro with return values """ - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections( - ":MACRO: MAC(z, y :x, w) a~b~c| :END OF MACRO: d~e~f| g~h~i|" - ) - expected = [ - { - "returns": ["x", "w"], - "params": ["z", "y"], - "name": "MAC", - "string": "a~b~c|", - }, - {"returns": [], "params": [], "name": "_main_", - "string": "d~e~f| g~h~i|"}, - ] - self.assertEqual(actual, expected) - - def test_handle_encoding(self): - """ Handle encoding """ - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections(r"{UTF-8} a~b~c| d~e~f| g~h~i|") - expected = [ - { - "returns": [], - "params": [], - "name": "_main_", - "string": "a~b~c| d~e~f| g~h~i|", - } - ] - self.assertEqual(actual, expected) 
- - def test_handle_encoding_like_strings(self): - """ Handle encoding-like strings in other places in the file """ - from pysd.translation.vensim.vensim2py import get_file_sections - - actual = get_file_sections(r"a~b~c| d~e~f{special}| g~h~i|") - expected = [ - { - "returns": [], - "params": [], - "name": "_main_", - "string": "a~b~c| d~e~f{special}| g~h~i|", - } - ] - self.assertEqual(actual, expected) - - -class TestEquationStringParsing(unittest.TestCase): - """ Tests the 'get_equation_components function """ - - def test_basics(self): - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components(r'constant = 25'), - { - 'expr': '25', - 'kind': 'component', - 'subs': [], - 'subs_compatibility': {}, - 'real_name': 'constant', - 'keyword': None - } - ) - - def test_equals_handling(self): - """ Parse cases with equal signs within the expression """ - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components(r"Boolean = IF THEN ELSE(1 = 1, 1, 0)"), - { - "expr": "IF THEN ELSE(1 = 1, 1, 0)", - "kind": "component", - "subs": [], - "subs_compatibility": {}, - "real_name": "Boolean", - "keyword": None, - }, - ) - - def test_whitespace_handling(self): - """ Whitespaces should be shortened to a single space """ - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components( - r"""constant\t = - \t25\t """ - ), - { - "expr": "25", - "kind": "component", - "subs": [], - "subs_compatibility": {}, - "real_name": "constant", - "keyword": None, - }, - ) - - # test eliminating vensim's line continuation character - self.assertEqual( - get_equation_components( - r"""constant [Sub1, \\ - Sub2] = 10, 12; 14, 16;""" - ), - { - "expr": "10, 12; 14, 16;", - "kind": "component", - "subs": ["Sub1", "Sub2"], - "subs_compatibility": {}, - "real_name": "constant", - "keyword": None, - }, - ) - - def test_subscript_definition_parsing(self): - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components(r"""Sub1: Entry 1, Entry 2, Entry 3 """), - { - "expr": None, - "kind": "subdef", - "subs": ["Entry 1", "Entry 2", "Entry 3"], - "subs_compatibility": {}, - "real_name": "Sub1", - "keyword": None, - }, - ) - - with self.assertRaises(ValueError) as err: - get_equation_components(r"""Sub2: (1-3) """) - - self.assertIn( - "A numeric range must contain at least one letter.", - str(err.exception)) - - with self.assertRaises(ValueError) as err: - get_equation_components(r"""Sub2: (a1-a1) """) - - self.assertIn( - "The number of the first subscript value must be " - "lower than the second subscript value in a " - "subscript numeric range.", - str(err.exception)) - - with self.assertRaises(ValueError) as err: - get_equation_components(r"""Sub2: (a1-b3) """) - - self.assertIn( - "Only matching names ending in numbers are valid.", - str(err.exception)) - - def test_subscript_references(self): - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components( - r"constant [Sub1, Sub2] = 10, 12; 14, 16;"), - { - "expr": "10, 12; 14, 16;", - "kind": "component", - "subs": ["Sub1", "Sub2"], - "subs_compatibility": {}, - "real_name": "constant", - "keyword": None, - }, - ) - - self.assertEqual( - get_equation_components( - r"function [Sub1] = other function[Sub1]"), - { - "expr": "other function[Sub1]", - "kind": "component", - "subs": ["Sub1"], - 
"subs_compatibility": {}, - "real_name": "function", - "keyword": None, - }, - ) - - self.assertEqual( - get_equation_components( - r'constant ["S1,b", "S1,c"] = 1, 2; 3, 4;'), - { - "expr": "1, 2; 3, 4;", - "kind": "component", - "subs": ['"S1,b"', '"S1,c"'], - "subs_compatibility": {}, - "real_name": "constant", - "keyword": None, - }, - ) - - self.assertEqual( - get_equation_components( - r'constant ["S1=b", "S1=c"] = 1, 2; 3, 4;'), - { - "expr": "1, 2; 3, 4;", - "kind": "component", - "subs": ['"S1=b"', '"S1=c"'], - "subs_compatibility": {}, - "real_name": "constant", - "keyword": None, - }, - ) - - def test_lookup_definitions(self): - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components(r"table([(0,-1)-(45,1)],(0,0),(5,0))"), - { - "expr": "([(0,-1)-(45,1)],(0,0),(5,0))", - "kind": "lookup", - "subs": [], - "subs_compatibility": {}, - "real_name": "table", - "keyword": None, - }, - ) - - self.assertEqual( - get_equation_components(r"table2 ([(0,-1)-(45,1)],(0,0),(5,0))"), - { - "expr": "([(0,-1)-(45,1)],(0,0),(5,0))", - "kind": "lookup", - "subs": [], - "subs_compatibility": {}, - "real_name": "table2", - "keyword": None, - }, - ) - - def test_get_lookup(self): - from pysd.translation.vensim.vensim2py import parse_lookup_expression - - res = parse_lookup_expression( - { - "expr": r"(GET DIRECT LOOKUPS('path2excel.xlsx', " - + r"'SheetName', 'index'\ , 'values'))", - "py_name": "get_lookup", - "subs": [], - "merge_subs": [] - }, - {} - )[1][0] - - self.assertEqual( - res["py_expr"], - "ExtLookup('path2excel.xlsx', 'SheetName', 'index', 'values', " - + "{},\n _root, '_ext_lookup_get_lookup')", - ) - - def test_pathological_names(self): - from pysd.translation.vensim.vensim2py import get_equation_components - - self.assertEqual( - get_equation_components(r'"silly-string" = 25'), - { - "expr": "25", - "kind": "component", - "subs": [], - "subs_compatibility": {}, - "real_name": '"silly-string"', - "keyword": None, - }, - ) - - self.assertEqual( - get_equation_components(r'"pathological\\-string" = 25'), - { - "expr": "25", - "kind": "component", - "subs": [], - "subs_compatibility": {}, - "real_name": r'"pathological\\-string"', - "keyword": None, - }, - ) - - def test_get_equation_components_error(self): - from pysd.translation.vensim.vensim2py import get_equation_components - - defi = "NIF: NFNF" - try: - get_equation_components(defi) - self.assertFail() - except ValueError as err: - self.assertIn( - "\nError when parsing definition:\n\t %s\n\n" - "probably used definition is invalid or not integrated..." - "\nSee parsimonious output above." 
% defi, - err.args[0], - ) - - -class TestParse_general_expression(unittest.TestCase): - def test_arithmetic(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression({"expr": "-10^3+4"}) - self.assertEqual(res[0]["py_expr"], "-10**3+4") - - def test_arithmetic_scientific(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression({"expr": "1e+4"}) - self.assertEqual(res[0]["py_expr"], "1e+4") - - res = parse_general_expression({"expr": "2e4"}) - self.assertEqual(res[0]["py_expr"], "2e4") - - res = parse_general_expression({"expr": "3.43e04"}) - self.assertEqual(res[0]["py_expr"], "3.43e04") - - res = parse_general_expression({"expr": "1.0E4"}) - self.assertEqual(res[0]["py_expr"], "1.0E4") - - res = parse_general_expression({"expr": "-2.0E43"}) - self.assertEqual(res[0]["py_expr"], "-2.0E43") - - res = parse_general_expression({"expr": "-2.0e-43"}) - self.assertEqual(res[0]["py_expr"], "-2.0e-43") - - def test_caps_handling(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression({"expr": "Abs(-3)"}) - self.assertEqual(res[0]["py_expr"], "np.abs(-3)") - - res = parse_general_expression({"expr": "ABS(-3)"}) - self.assertEqual(res[0]["py_expr"], "np.abs(-3)") - - res = parse_general_expression({"expr": "aBS(-3)"}) - self.assertEqual(res[0]["py_expr"], "np.abs(-3)") - - def test_empty(self): - from warnings import catch_warnings - from pysd.translation.vensim.vensim2py import parse_general_expression - - with catch_warnings(record=True) as ws: - res = parse_general_expression({"expr": "", "real_name": "Var"}) - # use only user warnings - wu = [w for w in ws if issubclass(w.category, UserWarning)] - self.assertEqual(len(wu), 1) - self.assertIn("Empty expression for 'Var'", str(wu[0].message)) - - self.assertEqual(res[0]["py_expr"], "None") - - def test_function_calls(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression({"expr": "ABS(StockA)", - "real_name": "AB", - "eqn": "AB = ABS(StockA)"}, - {"StockA": "stocka"}) - self.assertEqual(res[0]["py_expr"], "np.abs(stocka())") - - res = parse_general_expression( - {"expr": "If Then Else(A>B, 1, 0)", - "real_name": "IFE", - "eqn": "IFE = If Then Else(A>B, 1, 0)"}, - {"A": "a", "B": "b"} - ) - self.assertEqual( - res[0]["py_expr"], "if_then_else(a()>b(), lambda: 1, lambda: 0)" - ) - - # test that function calls are handled properly in arguments - res = parse_general_expression( - {"expr": "If Then Else(A>B,1,A)"}, {"A": "a", "B": "b"} - ) - self.assertEqual( - res[0]["py_expr"], "if_then_else(a()>b(), lambda: 1, lambda: a())" - ) - - def test_id_parsing(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression({"expr": "StockA"}, - {"StockA": "stocka"}) - self.assertEqual(res[0]["py_expr"], "stocka()") - - def test_logicals(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression( - {'expr': 'IF THEN ELSE(1 :AND: 0,0,1)'}) - self.assertEqual(res[0]['py_expr'], - 'if_then_else(logical_and(1,0), lambda: 0, lambda: 1)' - ) - - res = parse_general_expression( - {'expr': 'IF THEN ELSE(1 :OR: 0,0,1)'}) - self.assertEqual( - res[0]['py_expr'], - 'if_then_else(logical_or(1,0), lambda: 0, lambda: 1)' - ) - - res = parse_general_expression( - {'expr': 'IF THEN ELSE(1 :AND: 0 :and: 1,0,1)'}) - self.assertEqual( - 
res[0]['py_expr'], - 'if_then_else(logical_and(1,0,1), lambda: 0, lambda: 1)' - ) - - res = parse_general_expression( - {'expr': 'IF THEN ELSE(1 :or: 0 :OR: 1 :oR: 0,0,1)'}) - self.assertEqual( - res[0]['py_expr'], - 'if_then_else(logical_or(1,0,1,0), lambda: 0, lambda: 1)' - ) - - res = parse_general_expression( - {'expr': 'IF THEN ELSE(1 :AND: (0 :OR: 1),0,1)'}) - self.assertEqual(res[0]['py_expr'], - 'if_then_else(logical_and(1,logical_or(0,1)),' + - ' lambda: 0, lambda: 1)') - - res = parse_general_expression( - {'expr': 'IF THEN ELSE((1 :AND: 0) :OR: 1,0,1)'}) - self.assertEqual(res[0]['py_expr'], - 'if_then_else(logical_or(logical_and(1,0),1),' + - ' lambda: 0, lambda: 1)') - - with self.assertRaises(ValueError): - res = parse_general_expression( - {'expr': 'IF THEN ELSE(1 :AND: 0 :OR: 1,0,1)', - 'real_name': 'logical', - 'eqn': 'logical = IF THEN ELSE(1 :AND: 0 :OR: 1,0,1)'}) - - def test_number_parsing(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - res = parse_general_expression({'expr': '20'}) - self.assertEqual(res[0]['py_expr'], '20') - - res = parse_general_expression({"expr": "3.14159"}) - self.assertEqual(res[0]["py_expr"], "3.14159") - - res = parse_general_expression({"expr": "1.3e+10"}) - self.assertEqual(res[0]["py_expr"], "1.3e+10") - - res = parse_general_expression({"expr": "-1.3e-10"}) - self.assertEqual(res[0]["py_expr"], "-1.3e-10") - - def test_nan_parsing(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - from pysd.translation.builder import Imports - - Imports.reset() - self.assertFalse(Imports._numpy) - res = parse_general_expression({'expr': ':NA:'}) - self.assertEqual(res[0]['py_expr'], 'np.nan') - self.assertTrue(Imports._numpy) - - def test_stock_construction_function_no_subscripts(self): - """ stock construction should create a stateful variable and - reference it """ - from pysd.translation.vensim.vensim2py import parse_general_expression - from pysd.py_backend.statefuls import Integ - - res = parse_general_expression( - { - "expr": "INTEG (FlowA, -10)", - "py_name": "test_stock", - "subs": [], - "merge_subs": [] - }, - {"FlowA": "flowa"} - ) - - self.assertEqual(res[1][0]["kind"], "stateful") - a = eval(res[1][0]["py_expr"]) - self.assertIsInstance(a, Integ) - - # check the reference to that variable - self.assertEqual(res[0]["py_expr"], res[1][0]["py_name"] + "()") - - def test_delay_construction_function_no_subscripts(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - from pysd.py_backend.statefuls import Delay - - res = parse_general_expression( - { - "expr": "DELAY1(Variable, DelayTime)", - "py_name": "test_delay", - "subs": [], - "merge_subs": [] - }, - { - "Variable": "variable", - "DelayTime": "delaytime", - "TIME STEP": "time_step", - } - ) - - def time_step(): - return 0.5 - - self.assertEqual(res[1][0]["kind"], "stateful") - a = eval(res[1][0]["py_expr"]) - self.assertIsInstance(a, Delay) - - # check the reference to that variable - self.assertEqual(res[0]["py_expr"], res[1][0]["py_name"] + "()") - - def test_forecast_construction_function_no_subscripts(self): - """ Tests translation of 'forecast' - - This translation should create a new stateful object to hold the - forecast elements, and then pass back a reference to that value - """ - from pysd.translation.vensim.vensim2py import parse_general_expression - from pysd.py_backend.statefuls import Forecast - - res = parse_general_expression( - { - "expr": "FORECAST(Variable, AverageTime, Horizon)", - 
"py_name": "test_forecast", - "subs": [], - "merge_subs": [] - }, - {"Variable": "variable", "AverageTime": "averagetime", - "Horizon": "horizon"}, - elements_subs_dict={"test_forecast": []}, - ) - - # check stateful object creation - self.assertEqual(res[1][0]["kind"], "stateful") - a = eval(res[1][0]["py_expr"]) - self.assertIsInstance(a, Forecast) - - # check the reference to that variable - self.assertEqual(res[0]["py_expr"], res[1][0]["py_name"] + "()") - - def test_smooth_construction_function_no_subscripts(self): - """ Tests translation of 'smooth' - - This translation should create a new stateful object to hold the delay - elements, and then pass back a reference to that value - """ - from pysd.translation.vensim.vensim2py import parse_general_expression - from pysd.py_backend.statefuls import Smooth - - res = parse_general_expression( - { - "expr": "SMOOTH(Variable, DelayTime)", - "py_name": "test_smooth", - "subs": [], - "merge_subs": [] - }, - {"Variable": "variable", "DelayTime": "delaytime"}, - ) - - # check stateful object creation - self.assertEqual(res[1][0]["kind"], "stateful") - a = eval(res[1][0]["py_expr"]) - self.assertIsInstance(a, Smooth) - - # check the reference to that variable - self.assertEqual(res[0]["py_expr"], res[1][0]["py_name"] + "()") - - def test_subscript_float_initialization(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - _subscript_dict = { - "Dim": ["A", "B", "C", "D", "E"], - "Dim1": ["A", "B", "C"], "Dim2": ["D", "E"] - } - - # case 1 - element = parse_general_expression( - {"expr": "3.32", "subs": ["Dim1"], "py_name": "var", - "merge_subs": ["Dim1"]}, {}, - _subscript_dict - - ) - string = element[0]["py_expr"] - # TODO we should use a = eval(string) - # hoewever eval is not detecting _subscript_dict variable - self.assertEqual( - string, - "xr.DataArray(3.32,{'Dim1': _subscript_dict['Dim1']},['Dim1'])", - ) - a = xr.DataArray( - 3.32, {dim: _subscript_dict[dim] for dim in ["Dim1"]}, ["Dim1"] - ) - self.assertDictEqual( - {key: list(val.values) for key, val in a.coords.items()}, - {"Dim1": ["A", "B", "C"]}, - ) - self.assertEqual(a.loc[{"Dim1": "B"}], 3.32) - - # case 2: xarray subscript is a subrange from the final subscript range - element = parse_general_expression( - {"expr": "3.32", "subs": ["Dim1"], "py_name": "var", - "merge_subs": ["Dim"]}, {}, _subscript_dict - ) - string = element[0]["py_expr"] - # TODO we should use a = eval(string) - # hoewever eval is not detecting _subscript_dict variable - self.assertEqual( - string, - "xr.DataArray(3.32,{'Dim': _subscript_dict['Dim1']},['Dim'])", - ) - a = xr.DataArray( - 3.32, {"Dim": _subscript_dict["Dim1"]}, ["Dim"] - ) - self.assertDictEqual( - {key: list(val.values) for key, val in a.coords.items()}, - {"Dim": ["A", "B", "C"]}, - ) - self.assertEqual(a.loc[{"Dim": "B"}], 3.32) - - def test_subscript_1d_constant(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - _subscript_dict = {"Dim1": ["A", "B", "C"], "Dim2": ["D", "E"]} - element = parse_general_expression( - {"expr": "1, 2, 3", "subs": ["Dim1"], "py_name": "var", - "merge_subs": ["Dim1"]}, - {}, _subscript_dict - ) - string = element[0]["py_expr"] - # TODO we should use a = eval(string) - # hoewever eval is not detecting _subscript_dict variable - self.assertEqual( - string, - "xr.DataArray([1.,2.,3.],{'Dim1': _subscript_dict['Dim1']}," - "['Dim1'])", - ) - a = xr.DataArray([1.0, 2.0, 3.0], - {dim: _subscript_dict[dim] for dim in ["Dim1"]}, - ["Dim1"]) - 
self.assertDictEqual( - {key: list(val.values) for key, val in a.coords.items()}, - {"Dim1": ["A", "B", "C"]}, - ) - self.assertEqual(a.loc[{"Dim1": "A"}], 1) - - def test_subscript_2d_constant(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - _subscript_dict = {"Dim1": ["A", "B", "C"], "Dim2": ["D", "E"]} - element = parse_general_expression( - {"expr": "1, 2; 3, 4; 5, 6;", "subs": ["Dim1", "Dim2"], - "merge_subs": ["Dim1", "Dim2"], "py_name": "var"}, - {}, _subscript_dict - ) - string = element[0]["py_expr"] - a = eval(string) - self.assertDictEqual( - {key: list(val.values) for key, val in a.coords.items()}, - {"Dim1": ["A", "B", "C"], "Dim2": ["D", "E"]}, - ) - self.assertEqual(a.loc[{"Dim1": "A", "Dim2": "D"}], 1) - self.assertEqual(a.loc[{"Dim1": "B", "Dim2": "E"}], 4) - - def test_subscript_3d_depth(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - _subscript_dict = {"Dim1": ["A", "B", "C"], "Dim2": ["D", "E"]} - element = parse_general_expression( - {"expr": "1, 2; 3, 4; 5, 6;", "subs": ["Dim1", "Dim2"], - "merge_subs": ["Dim1", "Dim2"], "py_name": "var"}, - {}, _subscript_dict, - ) - string = element[0]["py_expr"] - a = eval(string) - self.assertDictEqual( - {key: list(val.values) for key, val in a.coords.items()}, - {"Dim1": ["A", "B", "C"], "Dim2": ["D", "E"]}, - ) - self.assertEqual(a.loc[{"Dim1": "A", "Dim2": "D"}], 1) - self.assertEqual(a.loc[{"Dim1": "B", "Dim2": "E"}], 4) - - def test_subscript_builder(self): - """ - Testing how subscripts are translated when we have common subscript - ranges. - """ - from pysd.translation.vensim.vensim2py import\ - parse_general_expression, parse_lookup_expression - - _subscript_dict = { - "Dim1": ["A", "B", "C"], "Dim2": ["B", "C"], "Dim3": ["B", "C"] - } - - # case 1: subscript of the expr is in the final range, which is a - # subrange of a greater range - element = parse_general_expression( - {"py_name": "var1", "subs": ["B"], "real_name": "var1", "eqn": "", - "expr": "GET DIRECT CONSTANTS('input.xlsx', 'Sheet1', 'C20')", - "merge_subs": ["Dim2"]}, - {}, - _subscript_dict - ) - self.assertIn( - "'Dim2': ['B']", element[1][0]['py_expr']) - - # case 1b: subscript of the expr is in the final range, which is a - # subrange of a greater range - element = parse_lookup_expression( - {"py_name": "var1b", "subs": ["B"], - "real_name": "var1b", "eqn": "", - "expr": "(GET DIRECT LOOKUPS('input.xlsx', 'Sheet1'," - " '19', 'C20'))", - "merge_subs": ["Dim2"]}, - _subscript_dict, - ) - self.assertIn( - "'Dim2': ['B']", element[1][0]['py_expr']) - - # case 2: subscript of the expr is a subscript subrange equal to the - # final range, which is a subrange of a greater range - element = parse_general_expression( - {"py_name": "var2", "subs": ["Dim2"], - "real_name": "var2", "eqn": "", - "expr": "GET DIRECT CONSTANTS('input.xlsx', 'Sheet1', 'C20')", - "merge_subs": ["Dim2"]}, - {}, - _subscript_dict - ) - self.assertIn( - "'Dim2': _subscript_dict['Dim2']", element[1][0]['py_expr']) - - # case 3: subscript of the expr is a subscript subrange equal to the - # final range, which is a subrange of a greater range, but there is - # a similar subrange before - element = parse_general_expression( - {"py_name": "var3", "subs": ["B"], "real_name": "var3", "eqn": "", - "expr": "GET DIRECT CONSTANTS('input.xlsx', 'Sheet1', 'C20')", - "merge_subs": ["Dim3"]}, - {}, - _subscript_dict - ) - self.assertIn( - "'Dim3': ['B']", element[1][0]['py_expr']) - - # case 4: subscript of the expr is a subscript 
subrange and the final - # subscript is a greater range - element = parse_general_expression( - {"py_name": "var4", "subs": ["Dim2"], - "real_name": "var4", "eqn": "", - "expr": "GET DIRECT CONSTANTS('input.xlsx', 'Sheet1', 'C20')", - "merge_subs": ["Dim1"]}, - {}, - _subscript_dict, - ) - self.assertIn( - "'Dim1': _subscript_dict['Dim2']", element[1][0]['py_expr']) - - # case 4b: subscript of the expr is a subscript subrange and the final - # subscript is a greater range - element = parse_general_expression( - {"py_name": "var4b", "subs": ["Dim2"], - "real_name": "var4b", "eqn": "", - "expr": "GET DIRECT DATA('input.xlsx', 'Sheet1', '19', 'C20')", - "keyword": None, "merge_subs": ["Dim1"]}, - {}, - _subscript_dict - ) - self.assertIn( - "'Dim1': _subscript_dict['Dim2']", element[1][0]['py_expr']) - - # case 4c: subscript of the expr is a subscript subrange and the final - # subscript is a greater range - element = parse_general_expression( - {"py_name": "var4c", "subs": ["Dim2"], - "real_name": "var4c", "eqn": "", - "expr": "GET DIRECT LOOKUPS('input.xlsx', 'Sheet1'," - " '19', 'C20')", "merge_subs": ["Dim1"]}, - {}, - _subscript_dict - ) - self.assertIn( - "'Dim1': _subscript_dict['Dim2']", element[1][0]['py_expr']) - - # case 4d: subscript of the expr is a subscript subrange and the final - # subscript is a greater range - element = parse_lookup_expression( - {"py_name": "var4d", "subs": ["Dim2"], - "real_name": "var4d", "eqn": "", - "expr": "(GET DIRECT LOOKUPS('input.xlsx', 'Sheet1'," - " '19', 'C20'))", "merge_subs": ["Dim1"]}, - _subscript_dict - ) - self.assertIn( - "'Dim1': _subscript_dict['Dim2']", element[1][0]['py_expr']) - - def test_subscript_reference(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression( - {"expr": "Var A[Dim1, Dim2]", "real_name": "Var2", "eqn": ""}, - {"Var A": "var_a"}, - {"Dim1": ["A", "B"], "Dim2": ["C", "D", "E"]}, - None, - {"var_a": ["Dim1", "Dim2"]} - ) - - self.assertEqual(res[0]["py_expr"], "var_a()") - - res = parse_general_expression( - {"expr": "Var B[Dim1, C]"}, - {"Var B": "var_b"}, - {"Dim1": ["A", "B"], "Dim2": ["C", "D", "E"]}, - None, - {"var_b": ["Dim1", "Dim2"]}, - ) - - self.assertEqual( - res[0]["py_expr"], - "rearrange(var_b().loc[:, 'C'].reset_coords(drop=True)," - "['Dim1'],_subscript_dict)", - ) - - res = parse_general_expression({'expr': 'Var B[A, C]'}, - {'Var B': 'var_b'}, - {'Dim1': ['A', 'B'], - 'Dim2': ['C', 'D', 'E']}, - None, - {'var_b': ['Dim1', 'Dim2']}) - - self.assertEqual( - res[0]['py_expr'], - "float(var_b().loc['A', 'C'])") - - res = parse_general_expression({'expr': 'Var C[Dim1, C, H]'}, - {'Var C': 'var_c'}, - {'Dim1': ['A', 'B'], - 'Dim2': ['C', 'D', 'E'], - 'Dim3': ['F', 'G', 'H', 'I']}, - None, - {'var_c': ['Dim1', 'Dim2', 'Dim3']}) - self.assertEqual( - res[0]["py_expr"], - "rearrange(var_c().loc[:, 'C', 'H'].reset_coords(drop=True)," - "['Dim1'],_subscript_dict)", - ) - - res = parse_general_expression({'expr': 'Var C[B, C, H]'}, - {'Var C': 'var_c'}, - {'Dim1': ['A', 'B'], - 'Dim2': ['C', 'D', 'E'], - 'Dim3': ['F', 'G', 'H', 'I']}, - None, - {'var_c': ['Dim1', 'Dim2', 'Dim3']}) - - self.assertEqual( - res[0]['py_expr'], - "float(var_c().loc['B', 'C', 'H'])") - - def test_subscript_ranges(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression( - {"expr": "Var D[Range1]"}, - {"Var D": "var_c"}, - {"Dim1": ["A", "B", "C", "D", "E", "F"], - "Range1": ["C", "D", "E"]}, - None, - {"var_c": 
["Dim1"]}, - ) - - self.assertEqual( - res[0]["py_expr"], "rearrange(var_c(),['Range1'],_subscript_dict)" - ) - - def test_invert_matrix(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression( - { - "expr": "INVERT MATRIX(A, 3)", - "real_name": "A1", - "py_name": "a1", - "merge_subs": ["dim1", "dim2"] - }, - { - "A": "a", - "A1": "a1", - }, - subscript_dict={ - "dim1": ["a", "b", "c"], "dim2": ["a", "b", "c"] - } - ) - - self.assertEqual(res[0]["py_expr"], "invert_matrix(a())") - - def test_subscript_elmcount(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression( - { - "expr": "ELMCOUNT(dim1)", - "real_name": "A", - "py_name": "a", - "merge_subs": [] - }, - { - "A": "a", - }, - subscript_dict={ - "dim1": ["a", "b", "c"], "dim2": ["a", "b", "c"] - } - ) - - self.assertIn( - "len(_subscript_dict['dim1'])", - res[0]["py_expr"], ) - - def test_subscript_logicals(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - res = parse_general_expression( - { - "expr": "IF THEN ELSE(dim1=dim2, 5, 0)", - "real_name": "A", - "py_name": "a", - "merge_subs": ["dim1", "dim2"] - }, - { - "A": "a", - }, - subscript_dict={ - "dim1": ["a", "b", "c"], "dim2": ["a", "b", "c"] - } - ) - - self.assertIn( - "xr.DataArray(_subscript_dict['dim1']," - "{'dim1': _subscript_dict['dim1']},'dim1')" - "==xr.DataArray(_subscript_dict['dim2']," - "{'dim2': _subscript_dict['dim2']},'dim2')", - res[0]["py_expr"], ) - - def test_ref_with_subscript_prefix(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - # When parsing functions arguments first the subscript ranges are - # parsed and later the general id is used, however, the if a reference - # to a var starts with a subscript range name this could make the - # parser crash - res = parse_general_expression( - { - "expr": "ABS(Upper var)", - "real_name": "A", - "eqn": "A = ABS(Upper var)", - "py_name": "a", - "merge_subs": [] - }, - { - "Upper var": "upper_var", - }, - subscript_dict={ - "upper": ["a", "b", "c"] - } - ) - - self.assertIn( - "np.abs(upper_var())", - res[0]["py_expr"], ) - - def test_random_0_1(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - # When parsing functions arguments first the subscript ranges are - # parsed and later the general id is used, however, the if a reference - # to a var starts with a subscript range name this could make the - # parser crash - res = parse_general_expression( - { - "expr": "RANDOM 0 1()", - "real_name": "A", - "eqn": "A = RANDOM 0 1()", - "py_name": "a", - "merge_subs": [], - "dependencies": set() - }, - { - "A": "a", - } - ) - - self.assertIn( - "np.random.uniform(0, 1)", - res[0]["py_expr"], ) - - def test_random_uniform(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - # When parsing functions arguments first the subscript ranges are - # parsed and later the general id is used, however, the if a reference - # to a var starts with a subscript range name this could make the - # parser crash - res = parse_general_expression( - { - "expr": "RANDOM UNIFORM(10, 15, 3)", - "real_name": "A", - "eqn": "A = RANDOM UNIFORM(10, 15, 3)", - "py_name": "a", - "merge_subs": [], - "dependencies": set() - }, - { - "A": "a", - } - ) - - self.assertIn( - "np.random.uniform(10, 15)", - res[0]["py_expr"], ) - - def test_incomplete_expression(self): - from pysd.translation.vensim.vensim2py import 
parse_general_expression - from warnings import catch_warnings - - with catch_warnings(record=True) as w: - res = parse_general_expression( - { - "expr": "A FUNCTION OF(Unspecified Eqn,Var A,Var B)", - "real_name": "Incomplete Func", - "py_name": "incomplete_func", - "eqn": "Incomplete Func = A FUNCTION OF(Unspecified " - + "Eqn,Var A,Var B)", - "subs": [], - "merge_subs": [] - }, - { - "Unspecified Eqn": "unspecified_eqn", - "Var A": "var_a", - "Var B": "var_b", - } - ) - self.assertEqual(len(w), 1) - self.assertTrue( - "Incomplete Func has no equation specified" in - str(w[-1].message) - ) - - self.assertEqual(res[0]["py_expr"], - "incomplete(unspecified_eqn(), var_a(), var_b())") - - def test_parse_general_expression_error(self): - from pysd.translation.vensim.vensim2py import parse_general_expression - - element = { - "expr": "NIF(1,3)", - "real_name": "not implemented function", - "eqn": "not implemented function=\tNIF(1,3)", - } - try: - parse_general_expression(element) - self.assertFail() - except ValueError as err: - self.assertIn( - "\nError when parsing %s with equation\n\t %s\n\n" - "probably a used function is not integrated..." - "\nSee parsimonious output above." - % (element["real_name"], element["eqn"]), - err.args[0], - ) - - -class TestParse_sketch_line(unittest.TestCase): - def test_parse_sketch_line(self): - from pysd.translation.vensim.vensim2py import parse_sketch_line - - namespace = {'"var-n"': "varn", "Stock": "stock", '"rate-1"': "rate1"} - lines = [ - '10,1,"var-n",332,344,21,12,0,3,0,32,1,0,0,0,-1--1--1,0-0-0' + - ',@Malgun Gothic|12||0-0-0', # normal variable with colors - "10,2,Stock,497,237,40,20,3,3,0,0,0,0,0,0", # stock - '10,7,"rate-1",382,262,21,11,40,3,0,0,-1,0,0,0', # normal variable - '10,2,"var-n",235,332,27,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,' + - '|0||128-128-128', # shadow variable - "*Just another view", # module definition - "1,5,6,3,100,0,0,22,0,0,0,-1--1--1,,1|(341,243)|", # arrow - "This is a random comment." - ] - - expected_var = [ - namespace['"var-n"'], - namespace["Stock"], - namespace['"rate-1"'], - "", - "", - "", - "" - ] - expected_mod = ["", "", "", "", "Just another view", "", ""] - - for num, line in enumerate(lines): - res = parse_sketch_line(line.strip(), namespace) - self.assertEqual(res["variable_name"], expected_var[num]) - self.assertEqual(res["view_name"], expected_mod[num]) - - -class TestParse_private_functions(unittest.TestCase): - def test__split_sketch_warning(self): - import warnings - from pysd.translation.vensim.vensim2py import _split_sketch - - model_str = "this is my model" - - with warnings.catch_warnings(record=True) as ws: - text, sketch = _split_sketch(model_str) - - # use only user warnings - wu = [w for w in ws if issubclass(w.category, UserWarning)] - self.assertEqual(len(wu), 1) - self.assertTrue( - "Your model does not have a sketch." 
in str(wu[0].message)) - - self.assertEqual(text, model_str) - self.assertEqual(sketch, "") diff --git a/tests/unit_test_xmile2py.py b/tests/unit_test_xmile2py.py deleted file mode 100644 index 7cdc7379..00000000 --- a/tests/unit_test_xmile2py.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import unittest -import tempfile - -from pysd.translation.xmile.xmile2py import translate_xmile - -_root = os.path.dirname(__file__) -TARGET_STMX_FILE = os.path.join(_root, "test-models/tests/game/test_game.stmx") - - -class TestXmileConversion(unittest.TestCase): - - def test_python_file_creation(self): - with open(TARGET_STMX_FILE, "r") as stmx: - contents = stmx.read() - - # Write out contents to temporary file - with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file: - temp_file.write(contents) - - # Convert file (should not raise error) - generated_file = translate_xmile(temp_file.name) - - # Check if both source file and python file exists - try: - assert generated_file != temp_file.name,\ - "Accidental replacement of original model file!" - assert generated_file.endswith(".py"),\ - "File created without python extension" - assert os.path.exists(temp_file.name)\ - and os.path.exists(generated_file),\ - "Expected files are missing" - finally: - os.remove(temp_file.name) - - try: - os.remove(generated_file) - except FileNotFoundError: - # Okay if python file is missing - pass - - def test_multiline_equation(self): - with open(TARGET_STMX_FILE, "r") as stmx: - contents = stmx.read() - - # Insert line break in equation definition - contents = contents.replace( - "(Stock+Constant)", - "(Stock+\nConstant)") - - # Write out contents to temporary file - with tempfile.NamedTemporaryFile(mode="w", delete=False) as temp_file: - temp_file.write(contents) - - # Convert file (should not raise error) - generated_file = translate_xmile(temp_file.name) - - with open(generated_file, "r") as fp: - contents = fp.read() - - idx = contents.find("stock() + constant()") - - try: - assert idx > 0, "Correct, generated, equation not found" - finally: - os.remove(temp_file.name) - os.remove(generated_file) From 5cd4aeb62e6ec3b9a611cef51fd49ca080eb27b6 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 11 Mar 2022 10:37:48 +0100 Subject: [PATCH 07/96] Include package data --- MANIFEST.in | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 1da30984..8aa7acaf 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,4 +2,4 @@ include requirements.txt include dev-requirements.txt include README.md include LICENSE -include pysd/translation/xmile/smile.grammar +graft pysd/translation/*/parsing_grammars diff --git a/setup.py b/setup.py index d890fcc0..d38815f0 100755 --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ }, package_data={ 'translation': [ - 'xmile/smile.grammar' + '*/parsing_grammars/*.peg' ] }, include_package_data=True From e26831c80257e2a22b9821dcab6399f32bf2092e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 11 Mar 2022 12:12:34 +0100 Subject: [PATCH 08/96] Include new tests and solve some bugs with xmile --- pysd/tools/benchmarking.py | 3 - pysd/translation/xmile/xmile_element.py | 73 +++++++++++-------- pysd/translation/xmile/xmile_section.py | 11 ++- .../pytest_integration_vensim_pathway.py | 12 +++ .../pytest_integration_xmile_pathway.py | 16 ++-- tests/test-models | 2 +- 6 files changed, 73 insertions(+), 44 deletions(-) diff --git a/pysd/tools/benchmarking.py b/pysd/tools/benchmarking.py index 313b2b46..e26d3137
100644 --- a/pysd/tools/benchmarking.py +++ b/pysd/tools/benchmarking.py @@ -33,9 +33,6 @@ def runner(model_file, canonical_file=None, transpose=False, data_files=None): data_files: list (optional) List of the data files needed to run the model. - old: bool(optional) - If True use old translation method, used for testing backward compatibility. - Returns ------- output, canon: (pandas.DataFrame, pandas.DataFrame) diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index 3d148cbb..936d2711 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -21,7 +21,7 @@ class Element(): kind = "Element" - def __init__(self, node: etree._Element, ns: dict): + def __init__(self, node: etree._Element, ns: dict, subscripts): self.node = node self.ns = ns self.name = node.attrib["name"] @@ -29,6 +29,7 @@ def __init__(self, node: etree._Element, ns: dict, subscripts): self.documentation = self.get_xpath_text(node, "ns:doc") or "" self.range = (None, None) self.components = [] + self.subscripts = subscripts def __str__(self): text = "\n%s definition: %s" % (self.kind, self.name) @@ -121,21 +122,32 @@ def parse_lookup_xml_node(self, node: etree._Element) -> object: def _parse(self) -> None: """Parse all the components of an element""" if self.node.xpath("ns:element", namespaces=self.ns): + # defined in several equations each with one subscript for subnode in self.node.xpath("ns:element", namespaces=self.ns): self.components.append( ((subnode.attrib["subscript"].split(","), []), - self._parse_component(subnode)) + self._parse_component(subnode)[0]) ) else: + # get the subscripts from element subscripts = [ subnode.attrib["name"] for subnode in self.node.xpath("ns:dimensions/ns:dim", namespaces=self.ns) ] - self.components = [ - ((subscripts, []), - self._parse_component(self.node)) - ] + parsed = self._parse_component(self.node) + if len(parsed) == 1: + # element defined with one equation + self.components = [((subscripts, []), parsed[0])] + else: + # element defined in several equations, but only the general + # subscripts are given, save each equation with its + # subscripts + subs_list = self.subscripts[subscripts[0]] + self.components = [ + (([subs], []), parsed_i) for subs, parsed_i in + zip(subs_list, parsed) + ] def smile_parser(self, expression: str) -> object: """ @@ -170,11 +182,11 @@ class Flaux(Element): kind = "Flaux" - def __init__(self, node, ns): - super().__init__(node, ns) + def __init__(self, node, ns, subscripts): + super().__init__(node, ns, subscripts) self.range = self.get_range() - def _parse_component(self, node) -> object: + def _parse_component(self, node: etree._Element) -> List[object]: """ Parse one Flaux component @@ -183,20 +195,21 @@ def _parse_component(self, node) -> object: AST: AbstractSyntaxTree """ - eqn = self.get_xpath_text(node, 'ns:eqn') - - # Replace new lines with space, and replace 2 or more spaces with - # single space. Then ensure there is no space at start or end of - # equation - eqn = re.sub(r"(\s{2,})", " ", eqn.replace("\n", ' ')).strip() - ast = self.smile_parser(eqn) - - gf_node = self.node.xpath("ns:gf", namespaces=self.ns) - if len(gf_node) > 0: - ast = structures["inline_lookup"]( - ast, self.parse_lookup_xml_node(gf_node[0])) - - return ast + asts = [] + for eqn in node.xpath('ns:eqn', namespaces=self.ns): + # Replace new lines with space, and replace 2 or more spaces with + # single space.
Then ensure there is no space at start or end of + # equation + eqn = re.sub(r"(\s{2,})", " ", eqn.text.replace("\n", ' ')).strip() + ast = self.smile_parser(eqn) + + gf_node = self.node.xpath("ns:gf", namespaces=self.ns) + if len(gf_node) > 0: + ast = structures["inline_lookup"]( + ast, self.parse_lookup_xml_node(gf_node[0])) + asts.append(ast) + + return asts def get_abstract_element(self) -> AbstractElement: """ @@ -219,8 +232,8 @@ class Gf(Element): kind = "Gf component" - def __init__(self, node, ns): - super().__init__(node, ns) + def __init__(self, node, ns, subscripts): + super().__init__(node, ns, subscripts) self.range = self.get_range() def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: @@ -231,7 +244,7 @@ def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: ) return tuple(float(x) if x is not None else x for x in lims) - def _parse_component(self, node) -> object: + def _parse_component(self, node: etree._Element) -> object: """ Parse one Gf component @@ -240,7 +253,7 @@ def _parse_component(self, node) -> object: AST: AbstractSyntaxTree """ - return self.parse_lookup_xml_node(self.node) + return [self.parse_lookup_xml_node(self.node)] def get_abstract_element(self) -> AbstractElement: """ @@ -263,8 +276,8 @@ class Stock(Element): kind = "Stock component" - def __init__(self, node, ns): - super().__init__(node, ns) + def __init__(self, node, ns, subscripts): + super().__init__(node, ns, subscripts) self.range = self.get_range() def _parse_component(self, node) -> object: @@ -306,7 +319,7 @@ def _parse_component(self, node) -> object: # Read the initial value equation for stock element initial = self.smile_parser(self.get_xpath_text(self.node, 'ns:eqn')) - return structures["stock"](flows, initial) + return [structures["stock"](flows, initial)] def get_abstract_element(self) -> AbstractElement: """ diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translation/xmile/xmile_section.py index e188cb1b..723f1391 100644 --- a/pysd/translation/xmile/xmile_section.py +++ b/pysd/translation/xmile/xmile_section.py @@ -63,7 +63,7 @@ def _parse(self) -> None: def _parse_subscripts(self) -> List[SubscriptRange]: """Parse the subscripts of the section""" - return [ + subscripts = [ SubscriptRange( node.attrib["name"], [ @@ -74,6 +74,9 @@ def _parse_subscripts(self) -> List[SubscriptRange]: for node in self.content.xpath("ns:dimensions/ns:dim", namespaces=self.ns) ] + self.subscripts_dict = { + subr.name: subr.definition for subr in subscripts} + return subscripts def _parse_control_vars(self) -> List[ControlElement]: """Parse control vars and rename them with Vensim standard""" @@ -135,7 +138,7 @@ def _parse_components(self) -> List[Union[Flaux, Gf, Stock]]: # Add flows and auxiliary variables components = [ - Flaux(node, self.ns) + Flaux(node, self.ns, self.subscripts_dict) for node in self.content.xpath( "ns:model/ns:variables/ns:aux|ns:model/ns:variables/ns:flow", namespaces=self.ns) @@ -144,7 +147,7 @@ def _parse_components(self) -> List[Union[Flaux, Gf, Stock]]: # Add lookups components += [ - Gf(node, self.ns) + Gf(node, self.ns, self.subscripts_dict) for node in self.content.xpath( "ns:model/ns:variables/ns:gf", namespaces=self.ns) @@ -152,7 +155,7 @@ def _parse_components(self) -> List[Union[Flaux, Gf, Stock]]: # Add stocks components += [ - Stock(node, self.ns) + Stock(node, self.ns, self.subscripts_dict) for node in self.content.xpath( "ns:model/ns:variables/ns:stock", namespaces=self.ns) diff --git 
a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py index 0d5b0c8e..fdc3e367 100644 --- a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -21,6 +21,10 @@ "folder": "arithmetics", "file": "test_arithmetics.mdl" }, + "arithmetics_exp": { + "folder": "arithmetics_exp", + "file": "test_arithmetics_exp.mdl" + }, "arguments": { "folder": "arguments", "file": "test_arguments.mdl", @@ -83,6 +87,10 @@ "folder": "dynamic_final_time", "file": "test_dynamic_final_time.mdl" }, + "elm_count": { + "folder": "elm_count", + "file": "test_elm_count.mdl" + }, "euler_step_vs_saveper": { "folder": "euler_step_vs_saveper", "file": "test_euler_step_vs_saveper.mdl" @@ -439,6 +447,10 @@ "folder": "subscripted_trend", "file": "test_subscripted_trend.mdl" }, + "subscripted_trig": { + "folder": "subscripted_trig", + "file": "test_subscripted_trig.mdl" + }, "subscripted_xidz": { "folder": "subscripted_xidz", "file": "test_subscripted_xidz.mdl" diff --git a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py index b252ef30..ad1c3321 100644 --- a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py +++ b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py @@ -14,6 +14,10 @@ "folder": "active_initial", "file": "test_active_initial.xmile" }, marks=pytest.mark.xfail(reason="failing originally")), + "arithmetics_exp": { + "folder": "arithmetics_exp", + "file": "test_arithmetics_exp.xmile" + }, "builtin_max": { "folder": "builtin_max", "file": "builtin_max.xmile" @@ -34,10 +38,6 @@ "folder": "constant_expressions", "file": "test_constant_expressions.xmile" }, - "euler_step_vs_saveper": pytest.param({ - "folder": "euler_step_vs_saveper", - "file": "test_euler_step_vs_saveper.xmile" - }, marks=pytest.mark.xfail(reason="failing originally")), "eval_order": { "folder": "eval_order", "file": "eval_order.xmile" @@ -150,10 +150,10 @@ "folder": "smooth_and_stock", "file": "test_smooth_and_stock.xmile" }, marks=pytest.mark.xfail(reason="failing originally")), - "special_characters": pytest.param({ + "special_characters": pytest.param({ "folder": "special_characters", "file": "test_special_variable_names.xmile" - }, marks=pytest.mark.xfail(reason="failing originally")), + }, marks=pytest.mark.xfail(reason="failing originally")), "sqrt": { "folder": "sqrt", "file": "test_sqrt.xmile" @@ -194,6 +194,10 @@ "folder": "subscripted_flows", "file": "test_subscripted_flows.xmile" }, marks=pytest.mark.xfail(reason="eqn with ??? 
in the model")), + "subscripted_trig": { + "folder": "subscripted_trig", + "file": "test_subscripted_trig.xmile" + }, "trig": { "folder": "trig", "file": "test_trig.xmile" diff --git a/tests/test-models b/tests/test-models index 75ea19ba..a25b603f 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit 75ea19badf2ed6e94aba0f707ef5d6c97d80195b +Subproject commit a25b603fd59800a4e0136c6f03090e0d2eb59dce From 2383f8f80e59d3b96c8cfbaf346ab59e12b53e45 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 11 Mar 2022 13:30:44 +0100 Subject: [PATCH 09/96] Document and clean --- .../python/python_expressions_builder.py | 4 +- pysd/building/python/python_model_builder.py | 66 +++++++-------- .../structures/abstract_expressions.py | 44 +++++----- pysd/translation/structures/abstract_model.py | 38 ++++----- pysd/translation/vensim/vensim_element.py | 22 ++--- pysd/translation/vensim/vensim_file.py | 6 +- pysd/translation/vensim/vensim_section.py | 6 +- pysd/translation/vensim/vensim_utils.py | 81 ++++++++++++++----- pysd/translation/xmile/xmile_element.py | 14 ++-- pysd/translation/xmile/xmile_file.py | 6 +- pysd/translation/xmile/xmile_section.py | 6 +- pysd/translation/xmile/xmile_utils.py | 55 ++++++++++--- .../pytest_integration_vensim_pathway.py | 2 +- .../pytest_split_views.py | 0 .../vensim_parser/pytest_vensim_file.py | 1 - tests/unit_test_pysd.py | 5 ++ 16 files changed, 209 insertions(+), 147 deletions(-) rename tests/pytest_translation/{vensim2py => vensim_parser}/pytest_split_views.py (100%) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index b8989e69..b4e64336 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1181,12 +1181,10 @@ def _merge_dependencies(current, new): """ current_set, new_set = set(current), set(new) for dep in current_set.intersection(new_set): + # if dependency is in both sum the number of calls if dep.startswith("__"): # if it is special (__lookup__, __external__) continue continue - # if dependency is in both sum the number of calls - if dep in ["initial", "step"]: - _merge_dependencies(current[dep], new[dep]) else: current[dep] += new[dep] for dep in new_set.difference(current_set): diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 477688cc..5f8d117b 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -49,9 +49,6 @@ def __init__(self, abstract_section: AbstractSection): self.macrospace = {} self.dependencies = {} - def __str__(self): - return "SectionBuilder " + self.path.name - def build_section(self): # Create namespace for element in self.elements: @@ -182,13 +179,13 @@ def _build_main_module(self, elements): Parameters ---------- elements: list - Elements belonging to the main module. Ideally, there should only be - the initial_time, final_time, saveper and time_step, functions, though - there might be others in some situations. Each element is a - dictionary, with the various components needed to assemble a model - component in python syntax. This will contain multiple entries for - elements that have multiple definitions in the original file, and - which need to be combined. + Elements belonging to the main module. Ideally, there should + only be the initial_time, final_time, saveper and time_step, + functions, though there might be others in some situations. 
+ Each element is a dictionary, with the various components + needed to assemble a model component in python syntax. This + will contain multiple entries for elements that have multiple + definitions in the original file, and which need to be combined. Returns ------- @@ -235,7 +232,8 @@ }) text += funcs - text = black.format_file_contents(text, fast=True, mode=black.FileMode()) + text = black.format_file_contents( + text, fast=True, mode=black.FileMode()) with self.path.open("w", encoding="UTF-8") as out: out.write(text) @@ -271,25 +269,21 @@ def _build(self): text = black.format_file_contents( text, fast=True, mode=black.FileMode()) - # this is used for testing - if not self.path: - return text - with self.path.open("w", encoding="UTF-8") as out: out.write(text) def _build_variables(self, elements): """ - Build model variables (functions) and separate then in control variables - and regular variables. + Build model variables (functions) and separate them into control + variables and regular variables. Returns ------- control_vars, regular_vars: tuple, str - control_vars is a tuple of length 2. First element is the dictionary - of original control vars. Second is the string to add the control - variables' functions. regular_vars is the string to add the regular - variables' functions. + control_vars is a tuple of length 2. First element is the + dictionary of original control vars. Second is the string to + add the control variables' functions. regular_vars is the + string to add the regular variables' functions. """ # returns of the control variables @@ -304,8 +298,8 @@ for element in elements: if element.identifier in control_vars_dict: - # change the return expression in the element and update the dict - # with the original expression + # change the return expression in the element and update + # the dict with the original expression control_vars_dict[element.identifier], element.expression =\ element.expression, control_vars_dict[element.identifier] control_vars.append(element) @@ -332,18 +326,18 @@ def _generate_functions(self, elements): """ Builds all model elements as functions in string format. - NOTE: this function calls the build_element function, which updates the - import_modules. - Therefore, it needs to be executed before the_generate_automatic_imports - function. + NOTE: this function calls the build_element function, which + updates the import_modules. + Therefore, it needs to be executed before the method + _generate_automatic_imports. Parameters ---------- elements: dict - Each element is a dictionary, with the various components needed to - assemble a model component in python syntax. This will contain - multiple entries for elements that have multiple definitions in the - original file, and which need to be combined. + Each element is a dictionary, with the various components + needed to assemble a model component in python syntax. This + will contain multiple entries for elements that have multiple + definitions in the original file, and which need to be combined.
Returns ------- @@ -394,16 +388,10 @@ def time(): return text -class SubSectionBuilder(SectionBuilder): - def __init__(self, abstract_section: AbstractSection): - pass - # TODO Use an intermediate class to split model, this calls could be inexistent and point to Section - # Namespace, subscripts and imports should point to parent section, others should remain in subsection - - class ElementBuilder: - def __init__(self, abstract_element: AbstractElement, section: SectionBuilder): + def __init__(self, abstract_element: AbstractElement, + section: SectionBuilder): self.__dict__ = abstract_element.__dict__.copy() self.type = None self.subtype = None diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translation/structures/abstract_expressions.py index 6930a64c..805cb1f2 100644 --- a/pysd/translation/structures/abstract_expressions.py +++ b/pysd/translation/structures/abstract_expressions.py @@ -7,7 +7,7 @@ class ArithmeticStructure: operators: str arguments: tuple - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "ArithmeticStructure:\n\t %s %s" % ( self.operators, self.arguments) @@ -17,7 +17,7 @@ class LogicStructure: operators: str arguments: tuple - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "LogicStructure:\n\t %s %s" % ( self.operators, self.arguments) @@ -26,7 +26,7 @@ def __str__(self) -> str: class SubscriptsReferenceStructure: subscripts: tuple - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "SubscriptReferenceStructure:\n\t %s" % self.subscripts @@ -35,7 +35,7 @@ class ReferenceStructure: reference: str subscripts: Union[SubscriptsReferenceStructure, None] = None - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "ReferenceStructure:\n\t %s%s" % ( self.reference, "\n\t" + str(self.subscripts or "").replace("\n", "\n\t")) @@ -46,7 +46,7 @@ class CallStructure: function: Union[str, object] arguments: tuple - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "CallStructure:\n\t%s(%s)" % ( self.function, "\n\t\t,".join([ @@ -59,7 +59,7 @@ def __str__(self) -> str: class GameStructure: expression: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "GameStructure:\n\t%s" % self.expression @@ -67,7 +67,7 @@ def __str__(self) -> str: class InitialStructure: initial: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "InitialStructure:\n\t%s" % ( self.initial) @@ -77,7 +77,7 @@ class IntegStructure: flow: object initial: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "IntegStructure:\n\t%s,\n\t%s" % ( self.flow, self.initial) @@ -90,7 +90,7 @@ class DelayStructure: initial: object order: float - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "DelayStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( self.order, self.input, @@ -108,7 +108,7 @@ class DelayNStructure: # DELAY N may behave different than other delays when the delay time # changes during integration - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "DelayNStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( self.order, self.input, @@ -122,7 +122,7 @@ class DelayFixedStructure: delay_time: object initial: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "DelayFixedStructure:\n\t%s,\n\t%s,\n\t%s" % ( self.input, self.delay_time, @@ -136,7 
+136,7 @@ class SmoothStructure: initial: object order: float - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "SmoothStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( self.order, self.input, @@ -154,7 +154,7 @@ class SmoothNStructure: # SMOOTH N may behave different than other smooths with RungeKutta # integration - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "SmoothNStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % ( self.order, self.input, @@ -168,7 +168,7 @@ class TrendStructure: average_time: object initial: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "TrendStructure:\n\t%s,\n\t%s,\n\t%s" % ( self.input, self.average_time, @@ -181,7 +181,7 @@ class ForecastStructure: average_time: object horizon: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "ForecastStructure:\n\t%s,\n\t%s,\n\t%s" % ( self.input, self.average_time, @@ -194,7 +194,7 @@ class SampleIfTrueStructure: input: object initial: object - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "SampleIfTrueStructure:\n\t%s,\n\t%s,\n\t%s" % ( self.condition, self.input, @@ -209,7 +209,7 @@ class LookupsStructure: y_range: tuple type: str - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "LookupStructure (%s):\n\tx %s = %s\n\ty %s = %s\n" % ( self.type, self.x_range, self.x, self.y_range, self.y ) @@ -220,7 +220,7 @@ class InlineLookupsStructure: argument: None lookups: LookupsStructure - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "InlineLookupsStructure:\n\t%s\n\t%s" % ( str(self.argument).replace("\n", "\n\t"), str(self.lookups).replace("\n", "\n\t") @@ -231,7 +231,7 @@ def __str__(self) -> str: class DataStructure: pass - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "DataStructure" @@ -242,7 +242,7 @@ class GetLookupsStructure: x_row_or_col: str cell: str - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "GetLookupStructure:\n\t'%s', '%s', '%s', '%s'\n" % ( self.file, self.tab, self.x_row_or_col, self.cell ) @@ -255,7 +255,7 @@ class GetDataStructure: time_row_or_col: str cell: str - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "GetDataStructure:\n\t'%s', '%s', '%s', '%s'\n" % ( self.file, self.tab, self.time_row_or_col, self.cell ) @@ -267,7 +267,7 @@ class GetConstantsStructure: tab: str cell: str - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "GetConstantsStructure:\n\t'%s', '%s', '%s'\n" % ( self.file, self.tab, self.cell ) diff --git a/pysd/translation/structures/abstract_model.py b/pysd/translation/structures/abstract_model.py index b295eba9..90ac5804 100644 --- a/pysd/translation/structures/abstract_model.py +++ b/pysd/translation/structures/abstract_model.py @@ -10,17 +10,17 @@ class AbstractComponent: type: str = "Auxiliary" subtype: str = "Normal" - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractComponent %s\n" % ( "%s" % repr(list(self.subscripts)) if self.subscripts else "") - def dump(self, depth=None, indent="") -> str: + def dump(self, depth=None, indent="") -> str: # pragma: no cover if depth == 0: return self.__str__() return self.__str__() + "\n" + self._str_child(depth, indent) - def _str_child(self, depth, indent) -> str: + def _str_child(self, depth, indent) -> str: # pragma: no cover return 
str(self.ast).replace("\t", indent).replace("\n", "\n" + indent) @@ -31,7 +31,7 @@ class AbstractUnchangeableConstant(AbstractComponent): type: str = "Constant" subtype: str = "Unchangeable" - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractLookup %s\n" % ( "%s" % repr(list(self.subscripts)) if self.subscripts else "") @@ -44,7 +44,7 @@ class AbstractLookup(AbstractComponent): type: str = "Lookup" subtype: str = "Hardcoded" - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractLookup %s\n" % ( "%s" % repr(list(self.subscripts)) if self.subscripts else "") @@ -57,18 +57,18 @@ class AbstractData(AbstractComponent): type: str = "Data" subtype: str = "Normal" - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractData (%s) %s\n" % ( self.keyword, "%s" % repr(list(self.subscripts)) if self.subscripts else "") - def dump(self, depth=None, indent="") -> str: + def dump(self, depth=None, indent="") -> str: # pragma: no cover if depth == 0: return self.__str__() return self.__str__() + "\n" + self._str_child(depth, indent) - def _str_child(self, depth, indent) -> str: + def _str_child(self, depth, indent) -> str: # pragma: no cover return str(self.ast).replace("\n", "\n" + indent) @@ -80,11 +80,11 @@ class AbstractElement: range: tuple = (None, None) documentation: str = "" - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractElement:\t%s (%s, %s)\n%s\n" % ( self.name, self.units, self.range, self.documentation) - def dump(self, depth=None, indent="") -> str: + def dump(self, depth=None, indent="") -> str: # pragma: no cover if depth == 0: return self.__str__() elif depth is not None: @@ -92,7 +92,7 @@ def dump(self, depth=None, indent="") -> str: return self.__str__() + "\n" + self._str_child(depth, indent) - def _str_child(self, depth, indent) -> str: + def _str_child(self, depth, indent) -> str: # pragma: no cover return "\n".join([ component.dump(depth, indent) for component in self.components ]).replace("\n", "\n" + indent) @@ -104,13 +104,13 @@ class AbstractSubscriptRange: subscripts: Tuple[str] mapping: Tuple[str] - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractSubscriptRange:\t%s\n\t%s\n" % ( self.name, "%s <- %s" % (self.subscripts, self.mapping) if self.mapping else self.subscripts) - def dump(self, depth=None, indent="") -> str: + def dump(self, depth=None, indent="") -> str: # pragma: no cover return self.__str__() @@ -126,11 +126,11 @@ class AbstractSection: split: bool views_dict: Union[dict, None] - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractSection (%s):\t%s (%s)\n" % ( self.type, self.name, self.path) - def dump(self, depth=None, indent="") -> str: + def dump(self, depth=None, indent="") -> str: # pragma: no cover if depth == 0: return self.__str__() elif depth is not None: @@ -138,7 +138,7 @@ def dump(self, depth=None, indent="") -> str: return self.__str__() + "\n" + self._str_child(depth, indent) - def _str_child(self, depth, indent) -> str: + def _str_child(self, depth, indent) -> str: # pragma: no cover return "\n".join([ element.dump(depth, indent) for element in self.subscripts ] + [ @@ -151,10 +151,10 @@ class AbstractModel: original_path: Path sections: Tuple[AbstractSection] - def __str__(self) -> str: + def __str__(self) -> str: # pragma: no cover return "AbstractModel:\t%s\n" % self.original_path - def dump(self, depth=None, 
indent="") -> str: + def dump(self, depth=None, indent="") -> str: # pragma: no cover if depth == 0: return self.__str__() elif depth is not None: @@ -162,7 +162,7 @@ def dump(self, depth=None, indent="") -> str: return self.__str__() + "\n" + self._str_child(depth, indent) - def _str_child(self, depth, indent) -> str: + def _str_child(self, depth, indent) -> str: # pragma: no cover return "\n".join([ section.dump(depth, indent) for section in self.sections ]).replace("\n", "\n" + indent) diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 74017e95..39a9dc64 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -20,17 +20,17 @@ def __init__(self, equation: str, units: str, documentation: str): self.units, self.range = self._parse_units(units) self.documentation = documentation - def __str__(self): + def __str__(self): # pragma: no cover return "Model element:\n\t%s\nunits: %s\ndocs: %s\n" % ( self.equation, self.units, self.documentation) @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" return self.__str__() @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) @@ -190,19 +190,19 @@ def __init__(self, name: str, definition: Union[List[str], str, dict], self.definition = definition self.mapping = mapping - def __str__(self): + def __str__(self): # pragma: no cover return "\nSubscript range definition: %s\n\t%s\n" % ( self.name, "%s <- %s" % (self.definition, self.mapping) if self.mapping else self.definition) @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" return self.__str__() @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) @@ -217,7 +217,7 @@ def __init__(self, name: str, subscripts: Tuple[list, list], self.subscripts = subscripts self.expression = expression - def __str__(self): + def __str__(self): # pragma: no cover text = "\n%s definition: %s" % (self.kind, self.name) text += "\nSubscrips: %s" % repr(self.subscripts[0])\ if self.subscripts[0] else "" @@ -227,7 +227,7 @@ def __str__(self): return text @property - def _expression(self): + def _expression(self): # pragma: no cover if hasattr(self, "ast"): return str(self.ast).replace("\n", "\n\t") @@ -235,11 +235,11 @@ def _expression(self): return self.expression.replace("\n", "\n\t") @property - def _verbose(self): + def _verbose(self) -> str: # pragma: no cover return self.__str__() @property - def verbose(self): + def verbose(self): # pragma: no cover print(self._verbose) def _parse(self) -> None: @@ -302,7 +302,7 @@ def __init__(self, name: str, subscripts: Tuple[list, list], super().__init__(name, subscripts, expression) self.keyword = keyword - def __str__(self): + def __str__(self): # pragma: no cover text = "\n%s definition: %s" % (self.kind, self.name) text += "\nSubscrips: %s" % repr(self.subscripts[0])\ if self.subscripts[0] else "" diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translation/vensim/vensim_file.py index ca7d7b59..a7c8a315 100644 --- a/pysd/translation/vensim/vensim_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -35,11 +35,11 @@ def __init__(self, mdl_path: Union[str, Path], self.view_elements = None self._split_sketch() - def __str__(self): + def __str__(self): # pragma: no cover return "\nVensim model file, loaded 
from:\n\t%s\n" % self.mdl_path @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" text = self.__str__() for section in self.sections: @@ -48,7 +48,7 @@ def _verbose(self) -> str: return text @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translation/vensim/vensim_section.py index 98e49d54..8a7efdad 100644 --- a/pysd/translation/vensim/vensim_section.py +++ b/pysd/translation/vensim/vensim_section.py @@ -25,11 +25,11 @@ def __init__(self, name: str, path: Path, section_type: str, self.views_dict = views_dict self.elements = None - def __str__(self): + def __str__(self): # pragma: no cover return "\nFile section: %s\n" % self.name @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" text = self.__str__() if self.elements: @@ -41,7 +41,7 @@ def _verbose(self) -> str: return text @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) diff --git a/pysd/translation/vensim/vensim_utils.py b/pysd/translation/vensim/vensim_utils.py index 7c32b378..c40989f1 100644 --- a/pysd/translation/vensim/vensim_utils.py +++ b/pysd/translation/vensim/vensim_utils.py @@ -48,35 +48,40 @@ def _gpath(cls, grammar: str) -> Path: """Get the grammar file path""" return cls._grammar_path.joinpath(grammar).with_suffix(".peg") - @classmethod - def clean(cls) -> None: - """Clean the saved grammars (used for debugging)""" - cls._common_grammar = None - cls._grammar: Dict = {} - - -def _detect_encoding_from_file(mdl_file: Path) -> str: - """Detect and return the encoding from a Vensim file""" - try: - with mdl_file.open("rb") as in_file: - f_line: bytes = in_file.readline() - f_line: str = f_line.decode(detect(f_line)['encoding']) - return re.search(r"(?<={)(.*)(?=})", f_line).group() - except (AttributeError, UnicodeDecodeError): - warnings.warn( - "No encoding specified or detected to translate the model " - "file. 'UTF-8' encoding will be used.") - return "UTF-8" - def split_arithmetic(structure: object, parsing_ops: dict, expression: str, elements: dict, negatives: set = set()) -> object: + """ + Split arithmetic pattern and return the corresponding object. + + Parameters + ---------- + structure: callable + Callable that generates the arithmetic object to return. + parsing_ops: dict + The parsing operators dictionary. + expression: str + Original expression with the operator and the hex code to the objects. + elements: dict + Dictionary of the hex identifiers and the objects that represent. + negative: set + Set of element hex values that must change their sign. + + Returns + ------- + object: structure + Final object of the arithmetic operation or initial object if + no operations are performed. 
+ + """ pattern = re.compile(parsing_ops) - parts = pattern.split(expression) - ops = pattern.findall(expression) + parts = pattern.split(expression) # list of elements ids + ops = pattern.findall(expression) # operators list if not ops: + # no operators return original object if parts[0] in negatives: + # make original object negative negatives.remove(parts[0]) return add_element( elements, @@ -85,6 +90,7 @@ def split_arithmetic(structure: object, parsing_ops: dict, return expression else: if not negatives: + # create arithmetic object return add_element( elements, structure( @@ -110,6 +116,37 @@ def split_arithmetic(structure: object, parsing_ops: dict, def add_element(elements: dict, element: object) -> str: + """ + Add element to elements dict using an unique hex identifier + + Parameters + ---------- + elements: dict + Dictionary of all elements. + + element: object + Element to add. + + Returns + ------- + id: str (hex) + The name of the key where element is saved in elements. + + """ id = uuid.uuid4().hex elements[id] = element return id + + +def _detect_encoding_from_file(mdl_file: Path) -> str: + """Detect and return the encoding from a Vensim file""" + try: + with mdl_file.open("rb") as in_file: + f_line: bytes = in_file.readline() + f_line: str = f_line.decode(detect(f_line)['encoding']) + return re.search(r"(?<={)(.*)(?=})", f_line).group() + except (AttributeError, UnicodeDecodeError): + warnings.warn( + "No encoding specified or detected to translate the model " + "file. 'UTF-8' encoding will be used.") + return "UTF-8" diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index 936d2711..a071f416 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -31,7 +31,7 @@ def __init__(self, node: etree._Element, ns: dict, subscripts): self.components = [] self.subscripts = subscripts - def __str__(self): + def __str__(self): # pragma: no cover text = "\n%s definition: %s" % (self.kind, self.name) text += "\nSubscrips: %s" % repr(self.subscripts)\ if self.subscripts else "" @@ -39,7 +39,7 @@ def __str__(self): return text @property - def _expression(self): + def _expression(self): # pragma: no cover if hasattr(self, "ast"): return str(self.ast).replace("\n", "\n\t") @@ -47,12 +47,12 @@ def _expression(self): return self.node.text.replace("\n", "\n\t") @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" return self.__str__() @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) @@ -383,18 +383,18 @@ def __init__(self, name: str, definition: List[str], self.definition = definition self.mapping = mapping - def __str__(self): + def __str__(self): # pragma: no cover return "\nSubscript range definition: %s\n\t%s\n" % ( self.name, self.definition) @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" return self.__str__() @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translation/xmile/xmile_file.py index ea360c9b..fb348d34 100644 --- a/pysd/translation/xmile/xmile_file.py +++ b/pysd/translation/xmile/xmile_file.py @@ -24,11 +24,11 @@ def __init__(self, xmile_path: Union[str, Path]): self.ns = self.xmile_root.nsmap[None] # namespace of the xmile self.view_elements = None - def 
__str__(self): + def __str__(self): # pragma: no cover return "\nXmile model file, loaded from:\n\t%s\n" % self.xmile_path @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" text = self.__str__() for section in self.sections: @@ -37,7 +37,7 @@ def _verbose(self) -> str: return text @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translation/xmile/xmile_section.py index 723f1391..76cd72fc 100644 --- a/pysd/translation/xmile/xmile_section.py +++ b/pysd/translation/xmile/xmile_section.py @@ -28,11 +28,11 @@ def __init__(self, name: str, path: Path, section_type: str, self.views_dict = views_dict self.elements = None - def __str__(self): + def __str__(self): # pragma: no cover return "\nFile section: %s\n" % self.name @property - def _verbose(self) -> str: + def _verbose(self) -> str: # pragma: no cover """Get model information""" text = self.__str__() if self.elements: @@ -44,7 +44,7 @@ def _verbose(self) -> str: return text @property - def verbose(self): + def verbose(self): # pragma: no cover """Print model information""" print(self._verbose) diff --git a/pysd/translation/xmile/xmile_utils.py b/pysd/translation/xmile/xmile_utils.py index 73873fd6..c3bb8e26 100644 --- a/pysd/translation/xmile/xmile_utils.py +++ b/pysd/translation/xmile/xmile_utils.py @@ -1,11 +1,9 @@ import re -import warnings import uuid import parsimonious from typing import Dict from pathlib import Path -from chardet import detect class Grammar(): @@ -37,23 +35,42 @@ def _gpath(cls, grammar: str) -> Path: """Get the grammar file path""" return cls._grammar_path.joinpath(grammar).with_suffix(".peg") - @classmethod - def clean(cls) -> None: - """Clean the saved grammars (used for debugging)""" - cls._common_grammar = None - cls._grammar: Dict = {} - def split_arithmetic(structure: object, parsing_ops: dict, expression: str, elements: dict, negatives: set = set()) -> object: + """ + Split arithmetic pattern and return the corresponding object. + + Parameters + ---------- + structure: callable + Callable that generates the arithmetic object to return. + parsing_ops: dict + The parsing operators dictionary. + expression: str + Original expression with the operator and the hex code to the objects. + elements: dict + Dictionary of the hex identifiers and the objects that represent. + negative: set + Set of element hex values that must change their sign. + + Returns + ------- + object: structure + Final object of the arithmetic operation or initial object if + no operations are performed. 
+ + """ pattern = re.compile(parsing_ops) - parts = pattern.split(expression) - ops = pattern.findall(expression) + parts = pattern.split(expression) # list of elements ids + ops = pattern.findall(expression) # operators list ops = list(map( lambda x: x.replace('and', ':AND:').replace('or', ':OR:'), ops)) if not ops: + # no operators return original object if parts[0] in negatives: + # make original object negative negatives.remove(parts[0]) return add_element( elements, @@ -62,6 +79,7 @@ def split_arithmetic(structure: object, parsing_ops: dict, return expression else: if not negatives: + # create arithmetic object return add_element( elements, structure( @@ -87,6 +105,23 @@ def split_arithmetic(structure: object, parsing_ops: dict, def add_element(elements: dict, element: object) -> str: + """ + Add element to elements dict using an unique hex identifier + + Parameters + ---------- + elements: dict + Dictionary of all elements. + + element: object + Element to add. + + Returns + ------- + id: str (hex) + The name of the key where element is saved in elements. + + """ id = uuid.uuid4().hex elements[id] = element return id diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py index fdc3e367..3646e73a 100644 --- a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -28,7 +28,7 @@ "arguments": { "folder": "arguments", "file": "test_arguments.mdl", - "rtol": 1e-2 # TODO test why it is failing with smaller tolerance + "rtol": 1e-2 # TODO test why it is failing with smaller tolerance }, "array_with_line_break": { "folder": "array_with_line_break", diff --git a/tests/pytest_translation/vensim2py/pytest_split_views.py b/tests/pytest_translation/vensim_parser/pytest_split_views.py similarity index 100% rename from tests/pytest_translation/vensim2py/pytest_split_views.py rename to tests/pytest_translation/vensim_parser/pytest_split_views.py diff --git a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py index cc1f97b2..7845748b 100644 --- a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py +++ b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py @@ -51,4 +51,3 @@ def test_read_vensim_file(self, model_path): def test_file_split_file_sections(self, model_path): ven_file = VensimFile(model_path) ven_file.parse() - print(ven_file.verbose) diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 9cfcb6b1..ebd9839b 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -44,6 +44,11 @@ def test_read_not_model_vensim(self): pysd.read_vensim( more_tests.joinpath("not_vensim/test_not_vensim.txt")) + def test_read_not_model_xmile(self): + with self.assertRaises(ValueError): + pysd.read_xmile( + more_tests.joinpath("not_vensim/test_not_vensim.txt")) + def test_run(self): model = pysd.read_vensim(test_model) stocks = model.run() From f1b0fd446c2e6f75775d1417b35d93e55082bd83 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 11 Mar 2022 15:58:12 +0100 Subject: [PATCH 10/96] Add new tests and correct small bugs --- pysd/building/python/namespace.py | 2 +- .../python/python_expressions_builder.py | 21 ++++++++++++++----- pysd/translation/vensim/vensim_element.py | 2 +- pysd/translation/xmile/xmile_element.py | 3 --- pysd/translation/xmile/xmile_file.py | 5 
+++-- .../pytest_integration_vensim_pathway.py | 20 ++++++++++++++++++ tests/test-models | 2 +- 7 files changed, 42 insertions(+), 13 deletions(-) diff --git a/pysd/building/python/namespace.py b/pysd/building/python/namespace.py index 71250ec6..7781b19f 100644 --- a/pysd/building/python/namespace.py +++ b/pysd/building/python/namespace.py @@ -124,7 +124,7 @@ def make_python_identifier(self, string, prefix=None, add_to_namespace=False): # Only letters can be leading characters. if prefix is not None: s = prefix + "_" + s - elif re.findall(r"^[0-9]", s): + elif re.findall(r"^[0-9]", s) or not s: s = "nvs_" + s elif re.findall(r"^_", s): s = "nvs" + s diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index b4e64336..b45585d7 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -198,7 +198,6 @@ def __init__(self, call_str, component): function_name = call_str.function.reference self.arguments = { str(i): arg for i, arg in enumerate(call_str.arguments)} - # move this to a setter if function_name in self.section.macrospace: # build macro @@ -1116,12 +1115,24 @@ def visit_subscripts(self, expression, original_subs): class NumericBuilder(StructureBuilder): - # Standard class, inherit all from StructureBuilder - pass + def build(self, arguments): + if np.isnan(self.value): + self.section.imports.add("numpy") + + return BuildAST( + expression="np.nan", + calls={}, + subscripts={}, + order=0) + else: + return BuildAST( + expression=repr(self.value), + calls={}, + subscripts={}, + order=0) class ArrayBuilder(StructureBuilder): - # Standard class, inherit all from StructureBuilder def build(self, arguments): self.value = np.array2string( self.value.reshape(compute_shape(self.def_subs)), @@ -1217,7 +1228,7 @@ class ASTVisitor: ae.ArithmeticStructure: OperationBuilder, int: NumericBuilder, float: NumericBuilder, - np.ndarray: ArrayBuilder + np.ndarray: ArrayBuilder, } def __init__(self, component): diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 39a9dc64..975b0662 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -503,7 +503,7 @@ def visit__(self, n, vc): return "" def visit_nan(self, n, vc): - return "np.nan" + return self.add_element(np.nan) def visit_empty(self, n, vc): return self.add_element(None) diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index a071f416..366ee476 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -522,9 +522,6 @@ def visit__(self, n, vc): # handles whitespace characters return "" - def visit_nan(self, n, vc): - return "np.nan" - def visit_empty(self, n, vc): return self.add_element(None) diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translation/xmile/xmile_file.py index fb348d34..d10c4975 100644 --- a/pysd/translation/xmile/xmile_file.py +++ b/pysd/translation/xmile/xmile_file.py @@ -51,10 +51,11 @@ def get_root(self) -> etree._Element: """ # check for model extension - if self.xmile_path.suffix.lower() != ".xmile": + if self.xmile_path.suffix.lower() not in [".xmile", ".xml", ".stmx"]: raise ValueError( "The file to translate, '%s' " % self.xmile_path - + "is not a Xmile model. It must end with xmile extension." + + "is not a Xmile model. It must end with xmile, xml or " + + "stmx extension." 
) return etree.parse( diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py index 3646e73a..56d208fc 100644 --- a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -115,6 +115,10 @@ "folder": "function_capitalization", "file": "test_function_capitalization.mdl" }, + "fully_invalid_names": { + "folder": "fully_invalid_names", + "file": "test_fully_invalid_names.mdl" + }, "game": { "folder": "game", "file": "test_game.mdl" @@ -259,6 +263,10 @@ "folder": "multiple_lines_def", "file": "test_multiple_lines_def.mdl" }, + "na": { + "folder": "na", + "file": "test_na.mdl" + }, "nested_functions": { "folder": "nested_functions", "file": "test_nested_functions.mdl" @@ -291,6 +299,10 @@ "folder": "sample_if_true", "file": "test_sample_if_true.mdl" }, + "smaller_range": { + "folder": "smaller_range", + "file": "test_smaller_range.mdl" + }, "smooth": { "folder": "smooth", "file": "test_smooth.mdl" @@ -343,6 +355,10 @@ "folder": "subscript_copy", "file": "test_subscript_copy.mdl" }, + "subscript_copy2": { + "folder": "subscript_copy", + "file": "test_subscript_copy2.mdl" + }, "subscript_docs": { "folder": "subscript_docs", "file": "subscript_docs.mdl" @@ -471,6 +487,10 @@ "folder": "trig", "file": "test_trig.mdl" }, + "unchangeable_constant": { + "folder": "unchangeable_constant", + "file": "test_unchangeable_constant.mdl" + }, "unicode_characters": { "folder": "unicode_characters", "file": "unicode_test_model.mdl" diff --git a/tests/test-models b/tests/test-models index a25b603f..849a20da 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit a25b603fd59800a4e0136c6f03090e0d2eb59dce +Subproject commit 849a20da4852c557391bc099aad5150f202e416f From 1448ff39b3d0216379479738e05a30b7b3afda64 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 11 Mar 2022 16:57:51 +0100 Subject: [PATCH 11/96] Add again support for xmile mod and int including a test model --- pysd/translation/xmile/parsing_grammars/equations.peg | 3 ++- pysd/translation/xmile/xmile_element.py | 11 +++++++++++ pysd/translation/xmile/xmile_structures.py | 4 +++- .../xmile_pathway/pytest_integration_xmile_pathway.py | 4 ++++ tests/test-models | 2 +- 5 files changed, 21 insertions(+), 3 deletions(-) diff --git a/pysd/translation/xmile/parsing_grammars/equations.peg b/pysd/translation/xmile/parsing_grammars/equations.peg index 7affb0a2..ed12709b 100644 --- a/pysd/translation/xmile/parsing_grammars/equations.peg +++ b/pysd/translation/xmile/parsing_grammars/equations.peg @@ -7,7 +7,8 @@ final_expr = conditional_statement / logic2_expr logic2_expr = logic_expr (_ logic_oper _ logic_expr)* # logic operators (:and:, :or:) logic_expr = not_oper? _ comp_expr # :not: operator comp_expr = add_expr (_ comp_oper _ add_expr)? # comparison (e.g. '<', '=>') -add_expr = prod_expr (_ add_oper _ prod_expr)* # addition and substraction +add_expr = mod_expr (_ add_oper _ mod_expr)* # addition and substraction +mod_expr = prod_expr (_ "mod" _ prod_expr)? # modulo prod_expr = exp_expr (_ prod_oper _ exp_expr)* # product and division exp_expr = neg_expr (_ exp_oper _ neg_expr)* # exponential neg_expr = pre_oper? 
_ expr # pre operators (-, +) diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index 366ee476..f8a684b4 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -439,6 +439,17 @@ def visit_add_expr(self, n, vc): structures["arithmetic"], parsing_ops["add_ops"], "".join(vc).strip(), self.elements) + def visit_mod_expr(self, n, vc): + # modulo expressions (mod) + if vc[1].lower().startswith("mod"): + return self.add_element( + structures["call"]( + structures["reference"]("modulo"), + (self.elements[vc[0]], self.elements[vc[1][3:]]) + )) + else: + return vc[0] + def visit_prod_expr(self, n, vc): # expressions with products (*, /) return vu.split_arithmetic( diff --git a/pysd/translation/xmile/xmile_structures.py b/pysd/translation/xmile/xmile_structures.py index 2f37be37..8bf482c0 100644 --- a/pysd/translation/xmile/xmile_structures.py +++ b/pysd/translation/xmile/xmile_structures.py @@ -48,7 +48,9 @@ }, "if_then_else": lambda x, y, z: ae.CallStructure( ae.ReferenceStructure("if_then_else"), (x, y, z)), - "negative": lambda x: ae.ArithmeticStructure(["negative"], (x,)) + "negative": lambda x: ae.ArithmeticStructure(["negative"], (x,)), + "int": lambda x: ae.CallStructure( + ae.ReferenceStructure("integer"), (x,)) } diff --git a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py index ad1c3321..46c9843a 100644 --- a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py +++ b/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py @@ -146,6 +146,10 @@ "folder": "reference_capitalization", "file": "test_reference_capitalization.xmile" }, + "rounding": { + "folder": "rounding", + "file": "test_rounding.xmile" + }, "smooth_and_stock": pytest.param({ "folder": "smooth_and_stock", "file": "test_smooth_and_stock.xmile" diff --git a/tests/test-models b/tests/test-models index 849a20da..a6b2bfc9 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit 849a20da4852c557391bc099aad5150f202e416f +Subproject commit a6b2bfc9339ecd40eefa0385cc14bd2e50971874 From e7733910cd32c6972123ce0c8b02f56331f2674c Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 16 Mar 2022 18:16:50 +0100 Subject: [PATCH 12/96] Include some imports inside functions to avoid importing unnecessary libraries --- pysd/pysd.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pysd/pysd.py b/pysd/pysd.py index eb55771f..523e4665 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -6,9 +6,7 @@ """ import sys -from pysd.translation.vensim.vensim_file import VensimFile -from pysd.translation.xmile.xmile_file import XmileFile -from pysd.building.python.python_model_builder import ModelBuilder + from pysd.py_backend.statefuls import Model @@ -63,6 +61,9 @@ def read_xmile(xmile_file, data_files=None, initialize=True, >>> model = read_xmile('../tests/test-models/samples/teacup/teacup.xmile') """ + from pysd.translation.xmile.xmile_file import XmileFile + from pysd.building.python.python_model_builder import ModelBuilder + # Read and parse Xmile file xmile_file_obj = XmileFile(xmile_file) xmile_file_obj.parse() @@ -137,6 +138,8 @@ def read_vensim(mdl_file, data_files=None, initialize=True, >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ + from pysd.translation.vensim.vensim_file import VensimFile + from 
pysd.building.python.python_model_builder import ModelBuilder # Read and parse Vensim file ven_file = VensimFile(mdl_file, encoding=encoding) ven_file.parse() From f24487bf8985ca7d527e3b256f2ca9971777afd8 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 21 Mar 2022 15:55:21 +0100 Subject: [PATCH 13/96] Update dependencies with progressbar2 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d2125109..df162305 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,5 +8,5 @@ chardet black openpyxl scipy -progressbar +progressbar2 From 497b777057c13b8156aacdcffbe3d016a9fdf9ab Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 21 Mar 2022 18:31:20 +0100 Subject: [PATCH 14/96] Solve some bugs and make except work properly --- pysd/building/python/imports.py | 2 +- .../python/python_expressions_builder.py | 27 +++++++-- pysd/building/python/python_functions.py | 2 +- pysd/building/python/python_model_builder.py | 56 +++++++++++++++++-- .../parsing_grammars/element_object.peg | 3 +- pysd/translation/vensim/vensim_element.py | 13 +++-- .../pytest_integration_vensim_pathway.py | 4 ++ tests/test-models | 2 +- 8 files changed, 92 insertions(+), 17 deletions(-) diff --git a/pysd/building/python/imports.py b/pysd/building/python/imports.py index bd69864d..521ba44a 100644 --- a/pysd/building/python/imports.py +++ b/pysd/building/python/imports.py @@ -59,7 +59,7 @@ def get_header(self, outfile): for module in self._external_submodules: if getattr(self, f"_{module}"): - text += "%(module)s import %(submodules)s\n" % { + text += "from %(module)s import %(submodules)s\n" % { "module": module, "submodules": ", ".join(getattr(self, f"_{module}"))} diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index b45585d7..eff4b4d8 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1253,10 +1253,8 @@ def visit(self): visit_out.calls, inplace=True) - if not visit_out.subscripts\ - and self.subscripts != self.component.element.subs_dict: - # expression is a float, but it will be upper dimensioned - # when assigning values to the xarray.DataArray + if not visit_out.subscripts: + # expression is a float return visit_out # NUMPY not needed @@ -1297,3 +1295,24 @@ def _visit(self, ast_object): for name, value in builder.arguments.items() } return builder.build(arguments) + + +class ExceptVisitor: # pragma: no cover + # this class will be used in the numpy array backend + def __init__(self, component): + self.except_definitions = component.subscripts[1] + self.subscripts = component.section.subscripts + self.subscripts_dict = component.subscripts_dict + + def visit(self): + excepts = [ + BuildAST("", self.subscripts_dict, {}, 0) + for _ in self.except_definitions + ] + [ + except_def.reshape( + self.subscripts, + self.subscripts.make_coord_dict(except_list)) + for except_def, except_list in zip(excepts, self.except_definitions) + ] + return excepts diff --git a/pysd/building/python/python_functions.py b/pysd/building/python/python_functions.py index 73288363..75f986b1 100644 --- a/pysd/building/python/python_functions.py +++ b/pysd/building/python/python_functions.py @@ -83,6 +83,6 @@ "np.random.uniform(%(0)s, %(1)s, size=%(size)s)", ("numpy",)), "random_normal": ( - "stats.truncnorm.rvs(%(0)s, %(1)s, loc=%(2)s, scale=%(3)s, size=%(size)s))", + "stats.truncnorm.rvs(%(0)s, %(1)s, 
loc=%(2)s, scale=%(3)s, size=%(size)s)", ("scipy", "stats")), } diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 5f8d117b..61c62915 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -414,7 +414,7 @@ def build_element(self): # that can be easily vecorized (GET, expressions, Stocks...) expressions = [] for component in self.components: - expr, subs = component.build_component() + expr, subs, except_subscripts = component.build_component() if expr is None: continue else: @@ -422,7 +422,15 @@ def build_element(self): esubs: subs[csubs] for csubs, esubs in zip(subs, self.subscripts) } - expressions.append({"expr": expr, "subs": subs}) + exc_subs = [ + { + esubs: subs_e[csubs] + for csubs, esubs in zip(subs_e, self.subscripts) + } + for subs_e in except_subscripts + ] + expressions.append( + {"expr": expr, "subs": subs, "subs_except": exc_subs}) if len(expressions) > 1: # NUMPY: xrmerge would be sustitute by a multiple line definition @@ -446,11 +454,17 @@ def build_element(self): # NUMPY not necessary expression["expr"].lower_order(0, force_0=True) expression["expr"].expression += ".values" - self.pre_expression += "value.loc[%(subs)s] = %(expr)s\n" % ( - expression) + if expression["subs_except"]: + # there is an excep in the definition of the component + self.pre_expression += self.manage_except(expression) + else: + self.pre_expression +=\ + "value.loc[%(subs)s] = %(expr)s\n" % expression self.expression = "value" else: self.pre_expression = "" + # NUMPY: reshape to the final shape if meeded + # expressions[0]["expr"].reshape(self.section.subscripts, {}) self.expression = expressions[0]["expr"] self.type = ", ".join( @@ -460,6 +474,31 @@ def build_element(self): set(component.subtype for component in self.components) ) + def manage_except(self, expression): + if expression["subs"] == self.subs_dict: + # Final subscripts are the same as the main subscripts + # of the component. Generate a True array like value + final_expr = "except_subs = xr.ones_like(value, dtype=bool)\n" + else: + # Final subscripts are greater than the main subscripts + # of the component. Generate a False array like value and + # set to True the subarray of the component coordinates + final_expr = "except_subs = xr.zeros_like(value, dtype=bool)\n"\ + "except_subs.loc[%(subs)s] = True\n" % expression + + for except_subs in expression["subs_except"]: + # We set to False the dimensions in the EXCEPT + final_expr += "except_subs.loc[%s] = False\n" % except_subs + + if expression["expr"].subscripts: + # assign the values of an array + return final_expr + "value.values[except_subs.values] = "\ + "%(expr)s[except_subs.values]\n" % expression + else: + # assign the values of a float + return final_expr + "value.values[except_subs.values] = "\ + "%(expr)s\n" % expression + def build_element_out(self): """ Returns a string that has processed a single element dictionary. 
@@ -533,4 +572,11 @@ def __init__(self, abstract_component: AbstractComponent, def build_component(self): self.subscripts_dict = self.section.subscripts.make_coord_dict( self.subscripts[0]) - return (vs.ASTVisitor(self).visit(), self.subscripts_dict) + # NUMPY: use vs.ExceptVisitor + except_subscripts = [self.section.subscripts.make_coord_dict( + except_list) for except_list in self.subscripts[1]] + return ( + vs.ASTVisitor(self).visit(), + self.subscripts_dict, + except_subscripts + ) diff --git a/pysd/translation/vensim/parsing_grammars/element_object.peg b/pysd/translation/vensim/parsing_grammars/element_object.peg index 298a7415..49256520 100644 --- a/pysd/translation/vensim/parsing_grammars/element_object.peg +++ b/pysd/translation/vensim/parsing_grammars/element_object.peg @@ -33,7 +33,8 @@ subscript_mapping = (_ name_mapping _) / (_ "(" _ name_mapping _ ":" _ index_lis name_mapping = basic_id / escape_group # Subscript except match -subscript_list_except = ":EXCEPT:" _ '[' _ subscript_except _ ("," _ subscript_except _)* _ ']' +subscript_list_except = ":EXCEPT:" _ subscript_except_group (_ ',' _ subscript_except_group)* +subscript_except_group = '[' _ subscript_except _ ("," _ subscript_except _)* _ ']' subscript_except = basic_id / escape_group # Subscript match diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 975b0662..a861dd66 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -71,6 +71,7 @@ def __init__(self, ast): self.mapping = [] self.subscripts = [] self.subscripts_except = [] + self.subscripts_except_groups = [] self.name = None self.expression = None self.keyword = None @@ -83,28 +84,28 @@ def visit_subscript_definition(self, n, vc): def visit_lookup_definition(self, n, vc): self.component = Lookup( self.name, - (self.subscripts, self.subscripts_except), + (self.subscripts, self.subscripts_except_groups), self.expression ) def visit_unchangeable_constant(self, n, vc): self.component = UnchangeableConstant( self.name, - (self.subscripts, self.subscripts_except), + (self.subscripts, self.subscripts_except_groups), self.expression ) def visit_component(self, n, vc): self.component = Component( self.name, - (self.subscripts, self.subscripts_except), + (self.subscripts, self.subscripts_except_groups), self.expression ) def visit_data_definition(self, n, vc): self.component = Data( self.name, - (self.subscripts, self.subscripts_except), + (self.subscripts, self.subscripts_except_groups), self.keyword, self.expression ) @@ -170,6 +171,10 @@ def visit_subscript(self, n, vc): def visit_subscript_except(self, n, vc): self.subscripts_except.append(n.text.strip()) + def visit_subscript_except_group(self, n, vc): + self.subscripts_except_groups.append(self.subscripts_except.copy()) + self.subscripts_except = [] + def visit_expression(self, n, vc): self.expression = n.text.strip() diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py index 56d208fc..1e3ad6bd 100644 --- a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -99,6 +99,10 @@ "folder": "except", "file": "test_except.mdl" }, + "except_multiple": { + "folder": "except_multiple", + "file": "test_except_multiple.mdl" + }, "exp": { "folder": "exp", "file": "test_exp.mdl" diff --git 
a/tests/test-models b/tests/test-models index a6b2bfc9..4e5a2843 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit a6b2bfc9339ecd40eefa0385cc14bd2e50971874 +Subproject commit 4e5a28432a1af229c6c770bf3ec43c15c1d67950 From e9325e3154aaabd593816b52b8e49fa52b64c116 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 23 Mar 2022 18:01:28 +0100 Subject: [PATCH 15/96] Solve xmile bugs and add support for forecast --- pysd/building/python/python_expressions_builder.py | 7 +++++-- pysd/py_backend/statefuls.py | 8 +++++--- pysd/translation/structures/abstract_expressions.py | 6 ++++-- pysd/translation/vensim/vensim_structures.py | 2 +- pysd/translation/xmile/xmile_structures.py | 8 ++++++-- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index eff4b4d8..a88a77f5 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -811,6 +811,7 @@ def __init__(self, forecast_str, component): "input": forecast_str.input, "average_time": forecast_str.average_time, "horizon": forecast_str.horizon, + "initial_trend": forecast_str.initial_trend } def build(self, arguments): @@ -821,6 +822,7 @@ def build(self, arguments): arguments["average_time"].reshape( self.section.subscripts, self.def_subs) arguments["horizon"].reshape(self.section.subscripts, self.def_subs) + arguments["initial_trend"].reshape(self.section.subscripts, self.def_subs) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_forecast") @@ -828,10 +830,11 @@ def build(self, arguments): "name": arguments["name"], "expression": "%(name)s = Forecast(lambda: %(input)s, " "lambda: %(average_time)s, lambda: %(horizon)s, " - "'%(name)s')" % arguments, + "lambda: %(initial_trend)s, '%(name)s')" % arguments, "calls": { - "initial": + "initial": merge_dependencies( arguments["input"].calls, + arguments["initial_trend"].calls), "step": merge_dependencies( arguments["input"].calls, arguments["average_time"].calls, diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 8d36b743..190c228d 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -354,7 +354,8 @@ class Forecast(DynamicStateful): """ Implements FORECAST function """ - def __init__(self, forecast_input, average_time, horizon, py_name): + def __init__(self, forecast_input, average_time, horizon, initial_trend, + py_name): """ Parameters @@ -370,15 +371,16 @@ def __init__(self, forecast_input, average_time, horizon, py_name): self.horizon = horizon self.average_time = average_time self.input = forecast_input + self.initial_trend = initial_trend self.py_name = py_name def initialize(self, init_val=None): # self.state = AV in the vensim docs if init_val is None: - self.state = self.input() + self.state = self.input() / (1 + self.initial_trend()) else: - self.state = init_val + self.state = self.input() / (1 + init_val) if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translation/structures/abstract_expressions.py index 805cb1f2..d11141da 100644 --- a/pysd/translation/structures/abstract_expressions.py +++ b/pysd/translation/structures/abstract_expressions.py @@ -180,12 +180,14 @@ class ForecastStructure: input: object average_time: object horizon: object + initial_trend: object 
def __str__(self) -> str: # pragma: no cover - return "ForecastStructure:\n\t%s,\n\t%s,\n\t%s" % ( + return "ForecastStructure:\n\t%s,\n\t%s,\n\t%s,\n\t%s" % ( self.input, self.average_time, - self.horizon) + self.horizon, + self.initial_trend) @dataclass diff --git a/pysd/translation/vensim/vensim_structures.py b/pysd/translation/vensim/vensim_structures.py index 9e6a439e..dc8ae8eb 100644 --- a/pysd/translation/vensim/vensim_structures.py +++ b/pysd/translation/vensim/vensim_structures.py @@ -30,7 +30,7 @@ "smooth3i": lambda x, y, z: ae.SmoothStructure(x, y, z, 3), "smooth_n": ae.SmoothNStructure, "trend": ae.TrendStructure, - "forecast": ae.ForecastStructure, + "forecast": lambda x, y, z: ae.ForecastStructure(x, y, z, 0), "sample_if_true": ae.SampleIfTrueStructure, "lookup": ae.LookupsStructure, "data": ae.DataStructure diff --git a/pysd/translation/xmile/xmile_structures.py b/pysd/translation/xmile/xmile_structures.py index 8bf482c0..6d2211f5 100644 --- a/pysd/translation/xmile/xmile_structures.py +++ b/pysd/translation/xmile/xmile_structures.py @@ -22,7 +22,7 @@ }, "delayn": { 3: lambda x, y, n: ae.DelayNStructure(x, y, x, n), - 4: ae.DelayNStructure, + 4: lambda x, y, n, z: ae.DelayNStructure(x, y, z, n), }, "smth1": { 2: lambda x, y: ae.SmoothStructure(x, y, x, 1), @@ -34,12 +34,16 @@ }, "smthn": { 3: lambda x, y, n: ae.SmoothNStructure(x, y, x, n), - 4: ae.SmoothNStructure + 4: lambda x, y, n, z: ae.SmoothNStructure(x, y, z, n) }, "trend": { 2: lambda x, y: ae.TrendStructure(x, y, 0), 3: ae.TrendStructure, }, + "forcst": { + 3: lambda x, y, z: ae.ForecastStructure(x, y, z, 0), + 4: ae.ForecastStructure + }, "safediv": { 2: lambda x, y: ae.CallStructure( ae.ReferenceStructure("zidz"), (x, y)), From e0e70e0251fa37671e94dd004b23d4ba9212671e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 23 Mar 2022 18:01:46 +0100 Subject: [PATCH 16/96] Start documenting supported functions --- .../supported_vensim_functions.tab | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 docs/development/supported_vensim_functions.tab diff --git a/docs/development/supported_vensim_functions.tab b/docs/development/supported_vensim_functions.tab new file mode 100644 index 00000000..53a5ede6 --- /dev/null +++ b/docs/development/supported_vensim_functions.tab @@ -0,0 +1,88 @@ +Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Comments +Binary operators +^ A ^ B ^ A ^ B "ArithmeticStructure(['^'], (A, B))" A**B +* A * B * A * B "ArithmeticStructure(['*'], (A, B))" A*B +/ A / B / A / B "ArithmeticStructure(['/'], (A, B))" A/B + mod A mod B "CallStructure('modulo', (A, B))" "pysd.functions.modulo(A, B)" In Vensim the modulo is computed with a function and not an operator ++ A + B + A + B "ArithmeticStructure(['+'], (A, B))" A+B +- A - B - A - B "ArithmeticStructure(['-'], (A, B))" A-B += A = B = A = B "LogicStructure(['='], (A, B))" A == B +< A < B < A < B "LogicStructure(['<'], (A, B))" A < B +> A > B > A > B "LogicStructure(['>'], (A, B))" A > B +>= A >= B >= A >= B "LogicStructure(['>='], (A, B))" A >= B +<= A <= B <= A <= B "LogicStructure(['<='], (A, B))" A <= B +:AND: A :AND: B and A and B "LogicStructure[':AND:'], (A, B))" "numpy.and(A, B)" +:OR: A :OR: B or A or B "LogicStructure[':OR:'], (A, B))" "numpy.or(A, B)" + +Unary operators +- #NAME? - #NAME? "LogicStructure(['negative'], (A,))" #NAME? ++ #NAME? + #NAME? 
A A +:NOT: :NOT: A not not A "LogicStructure[':NOT:'], (A,))" numpy.not(A) + +Functions +ABS ABS(A) abs(A) abs(A) "CallStructure('abs', (A,))" numpy.abs(A) +MIN "MIN(A, B)" min "min(A, B)" "CallStructure('min', (A, B))" "numpy.minimum(A, B)" +MAX "MAX(A, B)" max "max(A, B)" "CallStructure('max', (A, B))" "numpy.maximum(A, B)" +SQRT SQRT(A) sqrt sqrt(A) "CallStructure('sqrt', (A,))" numpy.sqrt +EXP EXP(A) exp exp(A) "CallStructure('exp', (A,))" numpy.exp(A) +LN LN(A) ln ln(A) "CallStructure('ln', (A,))" numpy.log(A) +SIN SIN(A) sin sin(A) "CallStructure('sin', (A,))" numpy.sin(A) +COS COS(A) cos cos(A) "CallStructure('cos', (A,))" numpy.cos(A) +TAN TAN(A) tan tan(A) "CallStructure('tan', (A,))" numpy.tan(A) +ARCSIN ARCSIN(A) arcsin arcsin(A) "CallStructure('arcsin', (A,))" numpy.arcsin(A) +ARCCOS ARCCOS(A) arccos arccos(A) "CallStructure('arccos', (A,))" numpy.arccos(A) +ARCTAN ARCTAN(A) arctan arctan(A) "CallStructure('arctan', (A,))" numpy.arctan(A) +INVERT MATRIX INVERT MATRIX(A) "CallStructure('invert_matrix', (A,))" pysd.functions.invert_matrix(A) +ELMCOUNT ELMCOUNT(A) "CallStructure('elmcount', (A,))" len(A) +INTEGER INTEGER(A) int int(A) "CallStructure('int', (A,))" pysd.functions.integer(A) +QUANTUM "QUANTUM(A, B)" "CallStructure('quantum', (A, B))" "pysd.functions.quantum(A, B)" +MODULO "MODULO(A, B)" "CallStructure('modulo', (A, B))" "pysd.functions.modulo(A, B)" In Xmile the modulo is computed with the 'mod' operator +IF THEN ELSE "IF THEN ELSE(A, B, C)" if_then_else "if_then_else(A, B, C)" "CallStructure('if_then_else', (A, B))" "pysd.functions.if_then_else(A, lambda: B, lambda: C)" + IF condition THEN value_true ELSE value_false IF A THEN B ELSE C "CallStructure('if_then_else', (A, B))" "pysd.functions.if_then_else(A, lambda: B, lambda: C)" +XIDZ "XIDZ(A, B, X)" safediv "safediv(A, B, X)" "CallStructure('xidz', (A, B, X))" "pysd.functions.xidz(A, B, X)" +ZIDZ "ZIDZ(A, B)" safediv "safediv(A, B)" "CallStructure('zidz', (A, B))" "pysd.functions.zidz(A, B)" + +VMIN VMIN(A) "CallStructure('vmin', (A,))" pysd.functions.vmin(A) +VMAX VMAX(A) "CallStructure('vmax', (A,))" pysd.functions.vmax(A) +SUM SUM(A) "CallStructure('sum', (A,))" pysd.functions.sum(A) +PROD PROD(A) "CallStructure('prod', (A,))" pysd.functions.prod(A) + +PULSE PULSE pysd.functions.pulse +PULSE TRAIN PULSE TRAIN pysd.functions.pulse_train +RAMP RAMP pysd.functions.ramp +STEP STEP pysd.functions.step + +Stocks +INTEG + +Delay functions +DELAY1I "DELAY1I(input, delay_time, initial_value)" delay1 "delay1(input, delay_time, initial_value)" "DelayStructure(input, delay_time, initial_value, 1)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY1 "DELAY1(input, delay_time)" delay1 "delay1(input, delay_time)" "DelayStructure(input, delay_time, input, 1)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY3I "DELAY3I(input, delay_time, initial_value)" delay3 "delay3(input, delay_time, initial_value)" "DelayStructure(input, delay_time, initial_value, 3)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY3 "DELAY3(input, delay_time)" delay3 "delay3(input, delay_time)" "DelayStructure(input, delay_time, input, 3)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY N "DELAY N(input, delay_time, initial_value, n)" delayn "delayn(input, delay_time, n, initial_value)" "DelayNStructure(input, delay_time, initial_value, n)" pysd.statefuls.DelayN(...) Not tested for Xmile! + delayn "delayn(input, delay_time, n)" "DelayNStructure(input, delay_time, input, n)" pysd.statefuls.DelayN(...) Not tested for Xmile! 
+DELAY FIXED "DELAY FIXED(input, delay_time, initial_value)" "DelayFixed(input, delay_time, initial_value)" pysd.statefuls.DelayFixed(...) Not tested for Xmile! +SMOOTHI "SMOOTH1I(input, delay_time, initial_value)" smth1 "smth1(input, smth_time, initial_value)" "SmoothStructure(input, smth_time, initial_value, 1)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH "SMOOTH1(input, delay_time)" smth1 "smth1(input, smth_time)" "SmoothStructure(input, smth_time, input, 1)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH3I "SMOOTH3I(input, delay_time, initial_value)" smth3 "smth3(input, smth_time, initial_value)" "SmoothStructure(input, smth_time, initial_value, 3)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH3 "SMOOTH3(input, delay_time)" smth3 "smth3(input, smth_time)" "SmoothStructure(input, smth_time, input, 3)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH N "SMOOTH N(input, delay_time, initial_value, n)" smthn "smthn(input, smth_time, n, initial_value)" "SmoothNStructure(input, smth_time, initial_value, n)" pysd.statefuls.SmoothN(...) Not tested for Xmile! + smthn "smthn(input, smth_time, n)" "SmoothNStructure(input, smth_time, input, n)" pysd.statefuls.SmoothN(...) Not tested for Xmile! + forcst "forcst(input, average_time, horizon, initial_trend)" "ForecastStructure(input, average_time, horizon, initial_trend)" pysd.statefuls.Forecast(...) Not tested for Xmile! +FORECAST "FORECAST(input, average_time, horizon)" forcst "forcst(input, average_time, horizon)" "ForecastStructure(input, average_time, horizon, 0)" pysd.statefuls.Forecast(...) Not tested for Xmile! +TREND "TREND(input, average_time, initial_trend)" trend "trend(input, average_time, initial_trend)" "TrendStructure(input, average_time, initial_trend)" pysd.statefuls.Trend(...) Not tested for Xmile! +TREND trend "trend(input, average_time)" "TrendStructure(input, average_time, 0)" pysd.statefuls.Trend(...) Not tested for Xmile! + +INITIAL INITIAL(pysd.statefuls.Initial pysd.statefuls.Initial +SAMPLE IF TRUE "SAMPLE IF TRUE(condition, input, initial_value)" "SampleIfTrueStructure(condition, input, initial_value)" pysd.statefuls.SampleIfTrue(�) + +Get functions +GET XLS DATA "GET XLS DATA('file', 'sheet', 'time_row_or_col', 'cell')" "GetDataStructure('file', 'sheet', 'time_row_or_col', 'cell')" pysd.external.ExtData(...) +GET DIRECT DATA "GET DIRECT DATA('file', 'sheet', 'time_row_or_col', 'cell')" "GetDataStructure('file', 'sheet', 'time_row_or_col', 'cell')" pysd.external.ExtData(...) +GET XLS LOOKUPS "GET XLS LOOKUPS('file', 'sheet', 'x_row_or_col', 'cell')" "GetLookupsStructure('file', 'sheet', 'x_row_or_col', 'cell')" pysd.external.ExtLookup(...) +GET DIRECT LOOKUPS "GET DIRECT LOOKUPS('file', 'sheet', 'x_row_or_col', 'cell')" "GetLookupsStructure('file', 'sheet', 'x_row_or_col', 'cell')" pysd.external.ExtLookup(...) +GET XLS CONSTANTS "GET XLS CONSTANTS('file', 'sheet', 'cell')" "GetConstantsStructure('file', 'sheet', 'cell')" pysd.external.ExtConstant(...) +GET DIRECT CONSTANTS "GET DIRECT CONSTANTS('file', 'sheet', 'cell')" "GetConstantsStructure('file', 'sheet', 'cell')" pysd.external.ExtConstant(...) +GET XLS SUBSCRIPT "GET XLS SUBSCRIPT('file', 'sheet', 'first_cell', 'last_cell', 'prefix')" pysd.external.ExtSubscript(...) +GET DIRECT SUBSCRIPT "GET DIRECT SUBSCRIPT('file', 'sheet', 'first_cell', 'last_cell', 'prefix')" pysd.external.ExtSubscript(...) 
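The table above can be read as a recipe: each row's Abstract Syntax column is a constructor call on the dataclasses from pysd/translation/structures/abstract_expressions.py, and the Python Translation column is what the Python builder later emits for that node. A minimal, illustrative sketch follows (the variable names a and b are placeholders, and the import path is assumed from the file layout shown in this patch series):

    from pysd.translation.structures.abstract_expressions import (
        ArithmeticStructure, CallStructure, ReferenceStructure)

    # placeholder references to two model variables
    a = ReferenceStructure("a")
    b = ReferenceStructure("b")

    # Vensim "MODULO(a, b)" and Xmile "a mod b" map to the same abstract node
    # (see the MODULO row and the mod operator row above).
    mod_ast = CallStructure("modulo", (a, b))

    # Vensim/Xmile "a + b" maps to an arithmetic node.
    add_ast = ArithmeticStructure(["+"], (a, b))

    # The __str__ helpers marked "pragma: no cover" earlier in this series
    # pretty-print these trees, which helps when checking a translation.
    print(mod_ast)
    print(add_ast)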
From a186d2456ca27db7328fd540415993d0a2af028d Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 24 Mar 2022 10:50:26 +0100 Subject: [PATCH 17/96] Replace init_val by init_trend for Trend and Forecast --- pysd/py_backend/statefuls.py | 12 ++++++------ tests/unit_test_statefuls.py | 11 ++++++++--- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 190c228d..fb684af7 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -374,13 +374,13 @@ def __init__(self, forecast_input, average_time, horizon, initial_trend, self.initial_trend = initial_trend self.py_name = py_name - def initialize(self, init_val=None): + def initialize(self, init_trend=None): # self.state = AV in the vensim docs - if init_val is None: + if init_trend is None: self.state = self.input() / (1 + self.initial_trend()) else: - self.state = self.input() / (1 + init_val) + self.state = self.input() / (1 + init_trend) if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, @@ -483,13 +483,13 @@ def __init__(self, trend_input, average_time, initial_trend, py_name): self.input_func = trend_input self.py_name = py_name - def initialize(self, init_val=None): - if init_val is None: + def initialize(self, init_trend=None): + if init_trend is None: self.state = self.input_func()\ / (1 + self.init_func()*self.average_time_function()) else: self.state = self.input_func()\ - / (1 + init_val*self.average_time_function()) + / (1 + init_trend*self.average_time_function()) if isinstance(self.state, xr.DataArray): self.shape_info = {'dims': self.state.dims, diff --git a/tests/unit_test_statefuls.py b/tests/unit_test_statefuls.py index f3eb3b4f..17fbd9a4 100644 --- a/tests/unit_test_statefuls.py +++ b/tests/unit_test_statefuls.py @@ -222,6 +222,7 @@ def input(): frcst = Forecast(forecast_input=input, average_time=lambda: 3, horizon=lambda: 10, + initial_trend=lambda: 0, py_name='forecast') frcst.initialize() @@ -238,11 +239,15 @@ def input(): input_val*(1+(input_val-frcst.state)/(3*frcst.state)*10)) input_val = 7 - init_val = 6 - frcst.initialize(init_val) + init_trend = 6 + + frcst.initialize(init_trend) self.assertEqual( frcst(), - input_val*(1+(input_val-init_val)/(3*init_val)*10)) + input_val* + (1+ + (input_val-input_val/(1+init_trend)) + /(3*input_val/(1+init_trend))*10)) def test_initial(self): from pysd.py_backend.statefuls import Initial From 3895ee53f0448d9dc54a96fe10641d8eb3f5b1d5 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 24 Mar 2022 16:21:11 +0100 Subject: [PATCH 18/96] Solve bugs for mixed definitions --- .../python/python_expressions_builder.py | 49 +++++- pysd/building/python/python_model_builder.py | 51 ++++--- pysd/py_backend/external.py | 144 ++++++++++++++---- .../more-tests/type_error/test_type_error.py | 4 +- tests/test-models | 2 +- tests/unit_test_external.py | 13 +- 6 files changed, 200 insertions(+), 63 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index a88a77f5..c590df0b 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -116,6 +116,23 @@ def _compute_final_subscripts(self, subscripts_list, def_subs): # TODO reorder final_subscripts taking into account def_subs return expression + def update_object_subscripts(self, name): + origin_comp = self.element.objects[name]["component"] + if 
isinstance(origin_comp.subscripts_dict, dict): + if len(list(origin_comp.subscripts_dict)) == 1: + key = list(origin_comp.subscripts_dict.keys())[0] + value = list(self.component.subscripts_dict.values())[0] + origin_comp.subscripts_dict[key] += value + self.element.objects[name]["final_subs"] =\ + origin_comp.subscripts_dict + else: + origin_comp.subscripts_dict = [origin_comp.subscripts_dict] + self.element.objects[name]["final_subs"] =\ + self.element.subs_dict + if isinstance(origin_comp.subscripts_dict, list): + origin_comp.subscripts_dict.append( + self.component.subscripts_dict) + class OperationBuilder(StructureBuilder): operators_build = { @@ -412,6 +429,8 @@ def build(self, arguments): + self.element.objects["ext_lookups"]["name"]\ + ".add(%(params)s, %(subscripts)s)" % arguments + self.update_object_subscripts("ext_lookups") + return None else: # create a new object @@ -419,12 +438,15 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_ext_lookup") + arguments["final_subs"] = "%(final_subs)s" self.element.objects["ext_lookups"] = { "name": arguments["name"], "expression": "%(name)s = ExtLookup(%(params)s, " - "%(subscripts)s, " - "_root, '%(name)s')" % arguments + "%(subscripts)s, _root, " + "%(final_subs)s , '%(name)s')" % arguments, + "component": self.component, + "final_subs": self.def_subs } return BuildAST( @@ -461,6 +483,8 @@ def build(self, arguments): + self.element.objects["ext_data"]["name"]\ + ".add(%(params)s, %(method)s, %(subscripts)s)" % arguments + self.update_object_subscripts("ext_data") + return None else: # create a new object @@ -468,12 +492,15 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_ext_data") + arguments["final_subs"] = "%(final_subs)s" self.element.objects["ext_data"] = { "name": arguments["name"], "expression": "%(name)s = ExtData(%(params)s, " " %(method)s, %(subscripts)s, " - "_root, '%(name)s')" % arguments + "_root, %(final_subs)s ,'%(name)s')" % arguments, + "component": self.component, + "final_subs": self.def_subs } return BuildAST( @@ -484,11 +511,11 @@ def build(self, arguments): class ExtConstantBuilder(StructureBuilder): - def __init__(self, getlookup_str, component): + def __init__(self, getconstant_str, component): super().__init__(None, component) - self.file = getlookup_str.file - self.tab = getlookup_str.tab - self.cell = getlookup_str.cell + self.file = getconstant_str.file + self.tab = getconstant_str.tab + self.cell = getconstant_str.cell self.arguments = {} def build(self, arguments): @@ -507,6 +534,8 @@ def build(self, arguments): + self.element.objects["constants"]["name"]\ + ".add(%(params)s, %(subscripts)s)" % arguments + self.update_object_subscripts("constants") + return None else: # create a new object @@ -514,11 +543,15 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_ext_constant") + arguments["final_subs"] = "%(final_subs)s" self.element.objects["constants"] = { "name": arguments["name"], "expression": "%(name)s = ExtConstant(%(params)s, " - "%(subscripts)s, _root, '%(name)s')" % arguments + "%(subscripts)s, _root, %(final_subs)s, " + "'%(name)s')" % arguments, + "component": self.component, + "final_subs": self.def_subs } return BuildAST( diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 61c62915..286109f2 100644 
--- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -413,24 +413,30 @@ def build_element(self): # TODO include some kind of magic vectorization to identify patterns # that can be easily vectorized (GET, expressions, Stocks...) expressions = [] + [component.build_component() for component in self.components] for component in self.components: - expr, subs, except_subscripts = component.build_component() + expr, subs, except_subscripts = component.get() if expr is None: continue + if isinstance(subs, list): + subs = [{ + esubs: subsi[csubs] + for csubs, esubs in zip(subsi, self.subscripts) + } for subsi in subs] else: subs = { esubs: subs[csubs] for csubs, esubs in zip(subs, self.subscripts) } - exc_subs = [ - { - esubs: subs_e[csubs] - for csubs, esubs in zip(subs_e, self.subscripts) - } - for subs_e in except_subscripts - ] - expressions.append( - {"expr": expr, "subs": subs, "subs_except": exc_subs}) + exc_subs = [ + { + esubs: subs_e[csubs] + for csubs, esubs in zip(subs_e, self.subscripts) + } + for subs_e in except_subscripts + ] + expressions.append( + {"expr": expr, "subs": subs, "subs_except": exc_subs}) if len(expressions) > 1: # NUMPY: xrmerge would be substituted by a multiple line definition @@ -457,6 +463,8 @@ def build_element(self): if expression["subs_except"]: # there is an except in the definition of the component self.pre_expression += self.manage_except(expression) + elif isinstance(expression["subs"], list): + self.pre_expression += self.manage_multi_def(expression) else: self.pre_expression +=\ "value.loc[%(subs)s] = %(expr)s\n" % expression @@ -474,6 +482,14 @@ def build_element(self): set(component.subtype for component in self.components) ) + def manage_multi_def(self, expression): + final_expr = "def_subs = xr.zeros_like(value, dtype=bool)\n" + for subs in expression["subs"]: + final_expr += "def_subs.loc[%s] = True\n" % subs + + return final_expr + "value.values[def_subs.values] = "\ + "%(expr)s[def_subs.values]\n" % expression + def manage_except(self, expression): if expression["subs"] == self.subs_dict: # Final subscripts are the same as the main subscripts @@ -526,7 +542,9 @@ def build_element_out(self): self.section.imports.add("subs") objects = "\n\n".join([ - value["expression"] for value in self.objects.values() + value["expression"] % { + "final_subs": value.get("final_subs", "")} + for value in self.objects.values() if value["expression"] is not None ]) @@ -573,10 +591,9 @@ def build_component(self): self.subscripts_dict = self.section.subscripts.make_coord_dict( self.subscripts[0]) # NUMPY: use vs.ExceptVisitor - except_subscripts = [self.section.subscripts.make_coord_dict( + self.except_subscripts = [self.section.subscripts.make_coord_dict( except_list) for except_list in self.subscripts[1]] - return ( - vs.ASTVisitor(self).visit(), - self.subscripts_dict, - except_subscripts - ) + self.ast_build = vs.ASTVisitor(self).visit() + + def get(self): + return self.ast_build, self.subscripts_dict, self.except_subscripts diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index 2d89d99b..fbeeee50 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -85,7 +85,13 @@ class External(object): """ missing = "warning" - def __init__(self, py_name): + def __init__(self, py_name, final_coords=None): + if py_name is None: + # backwards compatibility + # TODO remove in future + self.final_coords, py_name = py_name, final_coords + else: + self.final_coords = final_coords 
self.py_name = py_name self.file = None self.sheet = None @@ -323,7 +329,7 @@ def _resolve_file(self, root): None """ - if self.file[0] == '?': + if str(self.file)[0] == '?': # TODO add an option to include indirect references raise ValueError( self.py_name + "\n" @@ -647,10 +653,10 @@ def _reshape(data, dims): numpy.ndarray reshaped array """ - try: + if isinstance(data, (float, int)): + data = np.array(data) + elif isinstance(data, xr.DataArray): data = data.values - except AttributeError: - pass return data.reshape(dims) @@ -698,8 +704,8 @@ class ExtData(External, Data): """ def __init__(self, file_name, sheet, time_row_or_col, cell, - interp, coords, root, py_name): - super().__init__(py_name) + interp, coords, root, final_coords=None, py_name=None): + super().__init__(py_name, final_coords) self.files = [file_name] self.sheets = [sheet] self.time_row_or_cols = [time_row_or_col] @@ -707,13 +713,10 @@ def __init__(self, file_name, sheet, time_row_or_col, cell, self.coordss = [coords] self.root = root # TODO remove in 3.0.0 (self.interp = interp) - self.interp = interp.replace(" ", "_") if interp else None + self.interp = interp.replace(" ", "_") if interp else "interpolate" self.is_float = not bool(coords) # check if the interpolation method is valid - if not interp: - self.interp = "interpolate" - if self.interp not in ["interpolate", "raw", "look_forward", "hold_backward"]: raise ValueError(self.py_name + "\n" @@ -747,12 +750,39 @@ def initialize(self): """ Initialize all elements and create the self.data xarray.DataArray """ - self.data = utils.xrmerge(*[ - self._initialize_data("data") - for self.file, self.sheet, self.x_row_or_col, - self.cell, self.coords - in zip(self.files, self.sheets, self.time_row_or_cols, - self.cells, self.coordss)]) + if self.final_coords is None: + # backward compatibility + # TODO remove in the future + self.data = utils.xrmerge(*[ + self._initialize_data("data") + for self.file, self.sheet, self.x_row_or_col, + self.cell, self.coords + in zip(self.files, self.sheets, self.time_row_or_cols, + self.cells, self.coordss)]) + elif len(self.coordss) == 1: + # Just load one value (no add) + for self.file, self.sheet, self.x_row_or_col,\ + self.cell, self.coords\ + in zip(self.files, self.sheets, self.time_row_or_cols, + self.cells, self.coordss): + self.data = self._initialize_data("data") + else: + # Load in several lines (add) + self.data = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + + for self.file, self.sheet, self.x_row_or_col,\ + self.cell, self.coords\ + in zip(self.files, self.sheets, self.time_row_or_cols, + self.cells, self.coordss): + values = self._initialize_data("data") + + coords = {"time": values.coords["time"].values, **self.coords} + if "time" not in self.data.dims: + self.data = self.data.expand_dims( + {"time": coords["time"]}, axis=0).copy() + + self.data.loc[coords] = values.values class ExtLookup(External, Lookups): """ Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS """ - def __init__(self, file_name, sheet, x_row_or_col, cell, - coords, root, py_name): - super().__init__(py_name) + def __init__(self, file_name, sheet, x_row_or_col, cell, coords, + root, final_coords=None, py_name=None): + super().__init__(py_name, final_coords) self.files = [file_name] self.sheets = [sheet] self.x_row_or_cols = [x_row_or_col] @@ -790,12 +820,39 @@ def initialize(self): """ Initialize all elements and create the self.data xarray.DataArray """ - self.data = utils.xrmerge(*[ 
- self._initialize_data("lookup") - for self.file, self.sheet, self.x_row_or_col, - self.cell, self.coords - in zip(self.files, self.sheets, self.x_row_or_cols, - self.cells, self.coordss)]) + if self.final_coords is None: + # backward compatibility + # TODO remove in the future + self.data = utils.xrmerge(*[ + self._initialize_data("lookup") + for self.file, self.sheet, self.x_row_or_col, + self.cell, self.coords + in zip(self.files, self.sheets, self.x_row_or_cols, + self.cells, self.coordss)]) + elif len(self.coordss) == 1: + # Just load one value (no add) + for self.file, self.sheet, self.x_row_or_col,\ + self.cell, self.coords\ + in zip(self.files, self.sheets, self.x_row_or_cols, + self.cells, self.coordss): + self.data = self._initialize_data("lookup") + else: + # Load in several lines (add) + self.data = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + + for self.file, self.sheet, self.x_row_or_col,\ + self.cell, self.coords\ + in zip(self.files, self.sheets, self.x_row_or_cols, + self.cells, self.coordss): + values = self._initialize_data("lookup") + + coords = {"lookup_dim": values.coords["lookup_dim"].values, **self.coords} + if "lookup_dim" not in self.data.dims: + self.data = self.data.expand_dims( + {"lookup_dim": coords["lookup_dim"]}, axis=0).copy() + + self.data.loc[coords] = values.values class ExtConstant(External): """ Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS """ - def __init__(self, file_name, sheet, cell, coords, root, py_name): - super().__init__(py_name) + def __init__(self, file_name, sheet, cell, coords, + root, final_coords=None, py_name=None): + super().__init__(py_name, final_coords) self.files = [file_name] self.sheets = [sheet] self.transposes = [ @@ -832,11 +890,31 @@ def initialize(self): """ Initialize all elements and create the self.data xarray.DataArray """ - self.data = utils.xrmerge(*[ - self._initialize() - for self.file, self.sheet, self.transpose, self.cell, self.coords - in zip(self.files, self.sheets, self.transposes, - self.cells, self.coordss)]) + if self.final_coords is None: + # backward compatibility + # TODO remove in the future + self.data = utils.xrmerge(*[ + self._initialize() + for self.file, self.sheet, self.transpose, self.cell, + self.coords + in zip(self.files, self.sheets, self.transposes, + self.cells, self.coordss)]) + elif len(self.coordss) == 1: + # Just load one value (no add) + for self.file, self.sheet, self.transpose, self.cell, self.coords\ + in zip(self.files, self.sheets, self.transposes, + self.cells, self.coordss): + self.data = self._initialize() + else: + # Load in several lines (add) + + self.data = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + + for self.file, self.sheet, self.transpose, self.cell, self.coords\ + in zip(self.files, self.sheets, self.transposes, + self.cells, self.coordss): + self.data.loc[self.coords] = self._initialize().values def _initialize(self): """ diff --git a/tests/more-tests/type_error/test_type_error.py b/tests/more-tests/type_error/test_type_error.py index f7da1b62..7e0a7ece 100644 --- a/tests/more-tests/type_error/test_type_error.py +++ b/tests/more-tests/type_error/test_type_error.py @@ -1,6 +1,6 @@ from pysd import external +__pysd_version__ = "2.2.2" _root = './' -external.ExtData('input.xlsx', 'Sheet1', '5', 'B6', - None, {}, [], _root, '_ext_data') \ No newline at end of file +external.ExtData('input.xlsx', 'Sheet1', '5', 'B6') diff --git a/tests/test-models b/tests/test-models 
index 4e5a2843..fcfa160e 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit 4e5a28432a1af229c6c770bf3ec43c15c1d67950 +Subproject commit fcfa160eafe8639de605cfde3be59d4880e310b7 diff --git a/tests/unit_test_external.py b/tests/unit_test_external.py index 0b2cadbe..779b79f8 100644 --- a/tests/unit_test_external.py +++ b/tests/unit_test_external.py @@ -150,16 +150,25 @@ def test_reshape(self): reshape = External._reshape + data0d = np.array(5) data1d = np.array([2, 3, 5, 6]) data2d = np.array([[2, 3, 5, 6], [1, 7, 5, 8]]) - series1d = pd.Series(data1d) - df2d = pd.DataFrame(data2d) + float0d = float(data0d) + int0d = int(data0d) + series1d = xr.DataArray(data1d) + df2d = xr.DataArray(data2d) + shapes0d = [(1,), (1, 1)] shapes1d = [(4,), (4, 1, 1), (1, 1, 4), (1, 4, 1)] shapes2d = [(2, 4), (2, 4, 1), (1, 2, 4), (2, 1, 4)] + for shape_i in shapes0d: + self.assertEqual(reshape(data0d, shape_i).shape, shape_i) + self.assertEqual(reshape(float0d, shape_i).shape, shape_i) + self.assertEqual(reshape(int0d, shape_i).shape, shape_i) + for shape_i in shapes1d: self.assertEqual(reshape(data1d, shape_i).shape, shape_i) self.assertEqual(reshape(series1d, shape_i).shape, shape_i) From ed871b0bc7bffb4237fd623533673406223cef24 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 25 Mar 2022 09:56:48 +0100 Subject: [PATCH 19/96] Fix broken RTD pipeline with new jinja version --- docs/requirements.txt | 6 ++++++ setup.py | 10 ++-------- 2 files changed, 8 insertions(+), 8 deletions(-) create mode 100644 docs/requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..e625d65e --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,6 @@ +# File: docs/requirements.txt + +sphinx==4.2.0 +sphinx_rtd_theme==1.0.0 +readthedocs-sphinx-search==0.1.1 +jinja2==3.0.0 \ No newline at end of file diff --git a/setup.py b/setup.py index d38815f0..b0434325 100755 --- a/setup.py +++ b/setup.py @@ -33,14 +33,8 @@ tests_require=test_pckgs, extras_require={ "test": test_pckgs, - "docs": [ - # pin sphinx to match what RTD uses: - # https://github.com/readthedocs/readthedocs.org/blob/ecac31de54bbb2c100f933e86eb22b0f4389ba84/requirements/pip.txt#L16 - 'sphinx<2', - 'sphinx-rtd-theme<0.5', - 'docutils<0.18' - ] - }, + "docs": open('docs/requirements.txt').read().strip().split('\n') + }, package_data={ 'translation': [ '*/parsing_grammars/*.peg' From b981f7bec61dd22999d343605d66964aaa5d4ee8 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 25 Mar 2022 15:32:35 +0100 Subject: [PATCH 20/96] Catch errors --- pysd/translation/vensim/vensim_element.py | 25 +++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index a861dd66..027945b7 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -7,6 +7,9 @@ from ..structures.abstract_model import\ AbstractData, AbstractLookup, AbstractComponent,\ AbstractUnchangeableConstant +from parsimonious.exceptions import IncompleteParseError,\ + VisitationError,\ + ParseError from . 
import vensim_utils as vu from .vensim_structures import structures, parsing_ops @@ -249,8 +252,26 @@ def verbose(self): # pragma: no cover def _parse(self) -> None: """Parse model component to get the AST""" - tree = vu.Grammar.get("components", parsing_ops).parse(self.expression) - self.ast = EquationParser(tree).translation + try: + tree = vu.Grammar.get("components", parsing_ops).parse( + self.expression) + except (IncompleteParseError, ParseError) as err: + raise ValueError( + err.args[0] + "\n\n" + "\nError when parsing definition:\n\t %s\n\n" + "probably the used definition is invalid or not yet integrated..." + "\nSee parsimonious output above." % self.expression + ) + try: + self.ast = EquationParser(tree).translation + except VisitationError as err: + raise ValueError( + err.args[0] + "\n\n" + "\nError when visiting definition:\n\t %s\n\n" + "probably the used definition is invalid or not yet integrated..." + "\nSee parsimonious output above." % self.expression + ) + if isinstance(self.ast, structures["get_xls_lookups"]): self.lookup = True else: From 4c64f4dddff71c8fae7a15da1b76d5b47ebeb44b Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 25 Mar 2022 16:02:37 +0100 Subject: [PATCH 21/96] Correct bug when there is whitespace between a unary operator and a value --- pysd/translation/vensim/parsing_grammars/common_grammar.peg | 2 +- pysd/translation/vensim/parsing_grammars/components.peg | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pysd/translation/vensim/parsing_grammars/common_grammar.peg b/pysd/translation/vensim/parsing_grammars/common_grammar.peg index 7517cad5..c36e4c74 100644 --- a/pysd/translation/vensim/parsing_grammars/common_grammar.peg +++ b/pysd/translation/vensim/parsing_grammars/common_grammar.peg @@ -12,7 +12,7 @@ id_continue = id_start / ~r"[0-9\'\$\s\_]" escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" number = raw_number -raw_number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?" +raw_number = ("+"/"-")? _ ~r"\d+\.?\d*([eE][+-]?\d+)?" string = "\'" (~r"[^\']"IU)* "\'" range = _ "[" ~r"[^\]]*" "]" _ "," diff --git a/pysd/translation/vensim/parsing_grammars/components.peg b/pysd/translation/vensim/parsing_grammars/components.peg index 2a8b6ae4..18bce6be 100644 --- a/pysd/translation/vensim/parsing_grammars/components.peg +++ b/pysd/translation/vensim/parsing_grammars/components.peg @@ -11,7 +11,7 @@ exp_expr = neg_expr (_ exp_oper _ neg_expr)* # exponential neg_expr = pre_oper? _ expr # pre operators (-, +) expr = lookup_with_def / call / parens / number / reference / nan -lookup_with_def = ~r"(WITH\ LOOKUP)"I _ "(" _ final_expr _ "," _ "(" _ range? ( "(" _ raw_number _ "," _ raw_number _ ")" _ ","? _ )+ _ ")" _ ")" +lookup_with_def = ~r"(WITH\ LOOKUP)"I _ "(" _ final_expr _ "," _ "(" _ range? ( _ "(" _ raw_number _ "," _ raw_number _ ")" _ ","? 
_ )+ _ ")" _ ")" nan = ":NA:" From 8e544a1b95c3ecda90ddc55dbb3e3b541cb195fa Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 25 Mar 2022 16:06:07 +0100 Subject: [PATCH 22/96] Start documenting --- docs/{development => }/about.rst | 14 +- docs/basic_usage.rst | 6 - ...Python Translator using Parsimonious.ipynb | 1070 ----------------- docs/development/SMILEv4.pdf | Bin 159909 -> 0 bytes docs/development/XMILEv4.pdf | Bin 131956 -> 0 bytes docs/development/development_index.rst | 2 - docs/development/internal_functions.rst | 52 - .../supported_vensim_functions.rst | 121 -- docs/development/vensim_translation.rst | 41 - docs/index.rst | 38 +- docs/structure/abstract_model.rst | 2 + docs/structure/model_loading.rst | 2 + docs/structure/python_builder.rst | 39 + .../structure_index.rst} | 34 +- .../supported_vensim_functions.tab | 0 docs/structure/vensim_translation.rst | 25 + .../xmile_translation.rst | 0 pysd/translation/vensim/vensim_file.py | 72 +- 18 files changed, 173 insertions(+), 1345 deletions(-) rename docs/{development => }/about.rst (77%) delete mode 100644 docs/development/Building a SMILE to Python Translator using Parsimonious.ipynb delete mode 100644 docs/development/SMILEv4.pdf delete mode 100644 docs/development/XMILEv4.pdf delete mode 100644 docs/development/internal_functions.rst delete mode 100644 docs/development/supported_vensim_functions.rst delete mode 100644 docs/development/vensim_translation.rst create mode 100644 docs/structure/abstract_model.rst create mode 100644 docs/structure/model_loading.rst create mode 100644 docs/structure/python_builder.rst rename docs/{development/structure.rst => structure/structure_index.rst} (89%) rename docs/{development => structure}/supported_vensim_functions.tab (100%) create mode 100644 docs/structure/vensim_translation.rst rename docs/{development => structure}/xmile_translation.rst (100%) diff --git a/docs/development/about.rst b/docs/about.rst similarity index 77% rename from docs/development/about.rst rename to docs/about.rst index bdb33818..05fa7636 100644 --- a/docs/development/about.rst +++ b/docs/about.rst @@ -8,25 +8,25 @@ The last few years have witnessed a massive growth in the collection of social a So far, however, these new techniques are largely confined to variants of statistical summary, categorization, and inference; and if causal models are used, they are generally static in nature, ignoring the dynamic complexity and feedback structures of the systems in question. As the field of data science matures, there will be increasing demand for insights beyond those available through analysis unstructured by causal understanding. At that point data scientists may seek to add dynamic models of system structure to their toolbox. -The field of system dynamics has always been interested in learning about social systems, and specializes in understanding dynamic complexity. There is likewise a long tradition of incorporating various forms of data into system dynamics models.3 While system dynamics practice has much to gain from the emergence of new volumes of social data, the community has yet to benefit fully from the data science revolution. +The field of system dynamics has always been interested in learning about social systems, and specializes in understanding dynamic complexity. There is likewise a long tradition of incorporating various forms of data into system dynamics models. 
While system dynamics practice has much to gain from the emergence of new volumes of social data, the community has yet to benefit fully from the data science revolution. There are a variety of reasons for this, the largest likely being that the two communities have yet to commingle to a great extent. A further, and ultimately more tractable reason is that the tools of system dynamics and the tools of data analytics are not tightly integrated, making joint method analysis unwieldy. There is a rich problem space that depends upon the ability of these fields to support one another, and so there is a need for tools that help the two methodologies work together. PySD is designed to meet this need. General approaches for integrating system dynamic models and data analytics --------------------------------------------------------------------------- -Before considering how system dynamics techniques can be used in data science applications, we should consider the variety of ways in which the system dynamics community has traditionally dealt with integration of data and models. +Before considering how system dynamics techniques can be used in data science applications, we should consider the variety of ways in which the system dynamics community has traditionally dealt with integration of data and models. The first paradigm for using numerical data in support of modeling efforts is to import data into system dynamics modeling software. Algorithms for comparing models with data are built into the tool itself, and are usable through a graphical front-end interface as with model fitting in Vensim, or through a programming environment unique to the tool. When new techniques such as Markov chain Monte Carlo analysis become relevant to the system dynamics community, they are often brought into the SD tool. - + This approach appropriately caters to system dynamics modelers who want to take advantage of well-established data science techniques without needing to learn a programming language, and extends the functionality of system dynamics to the basics of integrated model analysis. -A second category of tools uses a standard system dynamics tool as a computation engine for analysis performed in a coding environment. This is the approach taken by the Exploratory Modeling Analysis (EMA) Workbench6, or the Behavior Analysis and Testing Software (BATS)7. This first step towards bringing system dynamics to a more inclusive analysis environment enables many new types of model understanding, but imposes limits on the depth of interaction with models and the ability to scale simulation to support large analysis. +A second category of tools uses a standard system dynamics tool as a computation engine for analysis performed in a coding environment. This is the approach taken by the Exploratory Modeling Analysis (EMA) Workbench, or the Behavior Analysis and Testing Software (BATS). This first step towards bringing system dynamics to a more inclusive analysis environment enables many new types of model understanding, but imposes limits on the depth of interaction with models and the ability to scale simulation to support large analysis. + +A third category of tools imports the models created by traditional tools to perform analyses independently of the original modeling tool. An example of this is SDM-Doc, a model documentation tool, or Abdel-Gawad et. al.’s eigenvector analysis tool. It is this third category to which PySD belongs. 
-A third category of tools imports the models created by traditional tools to perform analyses independently of the original modeling tool. An example of this is SDM-Doc8, a model documentation tool, or Abdel-Gawad et. al.’s eigenvector analysis tool9. It is this third category to which PySD belongs. - The central paradigm of PySD is that it is more efficient to bring the mature capabilities of system dynamics into an environment in use for active development in data science, than to attempt to bring each new development in inference and machine learning into the system dynamics enclave. -PySD reads a model file – the product of a modeling program such as Vensim10 or Stella/iThink11 – and cross compiles it into Python, providing a simulation engine that can run these models natively in the Python environment. It is not a substitute for these tools, and cannot be used to replace a visual model construction environment. +PySD reads a model file – the product of a modeling program such as Vensim or Stella/iThink – and cross compiles it into Python, providing a simulation engine that can run these models natively in the Python environment. It is not a substitute for these tools, and cannot be used to replace a visual model construction environment. diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index 2a67244c..7a173cf5 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -201,9 +201,3 @@ If you try to get the current values of a lookup variable the previous method wi model.get_series_data('Growth lookup') -Supported functions -------------------- - -Vensim functions include: - -.. include:: development/supported_vensim_functions.rst diff --git a/docs/development/Building a SMILE to Python Translator using Parsimonious.ipynb b/docs/development/Building a SMILE to Python Translator using Parsimonious.ipynb deleted file mode 100644 index 72846c2f..00000000 --- a/docs/development/Building a SMILE to Python Translator using Parsimonious.ipynb +++ /dev/null @@ -1,1070 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "e8d6871b", - "metadata": {}, - "source": [ - "#Building a SMILE to Python Translator\n", - "\n", - "[SMILE](http://www.iseesystems.com/community/support/SMILEv4.pdf) is the language description used in the XMILE format. Part of parsing XMILE files will be to parse strings of code in SMILE format. To do this we need to understand the SMILE grammar - and more importantly, python needs to know how to do it as well.\n", - "\n", - "In this notebook we'll be using [parsimonious](https://github.com/erikrose/parsimonious) to interpret our grammar, parse our strings, and return for us an [Abstract Syntax Tree](). There are a variety of other tools we could use:\n", - "\n", - "- [PLY](http://www.dabeaz.com/ply/) - 55397 downloads in the last month\n", - "- [plex](https://pythonhosted.org/plex/) - 949 downloads in the last month\n", - "- [tokenizertools](https://github.com/dbc/tokenizertools) - 642 downloads in the last month\n", - "- [pyparsing](http://pyparsing.wikispaces.com/) - 221150 downloads in the last month\n", - "- [ANTLR](https://github.com/antlr/antlr4) - not python native\n", - "- [others](https://github.com/erikrose/mediawiki-parser/blob/master/parsers.rst)\n", - "\n", - "Parsimonious seems to strike a good ballance between new, high-level-functionality, and maturity.\n", - "\n", - "We will use [Parsing Expression Grammar](http://en.wikipedia.org/wiki/Parsing_expression_grammar) to specify how parsimonious should interpret our input strings. 
\n", - "Here is a good [slide deck](https://ece.uwaterloo.ca/~vganesh/TEACHING/W2014/lectures/lecture16.pdf) of how PEG works,\n", - "here is the original [paper](http://www.brynosaurus.com/pub/lang/peg.pdf) describing the concept,\n", - "and here are some [reasonable](https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics)\n", - "[tutorials](http://nathansuniversity.com/pegs.html)\n", - "\n", - "PEG compares to a few other syntaxes for describing grammar:\n", - "\n", - "- [BNF](http://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form)\n", - "- [EBNF](http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form)\n", - "\n", - "Here are some examples of \n", - "[parsimonious](http://nullege.com/codes/show/src@c@s@csp-validator-0.2@csp_validator@csp.py/5/parsimonious.grammar.Grammar/python)\n", - "in [action](http://jeffrimko.blogspot.com/2013/05/parsing-with-parsimonious.html)\n", - "\n", - "Parsimonious has its own spin on PEG (mostly replacing `<-` with `=`) and those changes are listed on the main \n", - "[github page](https://github.com/erikrose/parsimonious).\n", - "\n", - "\n", - "We're just building a translator, but if we wanted to build a full-out interpreter, here is how we should do it:\n", - "\n", - "- Described in a [video](https://www.youtube.com/watch?v=1h1mM7VwNGo)\n", - "- [Code](https://github.com/halst/mini/blob/master/mini.py) and [Tests](https://github.com/halst/mini/blob/master/test_mini.py) shared here\n" - ] - }, - { - "cell_type": "markdown", - "id": "b1002ca0", - "metadata": {}, - "source": [ - "### So, in general, how does this work?\n", - "\n", - "The parser looks at the first line, and tries to match it. If the first line fails, the whole thing fails. \n", - "\n", - "Regular expressions are included with the syntax `~\"expression\"`\n", - "\n", - "Statements that include `a / b / etc...` give you the preferential choice for the string element to be of type `a`, and if not, then perhaps `b`, and so on.\n", - "\n", - "As with regular expressions, a trailing +, ?, or * denotes the number of times the preceeding pattern should be matched.\n", - "\n", - "\n", - "For example, in this grammar:\n", - "\n", - " grammar = \"\"\"\n", - " Term = Factor Additive*\n", - " Additive= (\"+\"/\"-\") Factor\n", - "\n", - " Factor = Primary Multiplicative*\n", - " Multiplicative = (\"*\" / \"/\") Primary\n", - "\n", - " Primary = Parens / Neg / Number \n", - " Parens = \"(\" Term \")\"\n", - " Neg = \"-\" Primary\n", - " Number = ~\"[0-9]+\"\n", - " \"\"\"\n", - " \n", - "if we try and parse \"5+3\", then the parser looks at the first line `Term` and says: 'If this is going to match, then the first component needs to be a `Factor`', so it then goes and looks at the definition for `Factor` and says: 'If this is going to match, then the first component needs to be a `Primary`'. Then it goes to look at the definition for `Primary` and says: 'This might be a `Parens`, lets check. Then it goes and looks at the definition of `Parens` and finds that the first element does not equal to 5, so it says 'nope!' and goes back up a level to `Primary`. \n", - "\n", - "It then checks to see if the first component of the string fits the `Neg` pattern, and discovers that it doesn't, and returns to the `Primary` definition and checks the third option: `Number`. 
It goes to the definition of number and says 'Hey, at least the first character matches `Number` - but number asks for one or more characters between 0 and 9, so lets check the next character - it is a `+`, so that doesnt fit the pattern, so we'll capture 5 as a `Number`, then return up to `Primary` - and as there are no other commands listed in `Primary` also return to `Factor`. \n", - "\n", - "Now, factor asks for zero or more `Multiplicative` components, so lets check if our string (now with the 5 removed) matches the `Multiplicative` pattern. The first element of a multiplicative component should be '*', or '\\/', and it isnt, so lets pop back up to `Factor`, and then to `Term`.\n", - "\n", - "The term then goes on to see if the string (starting at '+') matches the additive pattern - and it sees that the '+' matches its first condition, and then goes on to check for a `Factor`, beginning with the '5' in the string. This follows the same path as we saw before to match the 5 as a factor element.\n", - "\n", - "The parser collects all of the components into a tree and returns it to the user." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "f69bad3f", - "metadata": {}, - "outputs": [], - "source": [ - "import parsimonious" - ] - }, - { - "cell_type": "markdown", - "id": "015fbb0e", - "metadata": {}, - "source": [ - "## Start with someone else's arithmetic grammar\n", - "by [Philippe Sigaud](), available [here]()\n", - "\n", - "This is a good example of how to get around the left-hand recursion issue.\n", - "\n", - "Our translator will have several parts, that we can see here.\n", - "\n", - "1. First, we define the grammar and compile it\n", - "2. Then we define a function to parse the Abstract Syntax Tree and translate any of its components (here translation is just to return a stringified version)\n", - "3. Then we parse the string we're interest in translating to an AST\n", - "4. Finally, we crawl the AST, compiling an output string." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "b3da2ede", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "5" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "#define the grammar\n", - "grammar = \"\"\"\n", - "Term = Factor (Add / Sub)*\n", - "Add = \"+\" Factor\n", - "Sub = \"-\" Factor\n", - "Factor = Primary (Mul / Div)*\n", - "Mul = \"*\" Primary\n", - "Div = \"/\" Primary\n", - "Primary = Parens / Neg / Number \n", - "Parens = \"(\" Term \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ~\"[0-9]+\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "def to_str(node):\n", - " if node.children:\n", - " return ''.join([to_str(child) for child in node])\n", - " else:\n", - " return node.text\n", - "\n", - "AST = g.parse(\"2+3\") \n", - "eval(to_str(AST))" - ] - }, - { - "cell_type": "markdown", - "id": "68751235", - "metadata": {}, - "source": [ - "###Simplify\n", - "\n", - "Now, we don't care about the difference between addition and subtraction, or between multiplication and division, as we're going to treat them both the same, so lets simplify the grammar to take care of this case" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "52fc05c9", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "14" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "\n", - "Factor = Primary Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") Primary\n", - "\n", - "Primary = Parens / Neg / Number \n", - "Parens = \"(\" Term \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ~\"[0-9]+\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "g.parse(\"2+3\")\n", - "\n", - "def to_str(node):\n", - " if node.children:\n", - " return ''.join([to_str(child) for child in node])\n", - " else:\n", - " return node.text\n", - "\n", - "eval(to_str(g.parse(\"2+3*4\")))" - ] - }, - { - "cell_type": "markdown", - "id": "4283dc6e", - "metadata": {}, - "source": [ - "### Add floating point numbers\n", - "Now we'll go with a more complex number definition to try and capture floats" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "d396103b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "-1.6800000000000002" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "Factor = Primary Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") Primary\n", - "Primary = Parens / Neg / Number \n", - "Parens = \"(\" Term \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "g.parse(\"2+3\")\n", - "\n", - "def to_str(node):\n", - " if node.children:\n", - " return ''.join([to_str(child) for child in node])\n", - " else:\n", - " return node.text\n", - "\n", - "eval(to_str(g.parse(\"2.1+3*-4.2*.3\")))" - ] - }, - { - "cell_type": "markdown", - "id": "fd694257", - "metadata": {}, - "source": [ - "### Identifiers\n", - "\n", - "If we want to include variables in the schema, we need to be able to handle identifiers. Lets practice with an empty grammar to get it right." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "83f8c731", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "4" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "grammar = \"\"\"\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "Keyword = 'int'\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "def to_str(node):\n", - " if node.children:\n", - " return ''.join([to_str(child) for child in node])\n", - " else:\n", - " return node.text\n", - "\n", - "hi=4 \n", - "eval(to_str(g.parse(\"hi\")))" - ] - }, - { - "cell_type": "markdown", - "id": "01e6a0bf", - "metadata": {}, - "source": [ - "Now lets add the identifiers to the arithmetic we were working on previously" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "f83c13c2", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "28.400000000000002" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "Factor = Primary Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") Primary\n", - "Primary = Parens / Neg / Number / Identifier\n", - "Parens = \"(\" Term \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "Keyword = 'int' / 'exp'\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "def to_str(node):\n", - " if node.children:\n", - " return ''.join([to_str(child) for child in node])\n", - " else:\n", - " return node.text\n", - "\n", - " \n", - "hi=4 \n", - "eval(to_str(g.parse(\"(5+hi)*3.1+.5\")))" - ] - }, - { - "cell_type": "markdown", - "id": "a0f79955", - "metadata": {}, - "source": [ - "### Add function calls\n", - "\n", - "Function calls are a primary unit in the order of operations. We explicitly spell out the keywords that are allowed to be used as function calls. If anything else comes in, it will throw an error. For starters, lets just use a few functions that we know python can handle, so we don't have to worry about translation." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "46d28297", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "-2.3245038118424985" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "Factor = Primary Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") Primary\n", - "Primary = Call / Parens / Neg / Number / Identifier\n", - "Parens = \"(\" Term \")\"\n", - "Call = Keyword \"(\" Term (\",\" Term)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? 
~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "Keyword = 'exp' / 'sin' / 'cos'\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "def to_str(node):\n", - " if node.children:\n", - " return ''.join([to_str(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "hi=4 \n", - "eval(to_str(g.parse(\"cos(5+hi)*3.1+.5\")))" - ] - }, - { - "cell_type": "markdown", - "id": "72fa3f9b", - "metadata": {}, - "source": [ - "### Add exponentiation\n", - "Exponentiation adds another layer to our order of operations, and happens at the smallest unit, just above that of the primary elements. We put them in increasing order of priority, or from largest equation unit to smallest.\n", - "\n", - "As the python syntax for exponentiation is `**` instead of the SMILE standard `^`, we have to make our first translation. We do this by making a special case in the translation function which knows specifically what to do with an exponentive node when it sees one." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "468b531c", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "9.3053961907966638" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier\n", - "Parens = \"(\" Term \")\"\n", - "Call = Keyword \"(\" Term (\",\" Term)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "\n", - "Keyword = 'exp' / 'sin' / 'cos'\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "def translate(node):\n", - " if node.expr_name == 'Exponentive': #special case for translating exponent syntax\n", - " return '**' + ''.join([translate(child) for child in node.children[1:]])\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "hi=4 \n", - "eval(translate(g.parse(\"cos(sin(5+hi))*3.1^2+.5\")))\n", - "#eval(translate(g.parse(\"3+cos(5+hi)*3.1^2+.5\")))\n", - "#translate(g.parse(\"cos(5+hi)*3.1^2+.5\"))" - ] - }, - { - "cell_type": "markdown", - "id": "c0733eb1", - "metadata": {}, - "source": [ - "### Add translation of keywords\n", - "\n", - "As the names of functions in XMILE does not always match the names of functions in python, we'll add a dictionary to translate them, and a special case in the translation function that handles keyword nodes." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "1a3b2dc3", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "61.12136331904043" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "#try translating keywords\n", - "#its important to get the keywords in the right order, so that 'exp' and 'exprnd' don't get confused.\n", - "\n", - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier\n", - "Parens = \"(\" Term \")\"\n", - "Call = Keyword \"(\" Term (\",\" Term)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "\n", - "Keyword = 'exprnd' / 'exp' / 'sin' / 'cos'\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "dictionary = {'exp':'exp', 'sin':'sin', 'cos':'cos', 'exprnd':'exponential'}\n", - "\n", - "def translate(node):\n", - " if node.expr_name == 'Exponentive': \n", - " return '**' + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\": # special case for translating keywords\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "hi=4 \n", - "eval(translate(g.parse(\"exprnd(5+hi)*3.1^2+.5\")))\n", - "#translate(g.parse(\"cos(5+hi)*3.1^2+.5\"))" - ] - }, - { - "cell_type": "markdown", - "id": "eb0f67d6", - "metadata": {}, - "source": [ - "###Add XMILE keywords\n", - "\n", - "Now that this structure is in place, lets add a bunch more keywords" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "e0a6d2d4", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "-8.2559618167117463" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# expand the keywords to include a goodly XMILE subset\n", - "grammar = \"\"\"\n", - "Term = Factor Additive*\n", - "Additive= (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier\n", - "Parens = \"(\" Term \")\"\n", - "Call = Keyword \"(\" Term (\",\" Term)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? 
~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "\n", - "Keyword = \"exprnd\" / \"exp\" / \"sin\" / \"cos\" / \"abs\" / \"int\" / \"inf\" / \"log10\" / \"pi\" /\n", - " \"sqrt\" / \"tan\" / \"lognormal\" / \"normal\" / \"poisson\" / \"ln\" / \"min\" / \"max\" /\n", - " \"random\" / \"arccos\" / \"arcsin\" / \"arctan\" / \"if_then_else\"\n", - "\"\"\"\n", - "\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "dictionary = {\"abs\":\"abs\", \"int\":\"int\", \"exp\":\"np.exp\", \"inf\":\"np.inf\", \"log10\":\"np.log10\",\n", - " \"pi\":\"np.pi\", \"sin\":\"np.sin\", \"cos\":\"np.cos\", \"sqrt\":\"np.sqrt\", \"tan\":\"np.tan\",\n", - " \"lognormal\":\"np.random.lognormal\", \"normal\":\"np.random.normal\", \n", - " \"poisson\":\"np.random.poisson\", \"ln\":\"np.ln\", \"exprnd\":\"np.random.exponential\",\n", - " \"random\":\"np.random.rand\", \"min\":\"min\", \"max\":\"max\", \"arccos\":\"np.arccos\",\n", - " \"arcsin\":\"np.arcsin\", \"arctan\":\"np.arctan\", \"if_then_else\":\"if_then_else\"}\n", - "\n", - "#provide a few functions\n", - "def if_then_else(condition, val_if_true, val_if_false):\n", - " if condition:\n", - " return val_if_true\n", - " else:\n", - " return val_if_false\n", - "\n", - "def translate(node):\n", - " if node.expr_name == 'Exponentive': # special case syntax change...\n", - " return '**' + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\":\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "hi = 4 \n", - "eval(translate(g.parse(\"cos(min(hi,6)+5)*3.1^2+.5\"))) " - ] - }, - { - "cell_type": "markdown", - "id": "17f14f07", - "metadata": {}, - "source": [ - "### Conditional behavior\n", - "\n", - "One of the xmile functions expects a boolean parameter, and so we had better add the ability to deal with conditional statements. These are even broader than addition and subtraction, and happen last in the order of operations - so naturally, the are first in our grammar." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "c24007d7", - "metadata": {}, - "outputs": [ - { - "ename": "ParseError", - "evalue": "Rule 'Condition' didn't match at 'absolutely_nothing' (line 1, column 1).", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[0;31mParseError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0;31m#eval(translate(g.parse(\"cos(sin(5)+hi)*3.1^2+.5\")))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m#translate(g.parse(\"cos(5+hi)*3.1^2+.5\"))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 62\u001b[0;31m \u001b[0mtranslate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"absolutely_nothing\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m/Library/Python/2.7/site-packages/parsimonious/grammar.pyc\u001b[0m in \u001b[0;36mparse\u001b[0;34m(self, text, pos)\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mparse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0;34m\"\"\"Parse some text with the default rule.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 83\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdefault_rule\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpos\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 84\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mmatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/Library/Python/2.7/site-packages/parsimonious/expressions.pyc\u001b[0m in \u001b[0;36mparse\u001b[0;34m(self, text, pos)\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 39\u001b[0m \"\"\"\n\u001b[0;32m---> 40\u001b[0;31m \u001b[0mnode\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpos\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 41\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mend\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mIncompleteParseError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mnode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mend\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/Library/Python/2.7/site-packages/parsimonious/expressions.pyc\u001b[0m in \u001b[0;36mmatch\u001b[0;34m(self, text, pos)\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0mnode\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_match\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpos\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0merror\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnode\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 57\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0merror\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 58\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnode\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mParseError\u001b[0m: Rule 'Condition' didn't match at 'absolutely_nothing' (line 1, column 1)." - ] - } - ], - "source": [ - "grammar = \"\"\"\n", - "Condition = Term Conditional*\n", - "Conditional = (\"<=\" / \"<\" / \">=\" / \">\" / \"=\") Term\n", - "\n", - "Term = Factor Additive*\n", - "Additive = (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier \n", - "Parens = \"(\" Condition \")\"\n", - "Call = Keyword \"(\" Condition (\",\" Condition)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? 
~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = !Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"* \n", - "\n", - "Keyword = \"exprnd\" / \"exp\" / \"sin\" / \"cos\" / \"abs\" / \"int\" / \"inf\" / \"log10\" / \"pi\" /\n", - " \"sqrt\" / \"tan\" / \"lognormal\" / \"normal\" / \"poisson\" / \"ln\" / \"min\" / \"max\" /\n", - " \"random\" / \"arccos\" / \"arcsin\" / \"arctan\" / \"if_then_else\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "\n", - "dictionary = {\"abs\":\"abs\", \"int\":\"int\", \"exp\":\"np.exp\", \"inf\":\"np.inf\", \"log10\":\"np.log10\",\n", - " \"pi\":\"np.pi\", \"sin\":\"np.sin\", \"cos\":\"np.cos\", \"sqrt\":\"np.sqrt\", \"tan\":\"np.tan\",\n", - " \"lognormal\":\"np.random.lognormal\", \"normal\":\"np.random.normal\", \n", - " \"poisson\":\"np.random.poisson\", \"ln\":\"np.ln\", \"exprnd\":\"np.random.exponential\",\n", - " \"random\":\"np.random.rand\", \"min\":\"min\", \"max\":\"max\", \"arccos\":\"np.arccos\",\n", - " \"arcsin\":\"np.arcsin\", \"arctan\":\"np.arctan\", \"if_then_else\":\"if_then_else\"}\n", - "\n", - "#provide a few functions\n", - "def if_then_else(condition, val_if_true, val_if_false):\n", - " if condition:\n", - " return val_if_true\n", - " else:\n", - " return val_if_false\n", - "\n", - "def translate(node):\n", - " if node.expr_name == 'Exponentive': # special case syntax change...\n", - " return '**' + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\":\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "hi=4 \n", - "eval(translate(g.parse(\"exprnd(if_then_else(5>6,4,3)+hi)*3.1^2+.5\")))\n", - "#eval(translate(g.parse(\"if_then_else(5>6,4,3)\")))\n", - "#eval(translate(g.parse(\"int(5<=6)\")))\n", - "#eval(translate(g.parse(\"5<=6\")))\n", - "#eval(translate(g.parse(\"if_then_else(6,4,3)\")))\n", - "#eval(translate(g.parse(\"cos(min(5,hi,7)+5)*3.1^2+.5\")))\n", - "#eval(translate(g.parse(\"cos(sin(5)+hi)*3.1^2+.5\")))\n", - "#translate(g.parse(\"cos(5+hi)*3.1^2+.5\"))\n", - "translate(g.parse(\"absolutely_nothing\"))" - ] - }, - { - "cell_type": "markdown", - "id": "5bc351fe", - "metadata": {}, - "source": [ - "### Deal with identifiers that start with keywords\n", - "\n", - "If we give the previous method a test case like \"absolutely_nothing\" - something intended to be an identifier - it tries to parse it with the keyword, and then gets stuck\n", - "\n", - "One way to deal with this is to say that it is either something that is not a keyword, or its a keyword followed by at least one other character. 
\n", - "\n", - " Identifier = (!Keyword ~\"[a-z]\" ~\"[a-z0-9_\\$]\"*) / (Keyword ~\"[a-z0-9_\\$]\"+)\n", - "\n", - "This is also problematic, as the tree builds up with a keyword in it, and that keyword gets replaced.\n", - "\n", - "Better to just make it a simple terminator:\n", - "\n", - " Identifier = ~\"[a-z]\" ~\"[a-z0-9_\\$]\"*\n", - " \n", - "and count on the fact that we give precendence to keywords in the primary statement:\n", - "\n", - " Primary = Call / Parens / Neg / Number / Identifier " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25aef18b", - "metadata": {}, - "outputs": [], - "source": [ - "grammar = \"\"\"\n", - "Condition = Term Conditional*\n", - "Conditional = (\"<=\" / \"<\" / \">=\" / \">\" / \"=\") Term\n", - "\n", - "Term = Factor Additive*\n", - "Additive = (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier \n", - "Parens = \"(\" Condition \")\"\n", - "Call = Keyword \"(\" Condition (\",\" Condition)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = ~\"[a-z]\" ~\"[a-z0-9_\\$]\"*\n", - "\n", - "Keyword = \"exprnd\" / \"exp\" / \"sin\" / \"cos\" / \"abs\" / \"int\" / \"inf\" / \"log10\" / \"pi\" /\n", - " \"sqrt\" / \"tan\" / \"lognormal\" / \"normal\" / \"poisson\" / \"ln\" / \"min\" / \"max\" /\n", - " \"random\" / \"arccos\" / \"arcsin\" / \"arctan\" / \"if_then_else\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "\n", - "dictionary = {\"abs\":\"abs\", \"int\":\"int\", \"exp\":\"np.exp\", \"inf\":\"np.inf\", \"log10\":\"np.log10\",\n", - " \"pi\":\"np.pi\", \"sin\":\"np.sin\", \"cos\":\"np.cos\", \"sqrt\":\"np.sqrt\", \"tan\":\"np.tan\",\n", - " \"lognormal\":\"np.random.lognormal\", \"normal\":\"np.random.normal\", \n", - " \"poisson\":\"np.random.poisson\", \"ln\":\"np.ln\", \"exprnd\":\"np.random.exponential\",\n", - " \"random\":\"np.random.rand\", \"min\":\"min\", \"max\":\"max\", \"arccos\":\"np.arccos\",\n", - " \"arcsin\":\"np.arcsin\", \"arctan\":\"np.arctan\", \"if_then_else\":\"if_then_else\"}\n", - "\n", - "#provide a few functions\n", - "def if_then_else(condition, val_if_true, val_if_false):\n", - " if condition:\n", - " return val_if_true\n", - " else:\n", - " return val_if_false\n", - "\n", - "def translate(node):\n", - " if node.expr_name == 'Exponentive': # special case syntax change...\n", - " return '**' + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\":\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "\n", - "translate(g.parse(\"absolutely_nothing\"))\n", - "translate(g.parse(\"normal_delivery_delay_recognized\"))" - ] - }, - { - "cell_type": "markdown", - "id": "873dd4de", - "metadata": {}, - "source": [ - "### return a list of dependancies\n", - "\n", - "List is a list of the identifiers present in the equation." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "53e9d7a5", - "metadata": {}, - "outputs": [], - "source": [ - "grammar = \"\"\"\n", - "Condition = Term Conditional*\n", - "Conditional = (\"<=\" / \"<\" / \">=\" / \">\" / \"=\") Term\n", - "\n", - "Term = Factor Additive*\n", - "Additive = (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier \n", - "Parens = \"(\" Condition \")\"\n", - "Call = Keyword \"(\" Condition (\",\" Condition)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = ~\"[a-z]\" ~\"[a-z0-9_\\$]\"*\n", - "\n", - "Keyword = \"exprnd\" / \"exp\" / \"sin\" / \"cos\" / \"abs\" / \"int\" / \"inf\" / \"log10\" / \"pi\" /\n", - " \"sqrt\" / \"tan\" / \"lognormal\" / \"normal\" / \"poisson\" / \"ln\" / \"min\" / \"max\" /\n", - " \"random\" / \"arccos\" / \"arcsin\" / \"arctan\" / \"if_then_else\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "\n", - "dictionary = {\"abs\":\"abs\", \"int\":\"int\", \"exp\":\"np.exp\", \"inf\":\"np.inf\", \"log10\":\"np.log10\",\n", - " \"pi\":\"np.pi\", \"sin\":\"np.sin\", \"cos\":\"np.cos\", \"sqrt\":\"np.sqrt\", \"tan\":\"np.tan\",\n", - " \"lognormal\":\"np.random.lognormal\", \"normal\":\"np.random.normal\", \n", - " \"poisson\":\"np.random.poisson\", \"ln\":\"np.ln\", \"exprnd\":\"np.random.exponential\",\n", - " \"random\":\"np.random.rand\", \"min\":\"min\", \"max\":\"max\", \"arccos\":\"np.arccos\",\n", - " \"arcsin\":\"np.arcsin\", \"arctan\":\"np.arctan\", \"if_then_else\":\"if_then_else\"}\n", - "\n", - "#provide a few functions\n", - "def if_then_else(condition, val_if_true, val_if_false):\n", - " if condition:\n", - " return val_if_true\n", - " else:\n", - " return val_if_false\n", - "\n", - "def get_identifiers(node):\n", - " identifiers = []\n", - " for child in node:\n", - " for item in get_identifiers(child): #merge all into one list\n", - " identifiers.append(item)\n", - " if node.expr_name == 'Identifier':\n", - " identifiers.append(node.text)\n", - " return identifiers\n", - " \n", - "def translate(node):\n", - " if node.expr_name == 'Exponentive': # special case syntax change...\n", - " return '**' + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\":\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "\n", - " a = get_identifiers(g.parse(\"Robert*Mary+Cora+(Edith*Sybil)^Tom+int(Matthew)*Violet\".lower()))\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dcbe489b", - "metadata": {}, - "outputs": [], - "source": [ - "grammar = \"\"\"\n", - "Condition = Term Conditional*\n", - "Conditional = (\"<=\" / \"<\" / \">=\" / \">\" / \"=\") Term\n", - "\n", - "Term = Factor Additive*\n", - "Additive = (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier \n", - "Parens = \"(\" Condition \")\"\n", - "Call = Keyword \"(\" Condition (\",\" 
Condition)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? ~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = ~\"[a-z]\" ~\"[a-z0-9_\\$]\"*\n", - "\n", - "Keyword = \"exprnd\" / \"exp\" / \"sin\" / \"cos\" / \"abs\" / \"int\" / \"inf\" / \"log10\" / \"pi\" /\n", - " \"sqrt\" / \"tan\" / \"lognormal\" / \"normal\" / \"poisson\" / \"ln\" / \"min\" / \"max\" /\n", - " \"random\" / \"arccos\" / \"arcsin\" / \"arctan\" / \"if_then_else\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "\n", - "dictionary = {\"abs\":\"abs\", \"int\":\"int\", \"exp\":\"np.exp\", \"inf\":\"np.inf\", \"log10\":\"np.log10\",\n", - " \"pi\":\"np.pi\", \"sin\":\"np.sin\", \"cos\":\"np.cos\", \"sqrt\":\"np.sqrt\", \"tan\":\"np.tan\",\n", - " \"lognormal\":\"np.random.lognormal\", \"normal\":\"np.random.normal\", \n", - " \"poisson\":\"np.random.poisson\", \"ln\":\"np.ln\", \"exprnd\":\"np.random.exponential\",\n", - " \"random\":\"np.random.rand\", \"min\":\"min\", \"max\":\"max\", \"arccos\":\"np.arccos\",\n", - " \"arcsin\":\"np.arcsin\", \"arctan\":\"np.arctan\", \"if_then_else\":\"if_then_else\",\n", - " \"=\":\"==\", \"<=\":\"<=\", \"<\":\"<\", \">=\":\">=\", \">\":\">\", \"^\":\"**\"}\n", - "\n", - "#provide a few functions\n", - "def if_then_else(condition, val_if_true, val_if_false):\n", - " if condition:\n", - " return val_if_true\n", - " else:\n", - " return val_if_false\n", - "\n", - "def get_identifiers(node):\n", - " identifiers = []\n", - " for child in node:\n", - " for item in get_identifiers(child): #merge all into one list\n", - " identifiers.append(item)\n", - " if node.expr_name == 'Identifier':\n", - " identifiers.append(node.text)\n", - " return identifiers\n", - " \n", - "def translate(node):\n", - " if node.expr_name in ['Exponentive', 'Conditional']: #non-terminal lookup\n", - " return dictionary[node.children[0].text] + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\": #terminal lookup\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "\n", - "translate(g.parse(\"2+3=4+5\"))\n", - "#print g.parse(\"2+3=4+5\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "70b4791e", - "metadata": {}, - "outputs": [], - "source": [ - "grammar = \"\"\"\n", - "Condition = Term Conditional*\n", - "Conditional = (\"<=\" / \"<\" / \">=\" / \">\" / \"=\") Term\n", - "\n", - "Term = Factor Additive*\n", - "Additive = (\"+\"/\"-\") Factor\n", - "\n", - "Factor = ExpBase Multiplicative*\n", - "Multiplicative = (\"*\" / \"/\") ExpBase\n", - "\n", - "ExpBase = Primary Exponentive*\n", - "Exponentive = \"^\" Primary\n", - "\n", - "Primary = Call / Parens / Neg / Number / Identifier \n", - "Parens = \"(\" Condition \")\"\n", - "Call = Keyword \"(\" Condition (\",\" Condition)* \")\"\n", - "Neg = \"-\" Primary\n", - "Number = ((~\"[0-9]\"+ \".\"? 
~\"[0-9]\"*) / (\".\" ~\"[0-9]\"+)) ((\"e\"/\"E\") (\"-\"/\"+\") ~\"[0-9]\"+)?\n", - "Identifier = ~\"[a-z]\" ~\"[a-z0-9_\\$]\"*\n", - "\n", - "Keyword = \"exprnd\" / \"exp\" / \"sin\" / \"cos\" / \"abs\" / \"int\" / \"inf\" / \"log10\" / \"pi\" /\n", - " \"sqrt\" / \"tan\" / \"lognormal\" / \"normal\" / \"poisson\" / \"ln\" / \"min\" / \"max\" /\n", - " \"random\" / \"arccos\" / \"arcsin\" / \"arctan\" / \"if_then_else\"\n", - "\"\"\"\n", - "g = parsimonious.Grammar(grammar)\n", - "\n", - "\n", - "dictionary = {\"abs\":\"abs\", \"int\":\"int\", \"exp\":\"np.exp\", \"inf\":\"np.inf\", \"log10\":\"np.log10\",\n", - " \"pi\":\"np.pi\", \"sin\":\"np.sin\", \"cos\":\"np.cos\", \"sqrt\":\"np.sqrt\", \"tan\":\"np.tan\",\n", - " \"lognormal\":\"np.random.lognormal\", \"normal\":\"np.random.normal\", \n", - " \"poisson\":\"np.random.poisson\", \"ln\":\"np.ln\", \"exprnd\":\"np.random.exponential\",\n", - " \"random\":\"np.random.rand\", \"min\":\"min\", \"max\":\"max\", \"arccos\":\"np.arccos\",\n", - " \"arcsin\":\"np.arcsin\", \"arctan\":\"np.arctan\", \"if_then_else\":\"if_then_else\",\n", - " \"=\":\"==\", \"<=\":\"<=\", \"<\":\"<\", \">=\":\">=\", \">\":\">\", \"^\":\"**\"}\n", - "\n", - "#provide a few functions\n", - "def if_then_else(condition, val_if_true, val_if_false):\n", - " if condition:\n", - " return val_if_true\n", - " else:\n", - " return val_if_false\n", - "\n", - "def get_identifiers(node):\n", - "# identifiers = []\n", - "# for child in node:\n", - "# for item in get_identifiers(child): #merge all into one list\n", - "# identifiers.append(item)\n", - "# if node.expr_name == 'Identifier':\n", - "# identifiers.append(node.text)\n", - "# return identifiers\n", - " identifiers = []\n", - " for child in node:\n", - " identifiers += get_identifiers(child)\n", - " identifiers += [node.text] if node.expr_name in ['Identifier'] else []\n", - " return identifiers\n", - " \n", - "def translate(node):\n", - " if node.expr_name in ['Exponentive', 'Conditional']: #non-terminal lookup\n", - " return dictionary[node.children[0].text] + ''.join([translate(child) for child in node.children[1:]])\n", - " elif node.expr_name == \"Keyword\": #terminal lookup\n", - " return dictionary[node.text]\n", - " else:\n", - " if node.children:\n", - " return ''.join([translate(child) for child in node])\n", - " else:\n", - " return node.text\n", - " \n", - "a = get_identifiers(g.parse(\"Robert*Mary+Cora+(Edith*Sybil)^Tom+int(Matthew)*Violet\".lower()))\n", - "print a\n", - "#translate(g.parse(\"2+3=4+5\"))\n", - "#print g.parse(\"2+3=4+5\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/docs/development/SMILEv4.pdf b/docs/development/SMILEv4.pdf deleted file mode 100644 index 44ec6794006950b754900dc4c930fad2e0e7022f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 159909 zcma&M1yCi;(gldi0E4@`bGbMJT-@DlaCdiicNp9TcXt`w-5myZcblE>y|??njr})v zyCXWfsw*qAPM*xJswheY5iv#}6FU;+?%wn~(ib2*8woRsouMTXFE2pG!q&vV(ZbWj zm;?xrApx>;b2D=T%067v?7J$k>-&i@g0TLwIBy5~O5>{?z9S{h~ z#Mbzqu1Nm-`m2M3^?!7Tm>Ag^n!Uj-qG%>a?a<+3M0kVKV z05J<|XA?(&n6-hk$-hMak|Nr?f`Xh}oNTPDY%IbY?99wUB3y#(!a}SZqRc{^Z0wvY 
zUKhd_QasD?tTnyXysls_BWDo^&U>F}Yj?*;D{$OT&2c?ceC>#ITu=DDKwee6(!a0I z6in(IuM|Sswnqfa&#AVmG`*sdvB{vaz*zy2IOq^HswFuI2uhBcFCMOyyQwJpsHl`x z-WOB({I2ob5{^nuOkgd=+~@?nL-- zQ0_*RsTYVwn9pqI`90Df7QIU&Bl93>viq#G9kB+=O0-J4NdqETLws*)y&+O&qB(XWUSoHnA0k+q$wA+C}#lV ztAq;>lp|#hSLzEAQ8~^^6S@UV^kM>C;=0s0oM+;?sqeT^?#kN}iD!S4b^*aLxk8p_%|)p+D%!mwnypJ-7jnRBaIB}DI(FP7V~?snd#8o6Q` zvgRfl4*R>TS?qN#D%;C)+od&La|lw)SqJBN_-1EnQ|Letj#?f`vEA7$P(0@laSd=t zk=hK*#q#g^yD-&6F3MR=sKbu%ugtgi@zK8WWuz%pZ^HX008)jW)VQpuy?C-1 zVIywVka>1e$v1O#@?1aY3epzv)L%mtYE8r9o)VU{auy!|uc~1gqdx6tw;jk#&C|^V zQz4=%@`}m@$VB?3Nb4QXiED%e_32gHLrI4sXXJOJh##^Fjw9#WI?Upf#_t;&(Z!=EIu@+dK@+ORPGMGi4ogP#>A=j1m$RlyGxMy zAGdzr>~2bLfDeIR<;a?^bmBksESh%OdRw;6zP%hzReS`d&tFWYk#$UB*fJmvvC+ox zxmjzx@9w`9Ly7MMMkJ)ryUjpu63QnM(4BzlES#baOl9OjFxtX>7u(Bzw-#xf7&C)^ znGy>i&_Azvjh1fXw@LACSYqmhs-S=43m^?ca$e9B;BWi5cbYk)aasp2@v7?2n3-ZaJ7NI{#*AU#1hf(R@_7iPJ$5 zM|j6C(BVg$jM0kl&(>#IK!GkL*mx>n8mSvao3-edco4o3{plt#$-`o4Iap^C4RB5J z2Sg>-2LbH{-OIWt{FM`J9%lBIv76MK5*-Sb8DJ>@i49 zogt#HUfm3eHQR;p(%0+z`@z_R6mPZc=f*qXM0QC8OAR(0>*;E|<%TQ95+qTG@8mg7 z{{$GEUEz*uqd+W;<8L0l_a+0hNC&LZMRI$`a$#=d&iaHOK2sWXT|eC6$%{Wc*8=Ym zZy#u;7my-D0`ig`s-mPnD={-Mz*Udk28PG%;N9clOyi@eV*yg5BCR5%%V30Za(mfe zWr;8~;!}(WPVppB_)MVm85shVMrvqoo5~AzV_TIt#l>wnFuZZF^qVzz>9xe zV(C-yuHRVb!e)@sebPEKK1Ffkh2!LbX)`b#MRGEq379Q0;kB4H7_9(=KgK~4rmE;O zEL)EC?Z_1gKuRo8*wx>#-WK-r2_G=WWcGs2bm<+2$h7~^>BD^QehQ!P>pq)y(Tq@P z;VJb8ln9r+56WwvLq+{rr9VV&VxEcy^9q;*H+ZVfbHO?0m~=3sik55|@eNe^7F>)M zww?VRKR#Tmy&Qzx6;kY& zJkrfv1N(-}&|uiUqMPeoxT+#{l!R-Hgp{`IH9sA7Wb@cKk5QRJT~!?zU6$|&nNWad z=#1dTBK(s~%Gd$W;1-9P8wh%q77IObR+iY{1M*(bV-{nNBh3X=01QnusKf%-x?<-Y zauOWm9{BCe51IPxCf}Z(zyXE6`XpW+Vpa`LqBLI&Qkn9;`^=Q_kb$gW!OXXdn(KHg zwXGWx!K17O%2Yx@0lGK8C2HV|Wxy4mrtd^En^`mL$wJniHEOb_RIgmpl9P{Vjgrnto86X6 zhy>~nducAM9eSeJ5u$xdaqLit)8C8kxPlyIx7snOl^lBCGGj8b@y53WYkpy_eJq*u z1^Czo08Iykf5L*H`KYkMM5H)~snw+jQCP=iy$BW>NJW z@(8?UXIGc);#I$$R#y4qn*dofc2J)BrjDjs4&7z1cqM#KcZ&g8lTx_G$4d5fh>Y&% z9=QhDujr6#%i5>PuC9gXW+>{1s-{bhxTcv45MCWg?FATW`>-3g?GKXmwe zSE5d!?)X_#j5PMfiN>h6?H1o{LP%}a^Q+a3YQEAdEF6u53kIFyg&siQQ%<+PJ7zeG zC(|{p?yTewu@_r}n-pIMm*vfy$hNS~ENV4B2Y(9%Fn6N~{xJ>K@J3tCO!o4OcJW~f z=YEV3f%T?~IzLAl>n5-|oYjm(>GjE)9bZIJX}U(kel|Ve;KKC>7MMjY9|aRUhMQ}6 zDlV&Bt>huAbD8AT+oxR~Rv2Zu4UtFrXDy`P| zu6Y2JWt#l8X1gKvDkF5AELYnq*hSr7zHHLb>BP<}6Y-|-Vpj(>QhFl|xO_HWdT96R zoJQ<=!HXY=8vQ`vDbtnBDOG6#~vi_!~O#^ub;Z6+>MzYc^eX3bjBUL zgt`A(TCF^zdtaKQaY&Rti05wZK)bGFbv~NfnZD(hYujW?GX-OIQzq^CpTUiQwR7&; z`Uxyjv6O|j-Jl4mHa|b;3Z=6L_j#}aMj=xH%#_7g{cN--pV?#B{-{IXt8k70o88Mm)*u${1~*sP3+uu zO{`)V)E){61g|gac-YFiw09i|J+mGV2vHY2(q$`Acj}t}F%qT6DX^cb^`9Bc;6||`g2ya{SPe|l4b72+8?tJJiBGSS3m_&5Olnsj@oWH$VP=2 zRW-OeCB@Q|^};(HOC*BSSHWN=fTXKy&zcsB0DN;@4>H_BopNqBRqpdCj{q83O6m4E zcqxW(YKkG*8La{HU4a8vQGR;R_h4d`*gMPA{?n*yDQAaNkLq3C!r>=lB|X@ae(fSG zlMNEWs5idtoGQkY*))lF(L~&sxS6APH478Vnn7W0HJf(GSPT-B!z^mF{J-VenFXuB zQg0~27Er(KVc`L^Y}|hN|B7UPZfm;^X91X(+&X2MkUC5&2E|4&av0<$QyPMYX^cOo z>zrFDoB>5fxtAxP@RglEof^4E1U0o6ApfJ#`#jhu(-yQRZz>Nu0h`8P zRL8y)<3>xwFw3Qed-3S4!H^$`av^7-Cd{Du3K7z)XX~GL1~?!j79O&%tDEUYY&TVp>IVc5V^PwAFrwJ1Z7g2izU; ze=0m~Y8Qk5z`_FtDajzv2VrOfnoQ%S^L!W_yy3aYF-VH1;78|A@1$@jywMj;AV7S}3gUzCDs!es(TO=` z7Q>I&T@1(fB_<;`IO&)pA5t483BMiM*4_NWj9p#ZgNaqg!cAWMH72?HpS%Rz>D;Zy5{kiGe3 zWBGL4($E3;CcM3BgHhqx=yAKz7QV-o{Z^7TjXJL2#;HGby9Q$sNl%xOKvd{b^`?S^ zDDEhl6%0ge>;Do4xq=P#NnpV^R-Os1kWx_UH}WJ_8Z?f}8TG~w0je!P=E})Wcbgra z&t}6r(B!**41-OOh0oyX*!%;uW1{=WAUx17H(7KZYBHH!1TUik3b3JvC#ZlTG~`JLlnXN`3t#kk9N$Q92X0ig_4Qs}JQ` zfZ2=tROWXBHZBff!}$%H?#L_n_SLYzc=v~*HcDN7`9kh5k-~yzAPCdg^Z|kT5dpt) zd(6*PWbYL>%)b`bm+l%|XQ!y#nOmJ;f{z*B34oj)ia4VSgo`UI-q5}*s`n>0Z{Je7 
zd5R|C-vkA%hP9WfivS!MPk`g8(=0ozE87=b8TG}H$SNhuN+9h=%W9Wk!2ix$Iommz zyn<>Vm8EyN3y=}|3<`wGN8@$rEwjfZyx0+(sxRTNrLIy{~WoX&>-Osb?;1fIKUUtC6uLNyDU z{UyZDD;+!DKaI75+T8)oz74Cy>%<+x)VbtK{Ctv$Q-P#f@plEnUmi1)nhJ$@=Y>N+ zR0<2Y)Ay5Ux5wL>uV>92FZO>{vdoPVYgE>ux979v@xI2CkIH^5@}KOUeg>6T(={Gr zZdB5J2$gupmM_i^i5{PUv)ONXeJ@EjUGG$4&y`sC|M;DEpQTcrQ|KLf&PGcdGM&}2 zkfzw47oFK;;dr)j$ezjBqw=vCk#sYEW>ctbxY*FnuR*6AmSRqaHk2DHTK=(mAOBEO zp>rZf7~+VrKY3OG9hv|hP`3ts2pQ>L7hBh;YB>MCp*1|+mBAU*@sJhOZgn5hVV|XH z-chmawYs1~-qcP-3ja%o7qN^w(#wc0n>KT|kdjN8`)HB&IwES^V^MHgaLcKch$aQ0 z{-G%c?hG`*y_@cAZdboSE~tAE+DU^4jRl%xNn!EzCwQ@LiH%5&vB7MEBLru)|H?e6 zk{#MfWM@Sc>V>Y!rsJm{{zxf^S>^ticyvSq#)tQnS6W&Y*49W(yE9B=kMm3yW909F z0d3}BM*SpFS(iPHgp*Xi6U{I&HEDr4;(0YR@y=(p6YiPCMLmqHDQ3}6 z;kYRGSjz;j(|1m2acP76wvTV7iBm%EgagOb);0Ewd;xczar9Px`@WZsPDKtR2_#&W zGiexT(Sb@w39bRN6hdg$q2@|E5j_OS9pbITsLI*5LQC~QN=fHIJqje|Rr`@mCUKwld+_eByr#Tn`pCi*Iko_ip zX3EAE*$kq?(8o>thtbJAp;K07hDUS9-_UpvWx4O87DVNova2v~=J~)ZE{!_On&~U; zecTN8t{9IQFDE}VS=*Ig% zI~&hi#{#iZ;N7vNxlu zHWP+6;?5b*$_E~vHzP8T_yK3$vP^3no(dm?V=-RNfbzEkIVsr!pDGn1JN-ml*6Pr- ztcwH zfOt=C!I-IJJ-pGNDGEo=56=%{AwV~gULDkD;Z4P#$LCISPCEsdT~?-UcC1YU{uD5| z0kJswMb3tk?1=o8%l3yG8^VyDVk>wvi+aQTW>CR4jEU5Joa9onfY z9(S2Y^f-8xoJ~kmv?Kf7(EaK zBFntp{^`y(p)g0$%}&GNIK_2ppsMgzTT(L9+#g~xYQ2|3RixF`0^){&-|?PoA2rtU zSwB5MEH1xQlc$EeV- ziB3k&v_k5SJF(BQR0AwAxu$DbQ$d1fA;~w*9W}18d*a5;%#GVQOpmNM?Gk%KdAiq) zOOM2?q-0-XU|X(KV;rTz_Ota(td-y=&&2>aL@*cdpX2-c9)|c*{1ZRBa_;~4P!2Rg?`4BSjkM1NCUdTiYOy-&Mk)F?wZH5m4QW;~2W1j(j z3K`-ah-kk2ty}cFsgiJx@lqx;s;JZ4v4O~xHQiqCU)27&P9{AXr}8EXL-&H`P``s@ zMpAiYl|=lvYq|(d*iUa;wspFGLbGR=?DH1#n4ntdgf20S&F%)yq@*Hod&-(MlC?8@ z?}S0#eFPcL5I)s#;~$n~kkedj0i;Rcx-Tdl)$hekn{cL|gz3m0Cng6+s94>XQ_Z^P zgo3Fm-w)n^v*49|3uas9>tEv5#@q6+bLq<192~kSm$jFreADtJrsX+GHKfIZtVzns zA?dLeE(rYGOj6E!VXia)Z3FcgC`8y7LJ>Z0v2dCb@I#jjkKQ1P>+i(OY!IEKI3Fo? 
zPb>q+X=p3Lodwd$*N>f0Lv4@^Jd7-Rt#>RpOylayj`86T4Z%uLu&t-cGGp(9Tt&Bord)+C#>+|u#U%tsuz z`}3@Mv?Jb`6&CiK^<4o^Ty`NW#clzL7P)-UO9L!2);)Le3$l}*kxT!pMm{DcQWpmM z{Zlgk(=5sZoYb#zbets5t+1$N8x4&c;+s>7Z&2vz2}ZEe#oo&_ZKoY^`1wZ$K%ncu z&XDrt&?EotQ-n6|EIipl-3o+x6qoR|ZBc4;c7u=8ssO|=RT-`bW{Pu#z_DzkY-;yj zz^YSe7Tb2u`#ZNyBwW2LO;t$xBDk737k(mz{dneTl@G`+^kRp*_k|dV%sa4 z(~RZ5#78g3E#_zEGzs3KrMp?yn9$|m{=l@3c)7SY@N?j1HH>d-ML7dLFl6E$-#gy* zDpJ0L+0A*gT3#M3U4K9ZoAY4CgQKA*Dfu(KkV>J!OGZKrk;8qAHU9Z@WG2G|mW+JL z!;E_LS2n9|k)c1Ki^EC&@;=m+7sz8`?*R5A(V7X%+zg!%=y@MI9%@c82Y7eW)cA~C zXOS2Mr*t|Cc_pi&n>}u47s(sdM}!XjG0kvhX(W#%Ah>XrYn?RmLG?!~){x=|rmItt z7VV=rj@XdrwT)!*@7;>c5_5RvgblO(zQpvn~Osv`-myX9)WSbj*G0!9I{!o_%Kf33nY7R>k@a<1s^ zAy!c+{^3}C6fC$)Iy?h5cvHqIjc79IM>O9B7GJTTDz zlej8o<-d0id`SPtatTFHK?fr8b(~RFzj_)}m?)auU=Wl;GVqdQp>y!o*3*zemBhid zRX`9707G!9{gHSp1OUTSzfD84^tWVa*&GlTL{e%GK6*-;{W!S{9Awm~6Be(&+y z_q$}%*=u`*iMgE$>kqkrqSd?z7Ht32fc#eb&e|bC%N{EBqX|eFg-nK*;+IOj*lFFV z6I+!n=jI-FCJ$C`n$-AqDK_o>5DOF^U*gq^Eb@~-P?Wz~QJh;G@*-NTK2iJmAcd=; z62}ZL;Sl=pQ55RB`%2&~_5sUvWlp{5JD_PuU@O2%5H8kjBigI-T&epUMTwufRx7wZ z=6!NzAB&28(8Awi2IWpX*&P?0B@mSi9}dekcCZ|vB1Lb^H_@z+6&!imKA$TNfWso1 zz|I^d|N4uYX*5u!zCpb@Hz_bgWkdZs_|g0;9rs*MwM9g0`(g*-P3BbGFeA#VJrE{mY#v~nqUf<-CRa&+;%6?WTG+kZZIsZRy*sKhHRF?p zBwjZjF8YoqnS?tBM#)~#5*1>_H7j|2v?29@{}U;iZ4O-y&|#v z7IwJn-P9WBWH8D25fjrGOWWBprajyEW#fARv?B z4?$Ns<%F?2ut_KC&}qx&VV70HWLj>P@*l7-@Ts7zXs@6lKbJ0@I&#@_Xaj##Mhc}L z#|;T2k407>ZQ+5vGO$l*YC`O5J$sS085yR~H|F64n{2I>2|hDmsM#}QmG8acR!pLU zl{Ji~svz7j0GQb_#6-hiW9WBJKbarQ-Ay8hQsDCh19uYkAmEKy;qb2!e2ymD5b!w)mx^IVYkY*udNCmPk4zuHMwz)DI&8cWxOGhye!lfbJdVRS z2W=K9iB)Bb=>sSMjf04{yB1u(Hd3=O*Z)b5M!`k{ZYjwNzR|dVf-+bi{VVmUTdD@J zYKCTgT*X<~!QfpbM#+NGnNH2lXM3zfeV%%%lv-J$4^5^mo+wF&eXcy8`Y>C?O6hrJ zo1UV1FSrZCS*ho@g)u%>*lWtsm(!j&@fIQpIsn`qy3EA$nJ;5l5j-*ZQO%B~WHkxz zLKGuik+*o90KT=(h}rx`3Yp_L=CBH0(-z z;^po7q=%-&6Z?4Ci^f?q4$#>JtHc#NDx2~qa?%N9_ zUPUI4CBhnY8d9o6V=)b5e_E{7EW?yOm7d+<4Mu-PM(pW}p=ze&VfXL-v?!`<^maE_ z5`pHOBX#C!l-Y1Tt8KA^rIcZ{VR3?fc<1DTKbSk!-9}iHH$+vaoTpwq+krfY^B#WO3_AHaX(z*9P-Tz zPOD#J*pIB+mokX-1Y6hjZMXi}H;#vTQ^>+qH$Ge5_4{DNm!&lwOYvYp3T!U%F!=BK zavlx_g@uB<$Kl#1U9`+>-D#?(GF}kzOA01=k!;A#=Ha?zdS`Df`7P_SezY@JdlcOM z#$!kz{YkZx1+T#Nk_5EX1=Qx34UXfGpKDnHVjd#+g%7vx+&<51&FrDg21U^MYS4Do za8^wjsy4q{m4ShK^Lo(^Kr}W=E4T5%ICp=eR}aVCP}$OCoaJ6OSACmEvJXz@d2dC! 
z<-`9*sTvi$sISuE)-uQ8tm5djbFhEsql)-b5-}GXP_d}Uo}~?&iPK|f65Y1EH-L31 zD=l4Qe`NrboPU~RePqhFzI0)U-KAfR7_8{LV$N`ZaNFfcXzqKnX-SNF+;T_Yep1*L zZWl%W;oaKH_^3F?hZj^Ua9YB=v{}0`c9&<}(w<=H=(I#&aJIg#!nmZ6*=Lq?URpr+ z4TX58p%;DhF0v&R_3VDA$&s?QR@=mOqRjo$^}x2fsVz?)@#-FZtuotE0miGV)6RN<%in+K zd6iQz{lAl8e@O%Vo4eHiAtnDQJ^%khPub1iwni}hg;n}5`S>54&c8-J{@X?OKar1r z;g0_1+xaF@h9T~uL9;b!NEQ62fB1Ux?Y^STpP4Qq-r0n34rhd|8z&Z zD}hjtyP<%;JeeWZe)@y+hw`#Q$K7fQacJ~V^SC~{c<*g~Iqn~8x=ICl3w*!pp^$Jt zZP_11P@^+s_uM0LJi&d1qiY}jxyxkr9(A~u)3>|ws+=;Zq3!2g@*!}o3qZdmY`K|4 z-ZA2rkmUn6qJAbeU-S-PrwR&BJ|WUxw5r0s>L7<=^4OtKnj4+j#?2055CFlunpDc^sm#9JsmR};H%6KxM!go$wl0iSqp=r zNz+1BDER{{Hlh9Gn3yWh>J1@oS9R;jQC*g}vp2F`jz#hVLdjs?YF3NEiV5TG6c69B zCjE9^JmvK?g$g2J!Odu+nxeASUe{1H=%RfdTmkc#r?$$vZ5z?Al*&wawRJXk>ap=4 z?$bQzH`Z|l=Lf{XWaAY0t!gLyhWAId>*e$H4(xD`p08JQch;!K0{1u!SD4^S`i>Mp zQrK9(y07sCPndC{(?{cn66q0DN zLHW9XTdA|)5GzXvB6ndC2JEXWf;v=mF1M6jX>vY5p)lOFiVEdmXPpGi)eBvaZRA0it zgW2s?{)`J2PbLph=0Lt_AB8LtEjdpk2+vJ9nziH|zYOT%mNnTG5R5p028Z@P`??Lo zvKnM5F0h$A)qAr&IZA?lJIt$Z5QMmg)U&vmD;zDZP#?Rfk$M52fqf{Yo#(hdOY;<| zvnz*omNW^m0&*tn2v{?BF0Wvf$kfD>&l}=KM+H)w5&)veYV9u=9UPL@nRw!LKMh7; z24%{pGk;leks>fXHz$+6q?RgJaE7~(Z&h@v(tcP*-gS%3e0qT3nFN~v?HSrif-{9` z2n<@#-XdWky_&)zQ=Zn|6w>xuj#n-mVw0Nl_y@W^Hm&zt^!ch1VrEVnf$2JKBIv$N zDEDj(NX~qlJlWV?wq;-!K&~N*WG9WG|M*S@D{sOYH5bDKAFQ)+PxJo%XS6J^WL@=FG>(V!<#hCvS|Oi zmpB7+!f#wBtc7F%;1g`&JberYxIP|iuYb@ z@=o;1{s@Mmnr`%)DeOkF$S<85RnAT{Ud@ZJ9-Yv(W~0g%D(L;B!Lqr0F(6I819uSC zzL6Mp#0pZQs%R;|No=TT*eL}rclD9wc5YT-(URDMKQ$%5GWD^vpX*6$@q#QNIJD&Q z9A_GFx{+n#u%y7!@li^Zg$u)lysj;4-b|I{Cay3d3oD#mlp{PUlqSUJQ=!}39ICv3 z#drb9cpEh((;Sym;a)PddVAC-&x7*ddy%?G;lo(=>)LUgH65}*)_tCuROxtRdV}Nw zohswnr!~m-3l2c_srWr!H8pYzWq`(=x{L%d@I&f(e5c5xEacp3S>?UUk!_kjyG+Gf z;v8Jl&`uZ_9~^&Pbun3s(hZ6$GA0wyBX9*Ptyenejqg3o#D%b>^qOtzd;tsHv=buiWlCZn~LWt5C3 zeHXfbNddLhMbhUPx)unuI(?NYIuYP-dXz4UzCE+Bkj48jH~joKWIu2LdFP~G7^a}+ zbZR^c5=35Wzp`Xr2&7a!VQbBQqzmswQ z7(f0WGLC_P{-1=d{Ac^u-v!wJv(yy`MskS%y9!!`8G@w0dzZhU5&tFa{zH!aYoy(O z;0pf;F<|-&=I{@s9TWS1&xQQ;AF(UTst-hvk*%FNnI%p8u$h+ifYiG9OZ>eA8b~f2 zh5g0G+^>W5#-PLDEoV`u=~@I80#z0Xb3{KydLX6 zEwt!dKKFsxhHeP^ZvoH~bq&4myEFoCBE={!Zr-Le2G5|x+V(bp-c_5(NL(|4wc1?zK%1ft_)y76NEevBizTezsZfG*2%aK0xT+(;oVpqqVW3{9C)EKcRsVy`|T*n{$3NyW6j2S@Mp5N zFc$d+dYAfyjh*|aBlDwRqa&8r+YXv;QoYrx9or*e$;C?ytY5ViL^~HPHhYUMJx%>U zz$RuBnAEj&4a-}BlBEhyJ_|L8`&iO&Q?nG;;xQnghEQI_G_5W@Vn8#BtFWG>7GT+Y zq{IdIYfi&Oxygbrf>Lui8HPM%3&9Tn;PsvSORJ-bZtM`evhwX)U_kC1di%xq4BgjJ z$l1QC+*lz3emXIvVS#a6m!KiVVF@H*ZHA~uUjJZX@=nX4Ksfp7bK+L_QQ4B>a8=?_ z%}M6aikLxz_P07BX<0Q+Nq&=2=QH0ll+wxC1$@x(99LELu4~Fy5%vi1PyPN!##Fu- zO%OpNlsC5*WW#JgM|jVBr^LI$e&HdaLBFBx8$#-S_Z+6S0F2Syof!CGytyg7)C;@ z(rn|qal0|oQD_@136*0kc+5Bh0<|t-%KWTn#1+QdViP5LdwHm7F2)Kpw0~yeb^O;# z)XljinQ7u}ee6c`JaBRlvjyOhgr5WIi6QZ%-vwWlJ~!08p6yPON_6(^CU#fp!h-i& zpv=hyq}r`hVCh{v0!yCt(fv7Y_fV*uoK{BAVg@|s0WpWOiO*HWI!Y|zM}{(oH`0vY z;?}|!)S4%Ws4caq} zA!1*7{2YO37vwGgHBC4tTucw#1U&K+1yfNKYZsJmr-!Z85<{G^9P_^ffJWbZCC=By zVEU4jvyGSiR#?C?;P&v)9ulRFDcq{$SUgeNmtzm=KvpXqsH2Mn3dFOLs_I(qe1hBN z#E_c=n=w4-e@0Tn-T+314%S|MN(>+w+M73Z{8CK*kUROpNTAkrV6KXp*@X5jUxr%r$*szcfDd{4QZNUxVsUlwx0|sQ9t{IKAez$5 zS?XI-Y{g>lL;(%lu6?>HF&00W~8sF5`&P5$(~a;J6AVHkdCH4flFjhzmheeNXlV2$`r<& z7TKa}DQuAqB3$t2`Me}POab4^%PwItI#(W6GRPURwB{3WYe?g0OIH1%vvleEnR0k2Cluc$n3Fulh znu-#l$4mQtMKZ5#@;dp5ywqO_#V0TloO^GM^W!kQwC7-5;qz}~S8igBe$?aX4-0dV z;q2P9CA)7dsST;M;c`NG<28loTfERjY>-@Zk0Hfe9ZZ{MAE%K68<>KY>b7;vA`bN3 zZjeBF8qHHaPH4#i2R;%XOaF_tci_@&OP7Vyw(ZPH+qP}nwr$(ath8<0wryutrL%6G z?*4A?)8FphNb{vGnlTV=11KTAhrjwOaPmu@F7%XO=;*O%SxZ8)z@1kjjHroZOxb09fKWSMU) z;Ew#Jdim4X#Z%&YdMVgC($vhivdIQ4YTw2eDDUf!n-n5>$RAar<@4qKVcQ9A!qqdV 
zqFdjO1m(g*=2ar~n5ps?oHavvF&HD6S4m>O`nbrOb|?F;x~2v|GplzNsejn7FjPLC zFTtDZL1eVtx})II7mpI-Zo@oUPwn7*lsc0C7+U#`ODT9Nm?=Oz|LrEV6t_G`^nBAW zZhiQE;k{AJVq5MSZm)JJV{!Sjjs@D1C8XAz#_Xi3xFYl<7ZsFk20SHFvcRp(N_lxc z5V17L=AHH8(Dt$OH`EoG4mtiac=*@u&wquw^c;T$b*Uq{;QR>?Kx-S=`c#y~A!2%0 zAad&ILubP0X}Ei&1ljCP2r0??Fy@DucyD&%;zu?CB##=@sY2t(#JGwXIY6v0v`5?# zrEaBCZ4##vHOyB0QzlMdBf_AYBHQsp;PGu%zJ4!UDoGQj{@JViLC62UNcSIwZ~r?; z_rDJK>mlafj{A=qI0F;ge`~)zQj>DpWJTz?RNI4amBt`|0CMKaiO?xcVs>Dvhs&vV z^f%EoC03%9;JE3%JrD>l!fS!&bvcNzkX##m_UT;X`T9&O5MkoTk&P`~1YuzA&Y_ny z4%(WHBryS_FMB5(W9n>a=CME%Z}a+9l9*okO}&ciDuikJ1!TQ-=ta%H{iI zp{s2_CNcHP1eYGQOHa?4<}NT65;_3#sEq>Kx4k$8SwL*pKm-y~=?yx}&TFXVH!r|j zz&0H8u@IAA^!4lDHq{yAYORQ2=-u{6wgdH;QuuLG?NO(dHE5vhAtc2byN-b635RS=+0u(a4n)&$Wp;WMq5ym+R|!u@gh~V~UVCE03=g6qQ8wpg5Gk z)ZXy7Bs8!^CPE8HQkfS?U}v%7QldqGdY=fc3j@G3j^AjQy0XyggE0EVhQgik832Mk zN$FTo$q#W1fdKfO;a#Qg*81i% zZ~L*|d<`eD;9qVIOs95^Db+!9v z4K?x`1e<#(T-==Eo)F$3uu-{MJ>$coa_msO!uu|U+q1u(8hE`?nFvT#^E>VKjm^c; z`m6!;4*)>&n5-MO7pZCdh0B%>-M&`QPg6{QI?subB=>5BY)(qDt#yF>C*@HsN+St1 znB83DFB>Rqsm1+g3%#i+pX%NWE*6soWgY!|!t7f`8D}F5g0E@i&FocdW_^Tv7=bGx zqP+_o2Oij{mOho>c(KsFNpv8<<1y{u&pQvbmJlNzk$^cK$jS~c4qD%KR|2v5`v0=O z->ECe$8`&;R!YwK{aXMz!;^mG8Km1x2iL$&iNgF0``EKquFfCo09xNw?|ZDlSu0VQ zqzx;d4giwlg*G*$$4qQRA$o$~I-B#c^szuh+%8;7LpEo;Y#${@axf{!5&{Xdx{{Bl&kH_U zAtziDKgMpR>zgcDTQt+0RlFi-9f^N>Kz^V0aVMjB(!jBCemmndey&r+y8to7| zL-`#I0;aIo9wFPR5o+z*J%H+>!n{2|xf$fCpAJ{voUi}YmGR6VeElak1@j-mqW)wa z@h_L&f0F5%{*kF$P3{0sD?u#*+OQo2iwZL>9q=CRep@zc1@2!#`ckXa0k{_|F^ee_S^B|95%)E&c01+;D%83jgHW{X?c>U}K{H3am&uVDU(3@7bZ1C;#^0by#B>3P+~Rc zGzLIDR%pDANUzAM8+81$;d11;B(~95M-EbMd!P^J;8V~VIX}0EL7`!vwy$Bpao%p% zR9j)DQ8(ZYhv3RH7nm^A&*V!ly{h22%3ZWA&7x_@3`gRK6@OA`t%fEw^RcYfS`M!p zY9Sv_3U9o*$u`-v?1Zka$jYkr9tXiWnF$(;8YRb+?P5?0*OMC)P0ZqG>h?+C2G3&D zaoGgs$em#oZ(y`{W>7T}n}rHrI4q7wjN{pe)ELEk)Z~ri73^6PN&vqqLVLf zlDDUx?FrNkTN-l<(tw-+A&ZT!u}ElQ?1aa*HpzZGh)|$K-a29*EJwWb>@b4i^uaso zK_fa0=wrD`KJYnvA>O(UmGAxjmMb%HMdVq@B%~Xd3m5|j^Hl^IGAqt~Ztyp$lo3-B~~q<_Hvx5F!x6K?2J} zQEVh1KUV;ahgB?EDhM+^;gc-yXVK=57xY zMwgeFk${NzXGITo?X_HO--%m4cWm^C6$MYkbh)G;-OK#)Yh|7YM-df^^n;i5|DnFoSIpe1Yqp4+hvM2&&~-K<=hhLgqZ=?3t(UAveX* z>%rfh87YBg%4%w@gKm9R_agTCoPPZB@H?DGe#>gvF~rt}uan6d2xL8#yS%_QR`ILB z+9Mb5Dq@1fFQ~Cns!TKUynv)Xb!i$5Eu|zB|kE*sjg`?1Gb?yEq~#WrgY_ zk}&uwf!sh-kVOK?VA3WuFq;JvsFdVWay;;3P^f(+8ODd~iU6@aI!i+GN#F!`rBI`) z`rOel%GtynVd*TvC|4b@#yp1G9%X>gCz5G-Upm^J40eWqhGvj_kluHeK7W}_lje!c z;9S%eO8*-=EOU9sIIDxK?=~v-nRx>Z=|)PkaPhF6n; z2l6%f$Aj?+KoS|boz;>~_=r9q_RycD=8W<={;(>p?Lt3x4FC>7?86VoGGz^s+P?^p z1zm3DvY2T{e1jGn{L(J(lYQAOzZ;QuOk@Q%$}SpHM|Kumh-BOk>H1&9#dEDh9$!OJ zoPs5c<)Mi|kCp2Xu@BW`AZ8|_3-7(x=~8r$okPQy4JHXg>`{kYW{*4(Bee`i0IglazEHPL`lhS~FQ_@G#hAxE#$d^B3eCxcTcxGrL*2`clca1;Cn8 zyL=)RJ^(HPHvfHKM{~EU{Mu3hp++HKPnd7|=-Ox|CLO9(TAMf#)G=+}=;+0n) z20ic$U>K8`e?mV^NLl6oB&k74EONuFfk$39K+nldjK=-NqkF+JgUza)kUJvXN=$e}W|bb6XhmAC#$ok0gE8=A3zM&2Z0aRa^h6y$aK1cT1C)qaOBjABm4 z6roqNZ0VL~rgrIle_c*+d%3PYLJ~+lV;0Cn3mJy*mSSIMSRx2#M`xPdmB%nc zx4Q)t9jz~85(~clzfx3u~9x0q0jZm8plHXB$wLhDb z8D6mDoH3rd9@C^)c$F{q2^{7-@!Kr zmwS)6A$zeTOgG{RmAZ>^Yufbc&Gk#m9*u%C6X+tEkcm=-rM(IWR6$esxYF81IMYScvac>?JN~O+(rRX2*m?4^VSoX8uTuzqTGgaQKv-YMyv6To$u*vJqPtHs3LcP#bC7 zB7i+O%wHpFnTe8}j|i)<#5N2Zh+f<4PLt+a36QVk5Oem~{hGYbn9E7_I~4}EI@&gw zVM)J|(|POSBXHy;lV#|RC14eqx84Z1+^#slt1{p|jDC1b4>L53g4;ZxNeK1uLk zy(0!L`EUl~5c%-HTA-INI5zoMp}qw%abB0GH9%3KcdqqZDMgK#cfP2%{W-m(sLCk7 zM2W+;LLY#58`_#O32^2v1&XMB=S&uhcwIZ+;5k3}=N9Chxn_J*g`+=X`N9a+$h$&= z@r3KPw#8P*#561n2J5gONL8F0X1dHmq;*smp^Psg3EKLObBS3__om0^%gg!rB;02h z3Rm4sd!-vM>hm4p^O1-zU;;~|_ApTae7QRq<}ae|^>5^{KlEgcU6oHE@56-AK7+VT 
zoA2jCpG0&jWkoj0P#`88yvaIY}aWfwTDajWBxQ42B6 zOj;h-P`-;Q9(=?XCi(L@dR;jZS8?vqF-u!B(e$Q-&#iknwA(<`VeY74wsqMypmNOG z?X?~+xc3#y8lR57$7`_1=;^@*3ih1`+5)SGuwD<q zU&TWf^PLc^s_+t7KF70pKy8MffhC4@uHDq2M*)Jvr+RIsPA}lL-%KMc9#p9yHZQhX zjDiCVu{$SlMK%C_VJM2xEt5&-{q`wF#g|L)cfcB<$X=T9mgUhoY>VOqX%qAzNiZK^?6ZRn?ni4ntFX9ZV1ma{^TxqnhVB3u`s)Pr*8p~%dZ@64 zFGa2)OB(J+5N;hT3M?k64lSKcT}#~2i))@|@Q^h7-zv)_5xNNAT$*^ zB>{@3fKw=I0n~MAy3jW-C|#BAgep2&#eE09L;yU(X{gI5-SGf8M9#`V-VngdJB}r= z-RflBB=c!$p5)_y16=rJCSJ)!a9v^6pKKdKP!fl~{*fl{xK+iI4O{?I+L`YvuSy;5 z8r$VmOPV@}`x9PfV1(3Qnbe?lgx2u3ue^aBmJe69b4}3{y)@EPtSCKv6(3q}|909^8{w;_-RCwM4`?+xYA!|#;l1n-;og7Q;K?kO#I@eBCmybhdPgSPy zyt2x1n}=O{P&bp3N|mJ3f>Mm6pqU^K4eQ*bz27dVs0*lBF%A!Q+*?QW)vh>fE@>m) zLRmS>&}ZS9Rklxfojn)!(DAf$n6P?s zWh80Eq||hm>3ZHn*tHF6VXEIa#G!r=eBOSz+ZuJhHRHW;Sr((U3n&uw=P%GmfTyj} z{hrkP8?C_H`nunp_F6u%9lnn=Wsdw*ElOvZzBW)OhubmcD$#t}7li%(JuR=O0_QNj z6(-x+^Z2djWOlqy#*NZ!lQezQl?mJl$v$3;g-2>^IH7QTW_*xK{Ox01wtFizb8YLW zybjwlgR!5bbm+VL<$6$iQ1dyjjaicob=7ir{5}e))1Gr-QG6}1t1Nq0ex|}rc7wye zB51dG?tYbCL{ea!k&ni%vghZ^`ymSR%?1uQtN5`Gc%9wpc3zs&fI;LXf6byb?*6t} z%bs2u{~3DuYd^`qKraS*w*P>={)eSVAmrwTFuzNYV*H_|3apU{Hj(~qCz31%Vep^5 z$RE^~f5Nc8%MAZ3lEps~(ErMu!~6%?h*#4 zPF)}3S7bYETpKkS7sFie)FD&XCW-_VU0Zs4!0|~0BwTUb@DpW6nHWV7(Vq{qo#@ zJ@0oj)jc~i5I3BMzU5)OB{3L%9Ptw@H+UqexU(S^#V^rLybgssuH1TcvP7pcwJh&h zruw3X(yge|F4I>klgmnHGs>X1jgjp8ys@!NqHm?R$}K!kyOri+wF)1u@@gf^d8Jny zO&L@wLsu&tUCuPA8TX1S-#omDR;=7XzixAmYf>0vQD7f!7g_64l^73^R$R8o{1GGqJ7V!@yMd3K}+*?)iO+ z))`D$0BF1N*x7brzMQLsaf;gU^wlA5o#mAB5utW2oNGitjYTxajs4NhKI?jvL{i^C zP^lQ=GBgkDaOR0K+^^vU72^jA1CHm4oLJprAx-dB|Gs3)-II(Ervfv5!TbUsQWdd_ zl5g)V9%U<*KKpxzpwLISq)=1wVh}Joi2$7*j`Hh6R62~wGo~rrs-3v6okU@9XHD%B z7~a@QEVKt>!Z-B3pN#@$!p~ekz(5#QKUH@Y@ZpYYF8IDTaibH)xPUqf3O1n37_t}n ze?x}c?S)LG*sEd4EzKr<^6CAYHJ-vu(@T#UIg%hGi-S4{$tb5$`oo)r%i#VI z18uqUT`+0H6% z{T{S~r=m`cM%*Otj33sxV~CDSgJP0tF=RR<8F3O+Lhz2-shfSe6yv?x)#+lB@j&|Q zRER8WXL@+A-r0pn6?e}_HildsC@TJ#G55&SIHJE0n?Evc^|sb}H?C>TTWJ{~SarX# zqS{bBAc0!)x3y?K9w!^%5aGGRQ@%L~#g>$`^r(jBI;E4xJi3_^Z78kk*YZs5xIvn! ztefmn5YzejX#6?d{1@?vAMQ0D_WhwbTck+7mwXiG_lKC*2ZX&rsIgv@lC9vD7%^Mv zIZND^oTx_SDGlER1l-Bt=wq;F-u{GEm3A#$SCd-KGPV3tM{wP#iijt#fUF|GA1Zn* zEQ$3SsXs)i@EpjRmE(aGSrCud=cUY6=U90!6xMQ4%r}mzEsl3_EE`qCPWbDXL{Vlp zLvN45dVSrTY{+7i<*?=j#N=8iTxZ?f}$k~;xI68b?qG9_4J_x*MO z)w9v^aF+HBX1Bo8J(^k9cBmzc()2OF>kMSQkh|M8>Lmm6UX5MH>Ltv{;(sib*U)W#Nw20o`m&i$!osxn-_CclnvQ7d)+{?NS1cT1neb(T38IbW z!o-~K)AHB@I>`i=K*HY6R>t#Tx{nhaYE#E46XC?s1kT3id5*2}bl~2AA%NKLp?sx( z!uTG%LVquhg6Qo3)D7AV6k5Alq9xX)n$ID@>oby>@#jMmXK!R1ZWidlwPyicNT2Q0 z0q+A=%`|~j@frk49^As+@UbpAUnKr1AEE_ip+(gwPY7(LS&$B0`&%`nw2#K=V=(HjeLb`2`nhj7b3Co6tg~s!@+6iym4vL;Hbric@9bz2XADwnFAWQ*2Za_?}&-=Et zL&rGXp7TvC&4^&8=AWX3)jw<#O>xyz-2;w+qhSo2yevgV6=LTEVN1zFVi_G3azS1q+g>o#c&XHRhnBCZ`CgF?V$5{KY6(daGK@AA@p&AY zIh)u6Ga9oIjfm+se)suBmC;z7q%qI;n<_~((rGC&gyz1IHV0&B!V&7BNB!4~O6jNE z)Suvszs+U-y&d6SfD$|Nf51uq?`ngK}u4EKY+Ejo`e*sBn_{x0DgNoPFK+X zY=MsD56ayC1w<_W;~JMgnM?g$E&D4#^ta>w{YU%%7SY%^{@YyYclfm9f0ac3u5mde zARWNi+CLiJS-zpHzgmhcmzB7Z>A|~GMJs$z zLL|QaV1J*oyTgvy`{jIGRI?{tBr9ASiw}~3{m|P!g(yKOsQv4x&vC4EW?#?Xli?_gsgW&tfd8?uF?HP1|7tF}I_UYZ4ok3QB|Wa*sjp z)uc+6n#o}HF@CKWR%rm0VM7~#G7)j3-fMFsgihYC18Q%!lik!_9iJ~ze3Orh#p@JT zdwVJ24D1qODf%OeHng8mvOQ+474ns-D#eD&q0M2DJmZG7Sgv%~%_FDM4Js-%P7m5# zJ>gpT+Dd;ps6iUenZM(c3-B9(XtrIEY$ zPpwT4QETs**! 
zvbBQuDCWYJMg43~ICQU&BBCKzIAeTjx$#7Pz%69qXxuyQ6#E;%es90~s|;!kt!#*( z3_H6B*elaJG&JoxQI|20o|!iL3wRseL!tij8*w3NzEbc8>i1#Pl=vTiRhI;=pykwN;<$^>a`>x9r24ofN%2Pnb^`TSFVt@?;nG= zlgB@%QManel`#_bq80|%ZwZpS3^dzvOpEdGww$cHlz);N zLod{&6akHZ>zuz8WWCjn2By+TfgvGSf$Yvds?-Q^fX?GaW{!Vj>P14IF14d1VqH4| zozJrrd`$je%DAd-i}pF2TVW-GgXeRF z)h?(AV?;oGgNhMD%nC&CB()rA0=!lum&a7e}XD|K>ai;rsBRUcjl;f=73b`CTKHmxhhRWV`H31 z8qnP?k|=_wQe#f9)BKIgQY;LjF?mz6(~G+I;DM$ zCtmqgbH||Z89@CI!;AI6;9R$HsO7>Pb>eCgr*7vZ0cXl4*#ohX%VmH8-a40J^Me*^ zsyuq+9UziNZ9ZcIanetucsV^^)Ey0y4}H(uLNgaR%b6RSr0yY9j%D>c`sMsK@^_Lx zIiO*D<%lo!{0sPP~^4m5MW-(=2RGb%*6la?-nI~p2VfeK?x^{U3=Yur*OHvmQ_ytGG zm!Cy^=R#kE%)qjL)5fL8U_i4b4vrg*O~U#(3*ILi>qi;+M zNK@wx!+W#@ELBCFe6d!@OoC}$xuGpLTnvf9^U)Kz^KMHR7%`ZMZkrt&eAe@h>wI>6 zG+r1Lnr^nsMv^Nt?3OGU3WkmqOj=UFl$x!^ zKvT2=)W5Bp1S}-Fv=Y-jZ&wua2!+88N9p+Wf$+mQ^5V1jAk4D1be(6hWN=o`yfa~L z$sw~dA*<{AfGJa%0s6n}^z(UgEkM>?;2yONw6Xupb!(RvgIZ(N+ra1I&__wX5{shN z(xFqWbGy{S>h0-Xr={(DDB6=MUfDpau#(}b(q+41bx5St`PhI{y-$W?6!We}rT3n5 zUKQ5?;ozvk{B#TekKxJ7S-_~muGUqext}*=l`WH6U1@l&(X0%-P!d}&%USv|0B*Sw zvEx!;DoH@bx%Q*A?XJU2aF5MS!SXmUZs!5TVksOE z{SGGST7%Uk61q&kEY;&Pv~VvHfpJ;QLk#*Hw{JSlV|PZg*fSjz5;Ie9a6^*48t)v$ zsK6^)!<$U4WKP;x3!Vqyam*L;VRhL^chC^e8A^i}l`DW7^Cp$ClyAal9S=M+&2rt5S5qOgxznLiQ6*1^n zOR7$ZoxG6fS5@A#K>^Jl=5?5~*EF{zOO`*1bW*MgDps~{0*nO+!iy1z-h5hpB&eW=`DXmWVq|T9pf@6Pi;xwRZ{gitP z_p~Zu{ftIKrg@^O#Ca>X1kRivcb^hK={BJX^PL%3invqE(CWuS%*T3u!qx`(`Ed&( zqzJWvjc)iWYrd4u9U#X~8hM{MC#dDII^QHL(pMDP<8!-<^=Jp*>%FAdlb!sD`PGna z010_CsA5@aG&*q3`B15i9xI1~Fz6S$K0tD+sLh|zjKB7T{R_?bPkuVa|NW|)N18v4 zUuOA(g#S;r@%PB&|DEyc-y<~sl5PA!iU0TGS4K9D|K_KQ))-Ix%{IE9)t2fBk}#}? z7#R*BXsQ*YEMdWcVe$T0=?At+#|F77*wW? zV}pOV>~4&A%d^W_`cZtxWEPMoix3LUmYAo#{~~cAcK2BgoJid6c4ABB^vx07@fyUI z@)-)Q^g&BtR#tSNHx=K``#WNJl6!g9a2c#Q5LO_W)5}Q`(SvUFWd)*@N@NOJv#y&< z65nNpeX3u+gFq92#H5@1+U%79odH#*B>6nbtL^PjzK&M*nG!cgM%LKG{k~ZBaK`~S zxTpM4Rnh621l?wo=dfXFw`w(*KsmPfk^)X;&6p41SnXsHHq=Ix;i9MUR`kC>E23=qZ?jUC@!%HKvkt zDcY2>c4nh%Ie+7#D@1+0?4-L~Lr`}@ZFkf81!OW4BahI$bsBSj9VrX#!oHNEc`Hy* zf%YcCg9`;Q)>0TRy~U1=GFcJ{IL4s~QJ`x6RXB-e^i*E4(Y==F5D&k85b5A6jOB9N zFeOJVZQcIE0wcFXDAjlJOv^%tKkREMM#o4qvtHvBZ=dmaQ zxRnNB+}~J+IWbQn_+8>-LXS7HH{5Mf&5p0@>tVC{{T_MzVJtm8!-%lq zDZwcPRQXB%YTrtPH35o{yY4=|l{^04!O-LO(9oE7rbmfAH%C0+RQR6xSaMJeAve~x zA<~M@xZ{by+1ow2qCz*}0x()jbwWnL*Pg~C8=?^C1t~hOZ^i(}Fk#caanbA$Ls*dz zJpln=v^-^nAp+u_+zT|#v$um&!&Askt%m_ISE%@|_C&@yIJHDkPn z&jWFx0*tfNuqh^>zDe&##-OD*)05f*uSXS|MFvGL`n=^H{I(S)K0)1XCXY}d$wS4y ztqo>yQ7-^o?-ooX#+Z!O*5EW1lQG4hI*)@qMIDq~=&Y)V@M9^$QfV#X!leY&Eg}3~ zoVhp?<7%KH>=eAZlAbf(Cud5^CB(x(S(H=61v2pVCT#zup1+E$(Zr!_F@N$Ay=$J% zl-wLimznN@RtWEyWY;S1mOlr~O{GIjKB7FJ99R!vJz_hl<2NYA;l@vI51)d4-|wgvMil%a?7*Ekl!}fuez; zf+OGgfmysU!sj5y!5zXSHo^2wqWDzdsI~+!&XBCGWaWtCP);wK~KW3r`545SG;N8CnMWk7k znO6}p2rSIE$$ei?Ru)CRf`af8ks-aRf>@%j{>$yi7tFjz{SWS;GfS0AwMHG^-&F&@{ntZ-b zrcjnkM5B1BHXKmPDQgS~t-uK(1}f8C8nZcb04E@EtHaQ>RHYQr0Gp2P9eVhR-9Zc{ z%`B(aPfKP@(pc?5K&;@ftqtWx%_vl;uHf_GVtDmd90hbC2vB=z4KpVt7yjsUK{eHN z6@kH}@7`cc6`90Qo5YE(n2X(g6o&Jb>oSOMZVLC_L^$8 z@Gnz>Wh+$<_CIa3Ibl~9hZ6P`aVngHuq`SeTfZi0aIe|oyjT6IFPY#*V4l!Fm<;EGcZuW-t%hT+Alz`-_IlPDaIjI+!=u^m z`Oy_i2Qf??0meonCfkikoYk4?!!eozgI;pKOicwz;|V1&4i^<0v*JR3k*0jYf zFOi79Aq~yfd9&b==Y_iDEKgTmQd^sX6TY<^VQslm?<{K?1Q~Q0`ws*M6!LR0F*u)* zG*AKG1?fN^24M>MfGAuz3yhWjziH0?g=YE60Fq^sfZgNQ%#juoF#diPN?n(WhoZ!;!q3heW@CoLw(ij(!fw8>q& zpQxagbyv~*7PzNLcatRnAHLQ9$!kWEmc^hr`=lmf!`ZD*=604X$Z^M@%SdXBd?+)@ zV0_tne!Y(lwjr@L?1kmqn|b|+>rcZPNJ2(YGuHqFHmx*T$GC|Mhr zExN`!3$0$qOhIGhSHN7>qgA0nDYySc%m(lX8H{KCI$Jksm=#TBeqD=iEM4 zIC2YQ@-`Nv^O-*}q!@qfl^}~XM3RK&6oMmA^-aPS;WNW@Fh`>h6BjoKaTYf7^-5tT 
zOc>FvLo~noc6Pn~S1`-#3G1J2tFrvHW%OT&2E$)(tNu3K{-fySe-g}M4)@#nA>(f? zMZT%nKeq<-Z>=8+D}+gZ#+m-$VfiQb_WhPGcD+HOCA)kPUxA5FPeSexCN$w4E&7W2HGD1#>@B{u##>|^ zN6|cY_Vu>?aZ_4L%jFF3x5?+-y6ZJ%@_x|o5`4%2{#8)AWZ+Bm(Fb^O88iq7Kj6Uq z63^3CFRsmJ@cuYR{OipG`C<0gTa!faWX7qr;K#3#$4_~XkGw#eEp`WB_ur$EY+&E5 zE8&Sr83Msv8}5X=@4>998 zF0p@BrB`__S7h_t7Z#0{!fsym^0d`JY$34U{DPwd1QyHlNyYu>DA({)uHyJX%I#Wl zJNZU{@vc3mA@>r&5%t{RY*)IYOhqNi<|5q^xh#21xl7a;#oPJloluuUwdcqlt5~Y) z`vDce%_fx6=vtV9fqRQpCxTeo#Y>kfjY3xb_ zi=>QBz?w)wgKm@;`L6P`d~Ve)qsD2KmHK*p($yRamYqGho1$q$t1I`H%~?1qgMrCz zdBt4Sot9%*34#3_$dDajG2g=zEWgtxFaWO@_{)O7OrxluFOP}|nOD36bK6Sfot*H| zthTYMKt%{x5~_;ROjd%I-`IqFh*ZwOKp#4GRFgUd~Q0C1mAingiNSTKaggIO9yw!wH0h z5ru)`oAZtcO$y9O6b(4saaE!-OFv|OHsIpsW9t%Df@FL586!(Nu<>44O56Y_HA|i- zVf+ponT=3H5-YeZh%z_tE8o{k965ecyFgh>{ROHcHNn~KU4ya*<^}~tg@6_~foyAO z>o}T4bXWrl(r5?N(3_n+p3TMggaap0NZHK6iueK51DDNy^eeU6d)vR&p*eu>Y`~tH z8S;4%4@!X2J5D6NN+~XaAmG;lvy6HrUL;jT-rIoQoDet(T*+c)!vKM_(LRzFy3_X( zk65Y!eNmxBY2#goI)E+WtV*$l9w#E9=B$YbXjDiIQos^gojq$7_oI=dG3etvxVd)O zJ^Lg>)=mh%4E?1DQ>#j_;v6dSL1vVMd6i&f7)W^{L{MrM5uC03t39<*n2NU7MIY*rZaj6|tyDw2LuO~2xEDhhtju)P=@TE!@ zTc74tqP>qOoi_8=k<>_@wVjKqJI`<%x>pl_g-4CR1!K6M*-}Frg|gK^;bFez{+wk9 z5kL|Tj6V!xLEl#FXu>Dw5oAKtLq%HM5u-?KGh$tQwLdw?Pr$D_0+xdJX%HC5*AeQMQY13(Xqg)glM zYlvJ%piCOZC4qYlP7GOt9JGpd`K4d+v)M>av_llp*`XU=K3H_H7yXvlhG*wCo+dK& z1N?wToZ%50p;j}QG~C0f266F20^R{K+E0PZ9c= z(sLqoZpYthfoB@#&sk4vtNC{$T8Asc6U;ZkJTsdh9^zwTbf<@F;nR%sES1QT20Txc zE7yY85!VMkJ$({0kEHV+rh|$b87)X;KYcP^{K=M7J-z}cUe-3zka-2}?}8*aKO0?9 z)7|`VE~4=!zZxP&6MR|})cp^KN)|Pn?pzr^w~W%NcI21=p2Yk~t;&Mmy%6^{bt$_{YMh!Yr)OC9PB{|1;3*C z{B5@+Xv zLOynZ$fUrzt`p`xDgctv$a2$fWUz!`OXHpDnLu+d!Imm5W{%e?H9h$E%j3JH4sQI2 zS6Sf^=aG#l8mIy8OcTk-pqNCCK-P63VlzSmSSDuI`pIo><^fs_6|xtSEq=In3cad_ zU1M_$?p*m`Kg4lC(ujT+3nzpGyl{+OA_V<@rNg8d?78z!Rt207J-JWZ;+X_%Z;g9-2i_vK_ciT#X3e^%!g6e^_RBYXBHz>{8uSl`A;Npm7tH^g% ze3MYNEhgDE5qjuU0xB5W7n##GbIxk2eoW=;2X9xbx9L2)?tP29-!pT&3W}j*oK#U* zDUMw)$xjId;nVO8+O~7IdWT)zgT|4JegnG$rk7-YF6pD_l@$C8Mp7-N;nw{S7+L~P z9mf(b98hsFFxjjalplAsLJiJL|C(8`=|SGIvJ$vGx9 z{dGf0Xp%?%fjY%pEeWMT0- zbRJuaj&jksp(Y~>W4o=K<@SbLEYly zNg2)F5<0}Go2DBVUK%~TYc zT$=cm-^c(Q*Ws0w7gwH>NEI#~x+l)H06YKsAeE+rQ`CoVhJJWygMg2cF zzWiHT)Y%%k$)7wg>z~!Tu>O6;?telS{;{0*&w7`?6=?qT;{V@{`_Hy0X7>MPi>lK2 zXNY4vhJ!~hbs)g~hD7$zyCI-&-QyfFi&!*AQEFPk5A4^2?1~#Q3Q1Sj`cQ*7>4f2q zY)+So)6w2;_#vfUkLJyfw#^VCgigo5r#SjOl@6dQ+=$f2+2yhIxNo@k;2-e4%fx$l zARHUI?RY!y^g1D*Uia>_wWG5W!E6QlcKlB0*1h^zM7E7T7KonZ>#W1!5_7vml|3HO zJ-FJQo6Pw>G}!*weqx1gw7=WwXAN|S*qQX&u2yse%YDC|MB_s_hG@3SN_Vl~dO+?) 
zI2?6F`KnGepO?%D;mzf2^GWcg+Sd8YI&f)n?B@*&c2re8Ke&s9*#BYeoq{ZF*R0*l zO53(=+qTV0+s>@CZQHhO+qP}juJy0p-Cy_qV)gDn`f$#ZH=Z|U#2n*(u6vBSl&Doa zsb3FS z_8Y5HIJTg>z?yB_7atxxd-s^xjhCLZ0W@+To~3%eH?V+3%&t__8|3B!a~IgT;qNW7 zI%UmIh}-Gy%Y)nP&S7BpLZ8R6eF%PeY8=y&c1sg>8jmg^z-XZ9oH z>;fYIcS7DR*UdqUcT%e}=bx2Yobt87O^k$fjJm8XgZmY5AUqpc*OtTR6j5ZA=U9-* z)h8VCPaY<62Aq4a54*G>8*{m2;#_I(77ZcxUJ`6Lz?N5H&YrXh7T)am9fPb_0NM9a zu>VHt2qK6-C8!4(0HYVhmyPOsIF>6l{xKadDqlE z{?s}hA?P6i{^k%()+M-leHb713$M<#B@8VO5RBROmr@}RUyUTkGEk)*?ye9+R6k)$ z0ent==k2^YvAw8t-U-|Sme@(>*HKLcdT|-bM^K0W2cJZp-7of>P3P<0d_P(nbTTED zZ~c2|B8BU1*2$hG33V1PqF;)?t;C>Fh;KB02q#t2q%Y|A;W#FsO8|gR_)%o&sk5Zg zM-;nCpgXl%QOIl%$)D(fG{vK{{scl?+A{8w8TzbphCUdTQi=mEwDeKK8DP#Y)Z@YM zDT>q|Ae=M)#8uG_0|2Lu)sb{|kfjwbRt5hYP+-|#Y@*XI2{1X%OOUZ$5CI-l9EPrD z+L{Tlu`x9?oA4tSv4S8~j|tAh5=hw;IolKhB+V@0_TbPN{-EiYWq-6Sls7XW?hL#n zVox6zg@s$aa{@Sp73?B6OI+cSY^1m$TnMXS>Jop#E6jwLdJMz2*lg;4`Fx* z2CRPIsf&QsqRLOZi>mu(_m76XY7_W=&*OO#oPkWW$i{Uez5`>qq8Xn?H)I^S@lLs4$rXbNHuX;$8|CXUk&(nvzo&jN-Q_>_jT<|<% zCHm~QK+_}y1Tu3aIce!p(t|gQ_qur}T`BK(*LnJog(;2@C(ubjdgI(hMU|O_e8ZVS z&e)H~IGjHvp)7Sv9QZDE18CDSTrAR^%M9U_v%$+i zxbD4iAOo&p?vQ|1V6D`}{m-*cGyRHumJ z7bj&uaaSf8-5sCfNnU@Xzt(}o3-T{xCT;2ZW7P_<;a3Y8AqyGdNGPOa7k)hhfQMM) z+Cp8QkoC*&Hu_4k3-Q4USGkS4%%3~}@#g7r9mkM+n9<#SV2``3g3g|3Y=EXOqK&Qt zJ39tgwOzO2t4WVV9_O2(3cnZvIEgI$2T!7nQJ3R$JoC^Baw`R(bh~QmvMhOf;+nfan}kc8rPX+F#?T8Z1n4r9~&=)6AqgY-n`8^C}6r*d3 zc&~o!@dtC81T6rz^Afo^>}*O%`9<9v&uSF3x|qti?Df;nGsP;fWU@ykr*G2@)d{k1 z9iW`_;q08R96c6sp0_t*urCptg*H|^gvpO*fEl-2Y6p8gZxQH^$+Y$|O?pV;t&xkJ z!Bihs+1qVX#}Y02{e|k9V=G>}?jsvxSiS8_*563_0M+bZ+<&J({}Md)H-qB;qL&%y z|2x6_e^G4nKj=s_hQhiQu_CaJL^9Qo&OyX{m)eOe>HLcKaczGMKl{b<6oC8 z{~@9eh7r7TYEDN?@aH-c9Q*;gD+8RD1^lyGvb<08G!b3d3;QR`j%IF!y4gy~Gmy(B zB4N;hB89_sbXJr~(sw=&3h0`zwq9ObcQ$vx`q5r8xON=7_}yNac2u*$nb;z^Ki|Dh z!mhF|+yQCtx%ix!_$CLx_O25?Un7tvUhuX#(k`SBfMuosf&0{9js0rxzgw7$x%~nI z`-A&9CSOaU${r3gy8d8G;0hW_^P+s)o|Am_`u&h)%VOxPNav^qVGbmdOTetLZX3e& zCVRxC@CT@C)?D;%p|}VrWy{|%k6T%a;9ZU~b9L6M9x6bc{z~Ecbu?@b{r19FKWc~E z#@Ni$pb(B?ll`b!s@ll&>>eBAO}5-*BckGJSfbl_YBB0<7r8G*<$2cOBYmIZ0)$vm z&XU|%xelroSnqOM6F4l=g!0r)bzc6pEG`&mh(&;Q|AP%&Jy&epgZS1qJH_Nz2#*^x zOH8|h;bgqkDe-tUS{C%PSb%rI8rVgT-i2gDvy#k96+9mYM5874YJEH<*%4}f=^&Xc z!RHJV``RF-eS-e}lp8WG)?NvHV*9!a*J=&t-6SkNUde5pJ>-paUqd%@{8)+tcc>iL zosY%fLcc@#?|@BamarHSDH_Uxk{eR0f@6njGN^bgtl(yKqRiUGrR)B|ast!pP2#qA zCcu?|rx;38*61+7ixj_`;DpK6^kslk=w(fEYH-wp9b z+3@X}N}S9qj=qqT1QVT76{@yI07E-?12NtveS9yfjN>vkf#1j<;?tSnRuYAGhp~-R ze7qIlZ@ZsK{UX!PL%)yn^RPR0(PcQYRfnaAmfFs+$cW+d2=7CP0XK`{8H0MDVfP}U zaiK{V!7ytIW$+SEy-FNzW1R6Tf9@4&{Pf}5O{Oc+_z9dQX8rx@bm#v1sPW{swO>;dRleP#cIkNhVgCaGBRp4`J-p@3lSZPl$JL8#d)Eu94RDA=ul8C z|I};Hfjic6%-m|*P6C5^EaP83k)n9ijgHshB>IAnSSyr9=P%| z58pb>9zE8Kpu^lxKR+55vGo@68z3FCr^~|PY=m=}t;6)MS3O1_dJ2dd1{BM=Q8)ed zn)WgUr1h;#Up^#MEJ!1?M|!d_hm8(j)#27 zMH&EY3?R`h__8KZOr%{jl+#5sO^2uidWZ3mgE5qzCe%^122c6=g+p1O5o%l6PQ-6Jo`(o>AKfiMT;bEh;K zCV+k|Y>OZe-5v!PC&pVfGDGPuP!3SNwH9LXSjTI!&|_lu@*_cJmJV8T>LYhGVA-&m z_T=M=rHC_4f>;C}Hu0#}CrH7yD0|Rc{+22l{E^VupwJ<&xtN9Keu)M4M(CKeeK4=|N43k= zbydi3wLWt&B8@oglzQmUXNyRs<7%>o(w2J~nbrdE7TNdV=m|${1NkKJ^ ze*l>wR6B}i83i_Cx9A_tZ`>B(Uep(vD8h_2`w!C0{br|{Sg7>p5btJgttrC^??ybb z7OUfFxm8?^_kKY583Bs`>1QWlSYbEm!S|OgD}M&Y1GffhWh{$JAJLwf^$jvz%OnJ% zJtZBc!oUf4<2kD~`Y(D9SAb@BR7x4@L@n}Z^Z2o>Uu72N;lfY9JIYqRdWGnFuFInG zc{L6^8U#wM-iK^5W!G1P^PB}{0F7YUTOuu)Y>RC&HDF)UpD#Jg1?zwl!uFNK_P%7h zfqi{f>zW*DmT?@&!upc+3k* z8yfovCx9x43xESPhs<#`cmxP7IuC7RZ8xso1~eA{gTnQpMyGlnQ8-0w%~Lz@&DOyp z@|H@Hf(D$e$fAEgm``bb+5(8(s-9P7GH6K?D+IokkL|Jh4q>PBM-^YO4b9$_Euw@i_p 
z&%}_gHk(U~|E1SnZP%xP+M$J34oq7K&q65YYunFhtwTd2<(8&Uo*4M2Vx+^68yjMHX6M@fB)$~5xpJUa!{ltiGTWQ=Zdu_DbRlRB?fSHA|E^&BeyZw6aN{PR2$=R z-bfDz@yTRcpLcT@uU1UMBqSnUmg_pe^ji37v=~Zn^HhChae!E)=-QC?`Ms|^OVaCp z?{z8uYB$@g@TN?!?#g0DUskKY>QI<|J%&!+29y>>e{-Syc;&}@t?-?9D4)k&_>_tI zB=1yrv2J(oK>(X_%B#E}+bGvm$%j~HJ<`dPl8Y&VTSsIuIW60~ZU?l`Q&uV+QS3=N zibLx)O#xjOHZ|t8FaA}FqtC!h;nYXI>&^lVfHRmwZSdi>!H^men3E47&3}B~`2|qB z&aksw#7dgZ@D>#x^vcYU^2;Qqf!I=i=_Rrs%An%-VM-9TA#$5aX6U=y3s!CJtzSvD z!4wQ97=vZ3-tAaH%f!~5S-^M$O&2M?7QTH#N7{+iDA#mSV# z8eBF)Xd#>4qz3KS?sXpg90nxI)f$S+I=F(0%$#*~H*X&@ETb%4 z!eDzFhr_3nZaDu>Y4EgGCnxcPuncepF$F`>4Ks&oGNTLEhbBquyNR`a_Oti|tW>ho zoM%bDDo>A21}5XCW7PP~_02b3FTjRD@9W`Tx%lGF@UhlE zjI;#TiXnrKMdh10{(LmnqD05k;)j7CAD4rbr3qVxv$J1-lc*vOYia-jrw1?&#)J5~ zL;M9*`VY79Plxco<2L@u3jIsB@fXJEZ*1HDyUEPN@}Eccf9+io*WQ-E5CFItmdIRP z13--hi@_r=C$U!(qTEVkc{U{$_=nx-pio4s!TT}5gsoeC;c&iq_Pr;EM_9MefTz}g zr&^&dY}k|~VgNr(ILPPN1TCTnDuKCw_CSr?p#dMv@Lg)EfRL!57{28GJoj1|CqnxM^ev#v)F5A`B1D>pVP$F==w{B^1IiB$uwDzq(L(QXYQs#S9ydebAnrg!v1(>0-u@= zPb@(0v(8y`0Hd-~RpqN95bOIqWsPt;fO~9^5Hf$#+XrIo_!$~VQ1wZdnFdw1Gs+7= z6PHTm$$@BDPnhKV`Yp{-4@9u@3C&&JO^dH@!p%%jxJ?uR)h4O57GWLQAgKRB#)et4 z?vdQdk5BgV6krEIc4`0QfRG$`eb2gvrDKn8A z@_vzhTqATWyvsMvhDv$q`Y?e{%uKK7T%XMAK4{yu1ZbXMO&mK=#HEDPep(e??w2CX zyjoL6+i7%Q!nxh6ps0*_d~T>HU>)7@fn(8n@s^G%a>sFlmBu}d*MUtG3s06EX zSOmUTD7_`Zk(ou=(669E_Vsan35OUP;g$Oa$%yC2&SXiVgZESUBS9>Sp;E?R<1MR< z^!xDm7l#nr6T>J$C7}1V7trQAE4T$o)exve{YT!!{8jZ}vRj6`WnSm}c{yIiZ_Tpv zh_hQs%MVZy1-C>^$;N9g5;h?zqk`_P1k}9thkmC7YdYC6x-7)3b#WH76K|oYuxxrd zY_26WYr1AP<518F>HIeO!FhV&jYk-dGpdE9%y&zrCzSL~v2I^^>VWo4rvW=btYkEH zk;^JDZW}n17SxsdF(E}Z#T8I_1!fR^o(_Bk7R$mwcFE;KUvdA`d-HdEnDHX@LDvWd z!mYu3Z0BD7&L3Nv_6o;_3y~Ldj1}mGb3D~xo74>LuG8A5h9FVSZ^K{Jh-|nD7fU$tyT_`?k*bKIo`!PW|dQXDw;}eHh6l zz%Y5e2z?#4CN(2sBCH>aKy+O(Gk=KA?HM~z+Zd&6v;Z9vEN?xpdv_6Y5J9ir0L*YK zy3~5hEpq7^pUbI(Q|Mx1L+A%N1`l1g^^o{1I3cS&#^J6fwa_6vU(FJh(~5;&+KduR zz;o^U82sFt%kZ;7n6GiyWKOc5qtZGMurHH8VlO{ejUQJl*zx2b#N$b8spPWI7%{P> z$Q7bp$vLILGp0Lkm>K93jDC~<9VQI-8u15%GsNrQTMnk@?(t)M9eKAKROXNTg{1?0 z*=zc4B1Ze{e%pmevc1#>!V!(JpoZ1>JY|~Yt+Fk3iCJrox}&dE1GDB8lTC0&)#Lr~$yJi-iWXic zd_JD0gvcl_!d1!8aU=+>>i!EAL;<$Y}d0LNr^`9+-#H5kjSGv)n4tO%q}ST7N^!`BCipN&tN zxXqQ_={0r`oWZP&Wnv~=SL>5?>xpm$bGo~LQ1KI6<}F}Y&-g-(Y1PUD=?lh?nGh(55z26{#jPkEQa>F;p43!;3hS3Ze`q4^T)WSNoyEDG2nc;bMOjb= z9;=(Ux&TD?VVhto7Hkx=^j`(!3>>^%GBod-bO_hu{Qht}!DbsSiSlm3<4)KI+h)TN zazp=4o3iTlLptNg-W&gfZB+i=?$lJDmUKi4aos8tNZrQ53_ky5BbK$-NMNE;ppC`j z?ld9!LOxavvTtVjcC#`<ov9=!yjHW%ErKS>bz% zcB#$Gi^!W_zkd5@uQa)B!s+~3=xMJpZfAg>{B(zttz8LsOGx6O;>*mb%e;~H=dV)p z5GaKJsd*MaAGkzosLt2lYOf}cF_M=Fvy2jnM9%SC&WuLMdR`(}af?X)iO@UlpME@Q zlxNx!8RSCKO7C_%qhvz*YI9k1B|5B6%Q}=)gPUeArqPY?0bBH&Lss;5+`L&<-_?!P zFD}?WZjxNeIj_xEZ23_@?d^*|Z`Ym|mt7h*pt2F8r<s@YN=M*x?xxVY zW4`H`l0F<=hf`I#7vZsN@uv20ZUjNH>`|kk@(m+7+=I|KcewyAq7eJfWr3n;ZE$8Wq9w5dWjanN z&o~&@-9IDvUjn~>U_1$uA!o=87)e)p(g@X$gv^vU*HAh#1KpmEV_QbXD4~jKq%|Z< z5zBtgVeGOvi4F3`BLpm%5&|+j!sVeKpg6N;$(Za^n*3v69_DRG_IK*jTlMertg#AB{ z`)>#4Oe}2wsTF=xOUrSy8O7&MI-!<$3@QO_r#mOkgnhaMmOJB5=Xd-!zx5%Dv`UGf zT}jnnq6ud=R~!ItWKD_|Y!V{}YdY6I*x6HVyiNe#?=xH@H%}8*(8%<3we9*xhn@pL zn0JYNe?9A9!pHrIyn1~ekl?qGHG{W%{gZ9RAd@OSI`lPG^m4rk*Ja!Du`Wmc0G{F# zTEmM*SUCTIMZz0_M7sB-#yd&fmDQ79mhurFyuE(HeCL7zE!&Cra=aFhvG+BkCzqNq z8j1u4Aw!f1N(@0PSudDYh*GGSMhoRFd7ed>^bX+YKo>BCNJn?SAKb|W#vtKdQC>+) zB~#y}LsX2N(&qN_I4Z;a4rjVT>W5IX@lg#SC3hytSvmZCwau zReHGxwm;E(GI6D;pHoY0X?^Tyh<yG8RKx_J}(m zRz@>H5-hQa8m6m!IGEU|BA1fkR)F8F3+S%t?OBuwKCG#>4f((mYM{D0>3}$r22N&4 z>>(XV|McvM4+x&kXe)s}&WW zKPQ+uvl&XxI$M>JSO=*qkOA?|12f^Pvn6RWHSQ7m#+%lZ_%+sBYRrUE*xpK4F9qP- 
z0n5u1319wHmW=*Vg^1F}RNvvar9VeJQK)+$$B5dj6(GkP)zu9>X5C#~mJ(9hL|qqV zS`~<8y<9hg)j`&w`mO)k&8Xd7iwV+jG2oI>{WJ;rgM>){V6o?j5G!?v6t~2VR znu(#nIzzJASvoPy^`QdahEU%wbLNLT3RP-eyB81H^l(rj7%1(%c zT>^|>(} z`Oy3`qawV(Z9EZwoEL(v9W|eFq z(JWkYwA8nCtywiGZZs${D!pe{`NWd0wGM_&$*C)1>gC;w^o2u~=`PP^#RC`L59G-Uum{PKu@G6tnHs5v z2AC)|j^+r&l_~jR7~^|liu!LRYe%~j;I|q6>LV@N8acExN5Ov`qa$ zT2ipB34zELkT#sO&m*0pX*t9fu$VUbnRu;Tkub*wf*T_FdERT%$w)WUOC>zjHC&$E^6nb~1*GHDFeX)#IJSRxeTWd3E zMz!O@#z=x|hn0g(`T1obL9`7QW?6~%hhjZeP4$@_5P# z*rGyW(;ni3#Bj}I{btxr$^ZQ!z$eEJM(p^66rlf(^Su9CY)$`I80d(1HFqk+er}IGC*)VM87g7j2QnRU?_(yeiyL$Fa(y_ zX7N+X73aGhxZjIdr5OLw-v*UO19bDS(FMNR7BdkSXMwx-dbWu44I&(j)#9;-0eJ-8 z!`D=*BSR7sG4pwkf_x8;^y12ES1_nRWPm~D3Vwl~d*8WvA{wag&|2$J-uFz2?)O+H zb|Qf<>915U7aW+t9@kN<&^-!U=JiwwSkOfSKma>(_+^%|MI332wtz!P(+?%Z$Lzz^ z6E@(nOVWJlM|71<2EX@iV()Ohn{gPcQRUPZj>4Xg`ga7KOb7$Llf|$-n--x`e*svwIXK{U< zcCu26Kk?~b;I;ctaMzPU+ zxxRR?83cSO(T$i}YlL3ilwliK;J;n1mreBm?9|XBV>W*}o$JJGOg$akl~R6Qz4o8j zDv@=nU2w}{7o$;R3tr`eGhxB~#1u=wSFiLyPek=%uPjR3a(6%i)*6+VoS>mq9mN%n zVQ+NBO!aCfFzrBin=A0a5OC04zj7E2(Ff<)zIx3?lsv94Pwb|Nh%*0^J~i3wUUX5n z0m>}R_hM4_0V4mVy)cenB*DB!jWzG;$yG$8zfrJdnf4>XyP4SlU^}PyOkZDVu~+S` z6m9k>r#IWTC1oobQ8*bLchc`7Ea>{33iS8e$H@EVm|bvN1>V-$E5fkF4Hzx>V>nH{ zY3cK!-DaZv?ooonuad`Exs&vc5iByspd!OY#fIYqZW68nrMS?@H^?C2Vg9XYT;Oe8F{ZcJtP09R-4mX*@gI`#vebL z?2GHwXbws;!b1jFa*QZOg{`Q~ED=@;FSG{3*vCy+Rsri z!~@e4U*?zD$2mmbKaOr8>-@1~=0(&qKB_zCvl-T7DdU=ja8z_N;_)As!CnKWSYmm5 zCZ1O*xD0tf=b6AW)TgC>OJbws0GUdXCP<%q-}*SdEG>SH>vk`%f$_2I`sOftH{!$* zGrKAOIsSO>?dROA?$u~UbD(p3XY-bbCxOVB#NK@7)ohwn3s9bt8Ps9y^w>`=BXyk>7qpv93Lh>%s@y${Or-`p68O4+944#erk1-TN|^ zDU;lk!3KjeI0orgTq)3s|KiL37h_^Tk&y5GH*~YqDZWnnums*bm*^hkP>i#&=d|{PCF&kaSnZ@lNVca;e#HI()xYS{XcEHcXCwcSyMpHs!PQZh13+& zd&X5BFsSA?$0P?G&YD{$Dykdo?AzI>z~thWhRdF9>Meo%9PfNtZ6a2&Sc~Cmu)%f2 z*~n6>7yzj3+D7P-ba;2*P8)ZpXJ=8);Z61+53aZ{Q1xgKcj3f^NL#F<9tH#_oUo=d zZDPNUC@veA)yCS;6`LdyD0W!Y1WKZaxx1QPm$-MBW%q5ouw@u2=3PSjp8@KAnLJMsD0lrU4d(H2BeuF_kf&P0h|Z$P&Rloc4l4J6gzaLtsAA+(8+!i^#Z5^ z`;z>Pg=PCop6uV;fc#e<&dBhuHa(~(8DInGp@a4+UO~%*@P!36BnL2A*5H8z*UCW^ z;IMn553e3RND5qTea#hT-pkZDUkVxz*Z?pO@1J$MY%~$C7RTe^?M{mV-$`&Cm$}zj z>tQy+JozhzlC1|}Yq5?bI?v-Et0<=hjvN+_zx4hTSbFJ`R{q_w{=(z^hco|YwC(?j zGiUmz%m0_o{4Y4&-@Mv?zg%Kq{!b{Ix{TuiD`M9_s*Y_Wuz!3$XD6;U9LaD?3`tTp z)x$LkuVuzbzzD3V`iy>WU)lKP5MJ>VdeM_0L=9i8Y}MSI>9zRtl&`^6td%TDGWMx< zq}f@CF@k+RDqYgE)$7do>E)QhdS1Z6%dJVIB#u`?J43==l z#WI06T|1SmvCXLCF^MO)2_*HWL|Q#|=aYN81PVKzLsBA{DvC4?9N2piIh7k+r@WPG zq3!cW!qlqu9dw9`JEU1)rgV{Q2(g+*o($z{-%;gi-%Hw$7rZmetL_2Re$?G%5_d{W zIS`L=EHr}xz4DUA#cte(%MS*Tmqs!8j3u>360YXW;p7TAZ^y4Ae+{S|XZ?pDXz!2; z({Pviosx1xx-r~J6Amc{>(s9oY7bxTeQ3{Ib94X1ZWZ%x73k*H16xOz_J-g+jnkqj z`!Oaty5B4mNRPsZ;RrRmxu@lJy=wM-yNAOB><;fH+f(;W(~2MV)03v^01*#d-IXnH z>tD;-y1jj}Unx8Ew4{|1-NqIPmkEk-I-Y~KnH)`5BC{i0m3OFleV%V*IQ>YVwtaoi zLVc4^Q;nWKI5!Fml!wiTDP#_ixkj&c_4XGJ|aA26#wX%Ng~rOfU& zc5vgCuZ8(@GrxQSU^}9B>c2|QqPgH#R8lTa)Vbn<*9DxgCB-Ehc~ftf*^~EwQV(Gy zuZxYHpem~;$1ABjZON`?AW}Sn0!Qke-f=~A}s*<%j0o(*1pMK8@*1<+q>xo1p!; z3AeLK%K%OnSnbgbl7mIqH3hKXHpBGFGepG=bHA(!9A8|EZ%y9_I|LVWi8?vowAm|r zSYr93gcp}XiJT$7SltnKLPZ%r(#kOU5U)^1oAlGiTfcKI^Q04O!&NX|9hE~Hn~h~Fz6gZ6 z^4*uk>pm6mAUVsaKB%nQH$%Cfd2P5JmxXUX;R49;mD(Y83Mtt^>(o~;^gJ0w#SGdb zU@=r69V-dt>iSZ@4H$)EYOROeMYjzfi9T7M9%+@Xhw*i-jm zezLO(*w4;-9*kgCZ07~0>tZkAh=@;AF;*dFX7n08cyjUJqdCauLrc|9sG-K%YBPdO z`S|$_>&rSy9^OXVLy^gC=JE3aZ^r7U^7;f!d!8NS;B4XC-)%iY)IN6+8z*yLDj7;8 zo2n#VYS66qmm(2qsG(e{94|XnstIVu`ym{+Mern21LE$Y5LhSvg1lq>TL%yDRbrqv%gej{=9TE~(1xCBudiy&Bl!WF?^k zdLvG2I#q_6B^5Rki*9lb@oE4kq4i+5pV$v)l_CKqPCWw$;cav7MF?#*r@=n@mh0LL 
zwo8bJC`rtV3;O}BEU%JlPmnQr%0j7>sNRn|ad?~9*E;0FOoxv7w3v&5X2?a~&TSR^ z#_trk$)s%5i87{80#QCg>*dAU4^+87BNTMFmakU$W}X|xT~iRX-T{+P1vOsAp%Yen zGori`-RO{mnAR=~t@9%*yzLv2yZTS#cn|C_q*Mw&%uZe22!t!S6iS%nyTHPfI zwKrrsin=$a`Nif8#YXkgalVBCy}rkhMWxFHpNTep6*VBIr{UMQi+2LnKe_I| z&IIxpSF*#ccGv0^Rf@Gk=vkfVl3YLuWlpS3ikYQ_`pz^}Sq8%A9$O2S!?PNpLeyYd z2lcO!j!V%gf(#fpku|+kH6s*V0Gx6xH=r1|kMXycN#0F@ImMeVT6ys4F3D!bIFUt+W>pQyqJI0?8R)uRtzVZFCLYr%nn)!-;c=_L z&N9qV%hM^88vlnrmDeYT{X2d77lP|Q^y&Y7z3BgEUzzFOzOi*Oa5BcHr&D&fGp3U_ z_(SsX!Wi2a{o}%j?Joe<-@ML$)2B=f|Fy*($hesYZsPE4Fy zABLc=BJTMCU`8;1#(CM=sL%nlq#=E*{*cIRU)E}Y-1)lrNvu2E@r!%Y`{fosLY$j> z=bzM5&v7~AKdGmcl@9sNO%Mb010ioT6afs9A~(054`ROW=CQ*NymPjwv6u` zVF|=gif>Z*tO^RE6xF{r`5h=cLi7aCRYi@|fk_?nGQa=4$43`1q_f+?GP-L#AtlB`J|IUb_dICtb3= z?%+SeWaFNDD+vu#`A`(j)#J(XrdxuD4LOuVQ8gduS|bbVehrq{!ezka|3WR`EftW_ zoZQOQ(!Lzbn`^=aBEWsVwIR%i(}~i$C^NWIz*UxLUR7|LK^Aq{3A5+jJrl%zH^3Zf zJb}|>=Bhf~EZ#T^xPIN(3idfCWhMF9kcn}ei^S!HPMD!AswbYgOJBO$75ZYb4pnvD z6^A4upy;Er1ECNA%pJs+1c3i6!8;j8cNX@3GxRVQ>5CK z?Bj#*N(|O(5~Bguhya3~aA=}{Zq&M{pAMp`DeZ&4sdLWtN+ z>blEo&RaAz`Jt(d1)4)(Gme$l&PB9hKvFQzbj77K7Um7akTFZ?wa85@Yv^>l;QG&9 zCl>!wm#+FUHP$EI=4|1nB8N=idc7VBVgsgVCP=L6?^mo`$ef|>Uu>MH039laj&!VS zh6d0>(T@{;ns(~6Vp{r6+7|H}n#wOV#}jQtFCXhHC4YL6*ebWB3}3v48zsO=o_rhM z`P(C+g=5BzE$S3kLa3YJ>9N;jiEh>?ZId!Q^tePkrc6rakqpEhRw@xLWibLuy&T#e zB<L2M7WYCV_z(T~0)ZBer7cmI)y5LBvb>_+7Z`rBECQTh(nkv27Ph*yHq!HZ|1r zpi_G<(@v?AFsy=EP>@VGA>?FX5$d{cd0dj-PiNnQ(j33R+OKgTuBv85_D#4F5JpJY9-4slOtf4(>W z-mPD&F{GQ47v)7}u~+;vomd?jE`l})g~2+kluseQR(N60;`^>R2zDy<(C)&(9SSqqc!{3IRv;Ed z-s>V>B?Lb`XDK~IcN6D{}*|NdEzl}P+Cu@#{ETxqqn#wUdmsEI4yPhipdg`Ygka;1ERW`lwP zTH&72&VaNHB+^-d7GmQ(Vt#}pr2RdEFpp z&FG)sw9*h6D7}5A05CrhO`r(aeFA`IU4M$Z(uHM|dIabZ_ zV23S##B6iCnjk97s{?S88tNNuZ_jddY0s?&*(18PBW8n~ks3Lk&`%#Vz~!HQd`V_l z=UwY``qK9@CdF=CyBx$eaW4O^)D6{VB!1-XnY>RHXZ&k+hZmfQ>Ed;xXUNazt`p zW9|rLsX4sX<(@*~x+(Q5$=#vDF+HE}zR}}m(EnU-m@hu+!ob| z@si$x(`O68jhCk>=klQ2c-{cbb{wg&AH{eISc%LRw6I5_KL_yG6T9$Yt~k`3%20tN zkx)Wf(yR;?oH8nL);Dls%w#l;8nd~u^k^&7WfhUD_OVb=FF$kQfvG< zj&w>ZqdgqG+FSLo1&S?tY^&FRc>@z5e!C~l7UAa%mEpTqH(P_ zzDraV@-}jUgy`GpscF(=O%eX*79R6VUXs@^c6caPd?1nec!@a*jyyuu;fAh#22Rm# zPP1!JoLB@8+DDd0f`LC&-!ZxZ%2n88mWN-6Wt6X3 zAqcct86?a`$+cV@_t4sP<#yRxgvwJ;92mZ(C0iNt)(%*=Zqmc(Eo8#u{DYj=Q>8vt*gMn&=fQZ+>v#y8y>SYr^2pUahomQfKAw@YvnORu4?|U zZRhOfL{askhY8R$jJzzIi0*0cypXkk~u0b6wSMYDN2D-m;fuV4{DU1(6D{~@h zN?S`Pe-eBUyl@$To;)p{e7C5tT!)7^wocB(#>vr2Rmt=wvZrQysp_v?($SkqR(4(> z0c%y9-HcBryl(}zXmjN0q?85p+i81DUcA3-d7_@AHodY){*q|(Y@6$G9j@cFZQHhu zF59+ktIM{nF59+k+qci`x!>$L-=2N$Ow9cY)_NmW#GCm%zs!VdB>s>Y>uTXTE8Y5O z_>#y5*Smi8z6=DM)?Bni7B!j8U>aD1j@lHTCJE)ZlOad8G{r6;Hctzb-Y##|Z= z&^NXpc&Z92wLVNTlbqAsW>HzaNseB=KzglvCzdBMQM@;{t8?uQ`z|6Fz}w}Bt`SHI*>IOKo$!9VD~|DF4aKPCeI z_P*jzG~<8m2N_sd{%L4qtZr=(VPmWMCvhx+9stqJ^<-V5P*_rwT2hcTYRi7CP#{z{ zIv=u6pU(N*1|z_8i^QlhDmf7M2bZ0SH~L9N5jS-g~D2&A~Kj>g}^&$ z_N`oHoo*xS&Doh^Nwh3K`rvb&NBAsWgi7jaR(7XrHt);#t34ascdAe>l;Y3e&-=Xv zbEJw6AF$DwSt^aayJ{Y)>W+jiX#48>rJljH7rR)uVjx)yhWvRS zJ{kNqUBwrqZKk9Hl%oThuU;xVxhRwB@1Ib$54DvqbqOCvJKIWRnloA24t3l+zSK*9 zH(TLt9R%g7a=sswJ{KJn6k>s*hZZcJ2g`?4xfSL;LW+!j=Dh|Z`PUP3rP$KDCEZN?#Is1gh@`sq@tI*65gdE2sr|1K) z@^m+L#CgQcC*F;|N7wWXkY`+1f~gxX91ZYuBkWsrdTiWfi!utkb$IITF zGcM7f7c0t`(QiR7fJZK8C^KHi>oj4@W#_ldM0XaBkPu|?d+;|~r_fX|VtiTQGOdkv zG)>`BH$opi`P!mVq@QT9IM%=3uxG_)QtS+Ygd`nC9&Frij!G)41m-%LQ9aP&&^v)FJuwM#i=nENu>p;EEXLxJt zy1P|nsF$W2dCqwG)zRi((y>kgk+@CSFgK(`v?d8njNOa7sS3eC(P1u z)7lX+dklqVm&Bp>;S}bF-z&ljEL%KdE!{c;O-v3QG8gxo1^98_!ebr$#Nssw6TSrw z;zOpFPg9_TEC$pTZ=8I)oF$~@<{9#R?o9tG>q;(Y(lo?Fs`Zd6p&z+=6*YL`2YZ@! 
z(rbiCFxsuL0jaFua>IGfeh84Y^)JlS3IKP)JwqKFc&wsGdtleT8%sj~3$6hDLL6H+ z=WHuCv-ADM(|2LN!$;b=;S3o49uWLcdNxj zC}K>BTIj+tf46LObJHnMLmboC1^|9OSJ?{Qth?OIq1gqISkh>P^->p%Im^A&uTZ6~ zHi$e?WW%*YgF!BQ*_q05PDSmCt!Uu<_aTRkK~asP6&Gq!A<1DxM7qk&B=E5ryoIj+YzR*XI39yTpN=){RK!9a4N zrPN}zkg(-c98z^)g|sa>!EAe)PYP%;sq%q)c#(y{WN{O+E1LOoLdXiI+tj%-qp&u! zhP$BEDMBm5)sU}+r~oE2qjxln!f#`h^|o?I61u_mePyY2!Kt)k@b*{y}ik;zri9d5G_CsDJFL^iW{B{B-H9idt3a?dfO^iC8 z(^?@A{7QQ)LL`vxAKO*j|275^L!;K(rE4yw_2vEM1Nw6S5b&L)yKeJLWzWay%Q#V8 zOnwq(eF<$x*bZF)A;HwNV4XTeQ_$8aAe6UvVL)pjfT_LbAyOJXITI&F_?7HQHBWf1 z_M+}A_;c3_nUjb%6yVu;b0(_OWT&>}EGaMsptAeh3+%Qr`XJSw+SytDkGJ+0U}c>q z$AIL7f`md3k%)6=S80ng&B7<|2p%WQ)zr|;Z*yo`Q+*O^lC%u%NNwITGH~O7DnHxd z4J5%Qv%fR*N`8N?>R~rHCDux+ih&1BHg)~Ed(!TMyq z5ONRN?|+l>tC!!`NY;vb%_;Ea>n8#&<_Z4IJREjFNjG&FrU(XTRyQ!y_sEiZe+a8o z_XHlk7U?-KgY3#eVart(8|gC;tGlqGkN8*jYVX?SXoa!PT}DVsq>r|T{>xV!-00h_ z6D@u&fzq${li&ZbO3L%1ov&~M@`7kT<1z}D_Xk`N#Nu(`nbiphW#Aj8DTptEY`gTT zjP1Hb;oi}bYue)qM?SwxxFL@g9_u+mC9&L8a8;@MrfeL)q;a#}c_D+I8?3q`GJ{iv zm+->uB@AZyc1mIG9-1?@X&1O!)xJSW27|pO{SwI%o;kbfNFMOj3~S|lF2U2U4Khz@ zx61}anb9ecKdz(_TY{sZWuSxyA!=spHBVGB@0i;Yp=ukoa7_UVTg~+n-m7^)u%(XYZ#^;H1T#Fj$R#{+02mE_Vun?exz}ti6Iz`l&}}4ki0E+a zo+^As>QUa|}ydNZ-H zRdhp7L9NOCRR9`tOra z5`tUfoR~#P7E_UBM7SmNLkF_X?dQM7;%fV09;L##O~iG@z54=Uq1aB^+kpxR`6tjy z_ST%Ss)nF)G8Ikj$H5e0orR(Z3EV7tPC)!OpyQhkzH^Zpc_(x~8-NWQ=XO9@BQ=hB zaBQh^+}J;{yqG(IT|8vCueAnU{aj~qw_rXVwAT}*=VV#~{MCw@GD0di^-!7F;X%^A z!V@{iibyOtycvETZ`EUwtFcoaTXw%Fz=jZSeYgE+-kesE@%R|eaA90Q&H@h_V?6g< z4Gpq9Yc8p)nEd8B;*m*$Wi_W_Lp`^y9I1+F%^FvG*o{2foStT~4p5|a^Go*YTIsC< z3CrwoX8$hp*-}%>u^tc%5rFE)ps=9!^kfVA06 zYkL|RcsaO-=;&$4#a#C93S7ps5aqv+G}i{C}AX_J3h6{(T$#|KBMM4gC1O z`YC^cy#9~7VEe-n{O`DnKgg=Ta~J>ixPO^bVqm6c{WA?U{=3*~z2zUpUfYDAku8W$ zPXuu3%McmoG|*CnfD8sy%ZMIn7E$xD^A0yl#cBofh;Iy;Am99hT$nagZssrM$zMZw zw}SR-yI%HA4?`Bh6Jje^SoOMrWPqNhxKIGY>`?~*J>MLWVu!w?f?#=*+w?{r=p`mB zj7@NNybVYmA7%5pu|tvXw5A+qqF7%3tMmRAa#%VEAawy@Szeg=osu{qED#t@sxS1MzHQy zdS`o>ZwTSeap}I85`~fyni@1F71XheRvrtn56e6RPi6a% zoB%kPQ;#%x;RCCA+%nj9KD?uQE7g%eQaSAw>Q(i1GM_u3l7uzkNNppY>4{wiH*3UC zO%z>Z_2;Y(-Y|DR?HLNOvZEKVc$xC$0uBQe4~9R=0J9z|-LbvI&bKY7+Y;y z##)Pa(KZX+7g(M(5$2T+7g>RcP}#yPHV?kTV{!MEW;0C^2Ew;g#%CUnR#FH@HA&8- zUw9|%gT}|pe{)KmVO|Ln6?YF@vMI!R1&6z?`jsdw*DjaC z5iyO?u<)`*b`i`o+H6Ao9$Ms@z9%sgTj?5H7yq4!pyvDI1D`uPG(blPgp(R&9flI= z4TvVTgN=V7X`&}0{3$6&p{!Z9(Nd`dBKA`mfgqH~ns%NW(i4(v@pIrh0E;kZ8Y2zW|*0=BE?9NhKTQ3rmHAe*pVN2rBIv)dBFrqbfyC`GjFiKDs};NJQbauHOhthv zdM_j2YMhZGQPI!EeUG&?I0&ZJnoB<7ZA;9J8&aS1fJt1^1(_%%KfgG}E#JHVO++r& zpDBQLMN|4FJ7~&d2MF~~4HATMOrKNr24~17@O_ptjG@?!%QOy3|AjAf;EaZu!Uo+(J8{$LEvw%$6m((YqrWpt2br;Fbi(}0fHg;(lbeg#R7#qkbBzccp>8qjJj2fVn{+N zKd7Di4P4Xs3X4CRM%WisI@5k{~K9#ZO?1pHh5FdVs8{ZenDffIYaAnPkLrx)j zi|JL50k>}4`reVw%@-Vw2t2@zZ@pn4$ZdS#5xy|s^b_<xc#O6ZsD=liKjY+A zzCL|R)JNrf!Z0_UdfnBJ>Jxu7V-I&V2EuVyMS+^lp%h>{-bFc3?EM${e&D-rlZZcw zva)9dc${IE8V3~hr6aB{Cq*0Nf#^wca(V8+|*pyx*Ag$ zh3pb4HB-EZEGOh_{Ea%GIlp~xobWU;1KMH?ly&RpwYVcmAsHg{P=~kHr>0mmv z{$t^j^&t_|DJd(>piM$vk4qO|`I1$Y;b|5KIREa`CPOkrUIP4vv*DvK z!NBM#HbHDlmcMcO>Q(?6D`^hS>g9r!nviBJLvvTAaO8(+JRX-8GQJn{VlophcAlqP zjbg3e15yGbJ21v#gBP`|zBbv`d`HV5l<0P_;Cy#rmF0piA#N3*oXfQf2>K>_n&y@h zj6BFL3a}f?#V#B0cgvb-z$n-Fu14vP!!~#n)!>cjm(Op*qYFRuoTCuFY>DrQC_(K< zP)bhR%v?R{=zDuRm@o$%pU7h5CJWl)pzlRFUy$adO?quO9Dp;b^FN?xXoTk)0<@z! 
zOA|CU;vc*LFnthgIyS90Jol+4f6s#XhJT!ejr!ka4vm*}@M0xd#^MAmE}joOYwVc( zkhNQvu2NKm6%kF@KZ?wj4|JEDO4d~Wx$KQq*e#0;apv z%qB(f*{H~nR@kXCKfs+!gZ$P)9OMYQvDsB-yKYQnclW1gL-@y2_cPMi+$CFt^=x$- zZb%3Demx}&d}SRcMf`Rf@wqL(J&ozg%g1%{@Dk!Zc2DV=k1byDoljXSL*L~rNZZ*z zq&A$}jP_sY)4wO>|4C}IG5rgr{okL*{;%gI0?-Wq>V^MAqWy=U{$o$>e@9UN(YpJ$ zg8EMg*eTAkmA~d!bPPX2f&o7E)&#iY_=^j~o`l(-zJGF?z zZ5+^oy<=f5qB!2i;HQ~1kH15c6ff?x!nwDKe|XpKiae~N+#MlNvP zERY|hTVeQSBtP~3l1pHvu1TU!04qa9qE+Fka6bWWM3FcB$r(-7G3fsJizNKMXWZ3d z+(eZ^S0;^iO;!>-Y^WaKsY})aH;^g<2Y;JhI^9iQ6y-$jgIVXjT3Wwfxw~jbiSRrg z*nzFv#sQIg1ACM8mC!~dM_4!(u#Zf&P9(mdPFP)3b|4bcYZ}&zsk5M% z$a5={t2JddGg4ApH7-$Mx0IGH;PW8Sv?G9=usx>TnlPL90wx{Ya^ot1YX4zX` z5Y3#CVAEWZA1X>&>Uy_q9lh&{?h*Ev`{^&3rQ=gmqPII?4QOi9@Y>Ra72#xMM8uuW z){+^YqL2v*bTUzA>gJ;T9qBoOsS*H_Z?!>;WmJvPnIVBHjX5epo_x4UHhT;sY#54S_FHyLiJzV1P7J+lv$qB$wYhG0FkWep$-2pXTKj_E>EEuCk64fomHs zekfnCE1?v2X-P~Fu7;7xPCocBN`R&DueU!y!~rMBcab_z6CG(&Y)1=vKd0UTF-HAi zQ;bKj?6x!Ln3cnFn&Uk~ke`R2Pk`n_R-d4J$3y}AzD~Wc(hn8Sa1bc=eYd&H7?omd zl&7*9NpY|rj+NG)PSn8%#i;buL$G5v&?e+vqz-q3QQ4&|(HU#x1201a|5%ZzhJSfQ z;5IdpM%ksJKQ+_5&}_d2D$`hI)JX+JeSk+pp9lb0usBo~R4@GII6&YPJBUY)*!n=b zsg8!ImelO&+)&+LrWER%Vb3F+ATzD?VBKdGimYLIc>t|bogag`Y(#>!_O3LeQXncy zL2b#79QD-=rgBOdpVfee=F8g1y-{Mnqx!8#-H0;Om^&_Odv)D(s4~o13^K#n=5S1W zkjEbWHH#*ultRvuOgBHC83<<%24-W0drTh3h~%{Yt<>Z3gUU?JiMc_+>O&c~T&Hs9 z!%y*iOKc;*i9V4u2Lo%CHS%fm>kgGkz(AB6N~Y_kCPA)?<~UUX^{H+VRJUxN~61zh?kmj%os!kkd1g34!YGr?nyosuYo z<|x{far_K`kRm?nqugL|vp}Ft3N7MOXcbbs5S;tvA+R%%g`QS)~Stna)*z1<^@Chr}23BDf z3@Q-|ULea_zm;c3he|*rLbKPBXEmMFZd`p*LcY^`Ze5Cmx;4Fb4ye2|Zlri5Dd#W^ zUMnw^vKFK=o~jpYaXo*`+TCXJ)5SB)``*QhC{RQI?3-bkr%;uYjZXlZy36b~b}x#P zB}AWedT5~TTW>uzs=e#kd&R&@kDjcOulP^_8SEfI%y^)BKhW^jA?ug+ zTGyFoi60DRw!AMios6VgF`%lKx$r<%w_yc}K+J(_2Uk*hw9n)cPs7Obt>ux#TEyb< zx>sMgmh$gr4YWF){s2$qGblyJaPc!jt0p?DEDxprbDIEobSPKxUD6!nctp=?KIq5O zR&Zb{84~jPlHX$}UMKKD65Zt(#fqyizfW}!fV!jx0Q|enZb}yCZ{*g;UnhsSS=`ye zbhvyX!1_2w233*9&0uTrexj07$WO*jxQcLLwf^Kd2_J>MgO4W%Ph82jWDeCZn&g34tJGuHGIoP6{$NI)maBxq{YX;W6n8~)cJz0#kXuq4@$jXkWhL6 z-k#LiXsd}OP2i*iQZyms$mnXg{v7j*mAo(oqL~2km)-P|3+CKJ*q~~y)=%Yl*wArt zEGrRQ*7A#pB?tWn51k5*K1AUKWS7)L;o_!402!$Lo zwdn9fYPqXG5kVhq^aPN7fddIEJag1q-Zw0O zuzQBqy}#Dn?hA;EQ7Vx)=z9G$L7?J0xwt6OMnZ%a5a5jFJLrGNzmrQ!IqnC1XUz~l z?gHn3uc)U2jl&Mb!H$sL4&Ak9>J5J7!q1vh@U#$%@pOK%D`=2=0PPpvcuJ*8zy6%| z*F(3P9JDd!EzNqp*^dZ5xGNC}YI=OAyC^UeZF*Wu_kp4V$YewLt9UZo-$yV0$?&lM zZRq+pxAgBT-T$(`_n*tpN_Kto-uJiX<4r#R&{#iW=fmUGI5~ZP&x_yjU|jr_@A(sO z^&h_HkDlHC9pCeZhx%LJ^Cwj5Z+s6U(|?LpC#wDSJ-=fszcHmDeiZgWY+Z^ZiZOe{ zAr*DyF(Nn(Uf=H6z#vdv=n|AWjNe0hSx^AlDsjI-rDPBfcbRM+FXPU)HG^*0)18UO zo%RbBL%_95Tp*oi{JWcMx7BB=QDNk_=f{^GlMwKoH~ddBTz{#3{>1u=u|Dw6=Sj(D z*{=8F1A=X;+m67`hh8!Xp>ZG7ai}`3(3|&wkYRdWcZ=GcMzn~)6m2qlZ>sCP3?IvT z6O->d%@@2lKUHd`d7SX&fKOl1r+vI_#?|<*Drk#ZYLhFk2$n*lt=sh=& z6SBchGE4a9){g_=VxOPD6OOaHuG`b6)%7 zlF7X|eKZNkl*VrzuxMsmvDY)RQ`Wy|M`H#6P0^(1I`BxgCtLc4I*P>Dopluz6YK0f z^IkUzLySD=fmBqW+ugCkcgE9XzUYMG?;iI`XPEOx1p*c^_{wu#OV~GZ_kJ47G%%PRYj5JX;M(A z@nuTVoa~l4VEwi0{nWfwJUjR5gNti+)a+QW<+bEGTyb!yrYkVB*v9i1A@na?X5x9s zrgZZ<{f%xZQRGcb!pLlO%0h2(G;olw!cQ_S-s1Fj@wOA;hO_`eEiH@J1{O%+MzU9J z)V_jzlH-)QrispR4;Rd%{uH;h*+k2uc-URxCz;#6>f3_4%d;ZA*gTbo5;snT11Mt4 zuTwF!SqyE7;wH_dw~$YPUChD-1P>c?!Casd#RFD{gCp|Eb_tegz+Vsc=a{BI^FOBe zsgIZ>!CO-TRJ0N@!yNn!c&wJw6oBI51)dJpqvpf~8-*Z0HdGybGF}Lxw4N+zs3=4r z;XM$7R!64@-#Sa-E*SaUH#Wz#Blm;?tI%u?XOoE*N|cV`KI78qL(M}fdgNRKqr5Na zD?~L~O&@;wvuHZl5+1ncov+QJ6i+!R zq6t#pK(QwSMOD2u;}#$RXuXID&0weR7B{If8xHz66wHx%9vp5`9v)a2ay}{|3{{Uf z>l?mS9`sie0qInzClcCvIskzHAd zi(6U*B%so}b65=HKlET3oW{^O2Nzs1e--kep#(tP9UHx89?khhUocmmiO4i@fmpiT 
[GIT binary patch data omitted]

diff --git a/docs/development/XMILEv4.pdf b/docs/development/XMILEv4.pdf
deleted file mode 100644
index 8def7c5fd0aaf398896a83a68ce11c504e3191c9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 131956
[GIT binary patch data omitted]
z7`}-Rf}whBK4d@Ssp%@6Dj+1>6c}rnA^yXp?m{ehb72-*kCmR9(2`#tyt?dZ6O;zP z(p0r$6QGw5`|EPcsN_W_wF<{tXf&fFmSsw`gNn@+iDNUrIitW{rF~UNVmq2TIt3GU zs65e>vG*ax;c6H}`YIG0vHDUhZeQ@#?Jf&X+`-#lW_!3gDAsYn()l$#wLu+-qNIZ$ zYYk+Zd<}=#@Zn~6G3ycgNeg{r^6yySs%H-nW9^XZy-}>|RhE5$sXS>_A#!Ne(5dXk zcmrr0xOR+hsHSah(34o7`o$DU1E#ota@|Ki=ar*txk}9mtcTi!#Pa=F6I)Q?c#QtS zB*pT!qkc6w5D(vl7OzDoi*{~0nXhJl%CQhAOu81)(PDI$mvvRsUG-u}jos~b0GI*k z)+ffwDGuvxNfLsa2A&Z7HWv12EIWUswhY#wy*uYv=*+i>hpS0%x=05RwP&o1>%Ot* zf5FxW@cH2UGTv&WW$dRPa=!Z_jEG2EHDh$(qdERb+YO|?IWqfK^5O46;r~Mt zasC}+@&5#1zM_z5+~G=o@UI;pSWmWdWw?28tI>~k^fLc|7d6UuPCB_ys7vfO%ELZ^U2>S zqJN@3{&Pjd!o~Ez9T|>m{7KInM*qAj!8)%Y)|YYbDqI0tv7PqJpwsN9g6S6(lq8M9 zO;>iW-Ge-ay^nayVik#;A3$iLmL~&{NAfvUZ@o;;L<#s@_uF{g%smW5omuI*?|M4z zw2nT6MEM=+97YnU2p#XP{f1Rr*QHu|0CnhSyGB6|Q=q6OWcW7f*X~AgFjnDahriAs zy1m2t$>|&tAB_I#bZri83Ka`t#j9=kw@+5i@%s zEJ<=*S(Nw$gkPd;VW79us|8@EIb(YkcB<&CMW1=LHC$)iKqn13c0C!zzs!y~e5;M| zP`=sT1Ol-X?deYaBmJ8c&2!mJ@bRsdLPNgYgQjyM)uJq&%K|zsWn$c}fmUTx3(4t$ zo#%p(f)QDJ%6BBc4^}M<_+*a=WY~yjIn#q{d7?2AJJF7SX410Y=f+F6&I#TCClGDc zPOawv7@ykfM934s1c?vn&)^M=r#2jCkLef;QS#gJLARKeJ@rK^9nG-1;)qDaiZW@< zpKYomG+j<8Nm3Wv-}NHH+8CD%Gq$X5`xT>P^pfLtrE^jSiv9fvB5keM8yzJV*l^YX z(ZI5gJX^Ihg~_>~C!h%ZXGPPesya9Ua^Js2DO2put91IIJ;&YJO_Buu;yf+4bh95L zx}bvhHkQYb1FPjlg3sX0^C7~DJ8~P|<#JBqG`+#H@)XDEA4R{q?_kIhEoDKQlP0b% zm*vdxE*jAAk@$s4?wVN50>N{Z2q=^uz)ezcA^Q^znBSA=fm-8#IZWB zv1Pfgt(J*lW4FJMA(`AlBl!rjtB|k^W&Rr{Y>?_5TvvdR!{mc#QPg)#4Hv;GoW$Z7 z;GHwBJC2QlZ*jvVTv4ZYrb-vTP`RUN7g>G`6Pds{17PGXsiD_VI?uk67hyB@nBcqHdN)~Lm+2!YzE00|;Lkpm6Tz=`@y!_q% zc3<59?6kNySDj?1D1RgAr9eHIT~EUy#-})S*`5hz5|#s>b)eUPVGw|ZzF+7l(M9e@ z15E?)u9(^td$dv5fLR10y4A*iW44VFO?)na(dfis{h$Lt9^32$1b0ARg3Z|BiP;J= z;$RXx)GjXKd9gD>?>V|=)YAFk;e{C57eK>IS&fq6uEP=1d5r>RXwsW}Q*KUb!6K}; zE zCkgiqL`Yjvv@z(=%15tK-h#8PXbm;VWKOonVh}6rv2jtxi~5_|1B#!f@v&vaO{~h3 z5xU7tdtxk6;VHa?c_^S*9h;nJuW7^$3WKCq4dQ#$`Z2WlCRa-SjT# zq~Npzw&~S{jtjciIiHfagpYtLf{a%fG9;8Fmewe!_@_J>VHI!|?`i7{Q&Dx4#~dFA zM{FMOVW`$j<(CR+?YmXe1Th-L4D^dUe3BhC0ks&8Al#gNz((QVyYAvV`&m|5vR-#8 z7vr>GM}(_5lSFK|kW~QeZij7p{ux+&qblebAvR-fbt~+*?c2xK!hbIN;2Q1_TKlXDX|JSH0h(V|N!jZ7bmp7$5XGQDhxHz$7=opjQc zgVu9p3>hNsau^p0QV0r)X!KN$V*Boc`9#n+7uM^KtPDe60Mcejwi9!aU$xxxi(p*d z`O(y8kB=~j=vq1WzMo6tz&A6S$f*OHX1q0rsf{nKR6^m04Qqh-w(%~@cxz%o^+5f8 zsR@FKC61|3B5gAIC&<#pK(=X&{HDg-jA|JdiCUq$sQOdlH`NGY8{sRE4wldD0j*3=;oLF_ zQv(%`8;+&0x1gD*;kg+@_F%sr?zZn7x?r8hX)7Ho-yuyl`GQ@4w`%e}S2ZCVQUMnp zW6KxFDhw~ioSSNE!nW@u*vDu9V?LooDQL(^3tBQ zOT)r%D-g)L9=tz&Ov))GJ)1f- zUb#0!oDfVk|K=j|=XQT|N9=)*TLkrQmAIWGkH;I?tDtSM!;0+Nhjumf9?vL{uIN<4$jdBmou$p9^-C|mXJg}5Uj*~a<1eMeZ)Xj)8?=B95lZtIfpoetM@q!A6J8CLL^Y zuIB>BjA^uzr65J+<449y-2 z6|0Y=4;NQ)jJ98A^3$E!5dQ1D)+SBxZf-Az%Xmm~+IK3pI`L%lxeI0_R*;M_D^VQq z6t+5+zfmefPssO1>=v;Ro!rYN?zP?UQ3D^QooKSVRPcVpPv8P3Sen~rMXXlCGLyl^ z-73mV8(@c?=93Y)r?~+Bwtit?0KciRY|nFVwwj9D89+KEYImH^=U%?s2khdEGe%3t$?o0nOd8_Y)ftwdBK5y6jPAL~1pJt#py#^wY!&fhG26H&`Y z<2fQyMq!+I6pwsa#sP$oomN4==iBdV*A(S9PVQ5%IeQ>L7m<=i>gIn$Ecp?A8ct3= z`wI#K*Edh?vw1yp8@p2s39VAZAS`*W*=0qOCJ87fQ7ceIG>%-2Wfk?!jr49E_j6WMDi4Q}^u*lJiG5)BLJ zH~M2$k?>Uu_5iF%#)kq_r-xrKkl9eI8cztPVomrTDBz#aFWFg58H4ZYZlo`UeKp*E z1F+8fPV9;L#dX{IDY%n2A zBmX4w9_2f&`AQb$KaHP-yL&+2$VR%ne|5M2{QkkP$nJ5O%6kP3_8UhCa$`3KQnG9L z5^qyi?_K;4w!m@OSpFCCo%^5CAOAu+|EnWmV);8q!opVnU#NTee}}$}ND1EmYKDJ8 zFaDPW;r=(2;lBob{~s3Q-y<^r9{T?G&;8$_Z!T7rf9}|=)cDh$@Fy?lNsaCOJBVtA z^(G`M1PANnB^fVok0v58Uw}|NeFQIC8DQQ#^U1ph;$<`Qw3sS#9$ur*UsU1T&}E@a zDW&t{rpGku^;B-IV(mnkDMRK+t!aS1hwlBPV%Si27=Nj^W=ai1OWR=#1tEh%&9&QS zDQel->B0Q{BjLos!2A7Wvwsi539NH(4y}psy8D^dtQh~fI_~U=!uV})Hgj9rwCy=3 zHMTZ-V)!||;)?O&)bGca@Mvq5W0k2?RX2>scPSxaX=*m&1>vf=DR3|I-TjImD{pn? 
zyOjWy4QOjOtJ;X|rnA};Nb$QBx3oy0K!q(!@1{E!qb-`d8zsMMtf7BI@JEgJ7%S!%gg>nT6lohmEuQZpE z(2!UjWVLJ>NjFjbmAW8PX-m#jWab?*10u4!%C6Y45YasqFhs%8EMixgw`p3(j%0QY ziBdkkxM6Y~y;)*+)o}j9)=NqP-bd9Wi(Nxw9bmiCMdni-b)I82^37LY4-N(NOdNkP z-#&_6yY=~6`0&l%8kTF5#;Q!Mvc4gSDAeB`KZ>`YQFVEzccIwWVNQ9)8eS4 znz$>KbKlS&D9lL>T}odBmRYI5}!F~41W&h58y#n=e=gD3`9`14TH+t`VfMI;Q+Ub(Qd8Q%0jbTZDNDejFAgqj=#-;*mg3lZn3A1vZ$cVN3MW$X<{+ z)1uG5-xBRQUMFd=i&|(a>q@1g*HGYq3bLyU;?fm{8WPvAVXoH3cdT(~Y!ta1LS)Ya zvou4e_%Lu2HlFpo(@N^HH! zEv`9p(FP1VRyuZh#K}f?$EgI~OD+SZrkX`!$+JqPpV<%&Tecu;q4qOGP;wte#Wr;x zRzd~%S68eq+nN|NVX;W82&6G* zeB$T&1;YFAyy9&J!ENiiBzW}<(C%&@S7%1J7!2LDU7q{jLzbK*9!Q0k6xi#`UV>pD z1i6YMKxG0u!F231U7B#u8PD2vD1l-tV>=lYsaCeymTYvEYH9LvXq7`|_l76U99-Ie zpz~qjbkQnSHF*dHfNnD3GnuweW^m%8M1XIs~a%@{MtT^AnYGp>&O;tBCo_%&@H zS#B|8o`YsWJOmLa>uQx(E#?j$noFyqkTR04Sj^zf4;qmkIdiv&q8iMQ%{|PJ?3Jr1 zJ+^Ocfo5Yhs{*rQTM_nQ>U6>Wh3@VH zFyCoxiNMAdE4Gw!1a2YCOC1&dL77}21^DpA%jF{XM(Zg6DY7MOccz*Nb1QT?j zo>}&hUy)@7%9Xz6uziLfdFO>z}%qYzn2;!idpd;K8MeB>_N9Wveow}#rh6Fkz?y{X9F>)S? zVp*Tqcdgm#->9X_tZa7aN`$nQ%In!|_vyV#BJZ=WsiFiq#gN-f@sra=R^S%-mzsN3n!+DWEpp6(Q z1Gm^2&gmKET_Sn<7t3Zk0RcM7!nRi-^ew<}{8>pu#Q4uN!mn%ms#%~PviF{N>sg`d zpG|y}U9IJh_wY^S#y=M@lXHNdcG$EIF8gGpC@&MY@b0%S1*cPgHXwGL^u>GAD@YWV zKPm^YP6=kDqDvhhb@Tgc<>C4d*v7F5ny>giVn9f0Gq(NiR)4Cdl)zf(`WLi^egP5y9mP>d3 zv&WZgRiH&asVdW45ekkyb}^*_mdJp#{W^KNL?n#)26ZbHH8PemaL8h_SsA&J(u!*v zg2p&LvFyi6n<^@U>PTA|iwoS-&CVU!{mp)R<#7pqG()NhUvrMqig-jbxhxwr33HB!(+%gO3>0v{BQ)xqy_#;cOeJm{+Mj>ulVr z(so4b-|RoQN*YhHV`%{NtE>iuE|n)#leSIVHMpZMHj{p-pVw#3uQzia#D@<^q(ML5 z5H;NkUwqbv_I0J-JcdENio`@nY*^>QmbV2c)9mMWmT zKJ52#8eE+$@O9f0kR!eGc1`f<(hzCLne68jQm{)EzWtzw2f!bRVGZJJMeW`y+8dKO zEWGW^eF{6g?>h49kdNUv{&7xjiZ-q*>TI>LUqtm_C7)>#eK#kUy@vBebso15rw8BSD3;A)1GwgW-fhm@JVcn3 zk4;cF;j(%1y}47G_bsT~N#&~{OuVSH{vnB~IX4SYmMZK_+`?bsggSZ2y-YGms>GI! z?@;01l!}O~LZNHel{Y~}bv-GG>3DEaylSpn4%!OvgVqg{kU!FN7)1>i3TsjuH5DkaW}fhD z!RjyCIzN-kIv)is2iln5t~HY_3Ly~{sK^6lxZmJ+>gYx-XEVxh2oLJ(3)q`u{nsMn zx`gS&9d9x;7M<5F^BL=B;-n5Jm zZyyAe(ZJwqa?C|!kez!lTI(RLNT1d!uY$cm)NTe4yKyUHK(GVQ#rR3ih+s*m9Nlw< z9lg%9|G)xetXa?1q3tokOS~#(Q-|#}jT!fdIZ`-MuQH}(r&pu-RD^f*i9$MRFNS)! zZ*B1M!?$MsZq)9E#l#HzNHwg-IF14yV*+t6N47`~5faRGs4=IJW_ZzEeFH$hRS(Ck z92nvfx*v#}e4~kdcS%uh4{rrhlx$}Mt_BP4dCd1IeI_v+S~N{RU&y`>zvUJ;l+c#1!M*!meIX4{B!|@MAASv zlI`6Z49XIoFb^%e0UaKx>i!bA8ZDn}Vh3K!CyQpz*CzwzcR(D8O|ffRT@@rZVn>BA z^z9zN9|nbPZU$O0Y^@=HTXbIHGmTMZtE<3eJhATkoNHlh4t_lwHE!(rZMGil$bxX0 zRcBM5=b#*fvS)wxF8EX@J*)-rzr9dFB}F_)(8vT5N&w2M@J{ynq5oo%N^Xn9<{Qr{ z3`(d^grgJ5O7;d~sYz$1*luEgocF&h%!n)HG%A1g0h@U+Dl z4SY&^3LV*oX0Y_I0d7A*aW+R`G%7me^`!miV*ERYpduGGSrK$D4+xE2i2MfS9>Wot z*90;Gf3rnW>KgY$LBHTP+N^Vsp5ySh(!_(0MRhL>`&>k#D5!vG4`kF{2Q`+p6(iX! 
z3l%#k?_!<=S$@t~h~DYC+#5A%$Xw+W2}czp>hF~?^$0`cvwhV%p95&>neZa+^k~6e zU}rcus=~gqpHu>F5oaOd0&B1GshIw4jJ@)$H%fxyM?V2(Li~YE`~Z6}3C3fg*EIPO zWR?YJX={!Bij&I>gjkj#erdP&>q6mHlKxC(49G;$tuneBh?3Y*mfvZr80Lst8{MsY z@}O}wljex;^A8EsTfIJJQyIv@*sl)hozqCcml6*V>;4UAp$#HuFVAs=;1!4YpsJ9= zXubBKh4}i5Lmu$-P>4La-e2Tkn~8Bb2s!fC^@8UjzC`75{VpBCC%`0XUi!AT=#Fgk^}RT7c>eXt z#V_Zg#Bhq`-DSKIv#jX7Slt%hs!ya`ckz@gYFCb*7D}NrTOYAIdf)+2vq;STd57m@ zAm#}BJMM>Qdq*c-;(W!u)dA#SA_gDvP%j=nscfQseAsd4S8*47NJ7xU2WzGz#_{&1i1?sv-#TrY- zhB}E+l~f4uH?bCG_JQ^kANrLzW#3y2u#Ab{iS71t)vWRQ6RVb!FSOB{VIv9-hAa}& zeDqKv?H=+zrgv7kmYjP`(~<_oH><_7&50&%{`^w8h~#Eo!hGW;iJet<O8WG)>?-OdnPfi0*I zza}Au`YNb&8|E5R+7s=>Re~}DmG@tni|gy~Y`T-VD9rq5cHyRit-f^&Yg(;Nhf*vf zKz+;K7g&k=6{pu9l8Wi!wOeYkN-@j!v7hMWW86y6qGdw z%1dKd8~WW2f3C^PJcfBm5h<1r)XdR!Vmduvd$XSYy&cH6x%piP)mUONV5fV*h9Wk?J`rZl* zfj^>R^a9J1f)D|C{Z@l6>&#%Oz2cMwOZmifGLm{avu=fSyz2m6iah&aOw(%x#dZN+ zOM?310jaK~^o(L*3h-a{;NlyFf77ZYRWk85$jtXiJ!Y)PVJHMykkU20y!zwi3w#(ZCljnY>dVyfz-l0_w!^Mj1bYoLEC(T zS;!<`foZFbH5{%~f_rPbK}`(yaC$j9Y}(CQVc^WEKmGjK{uH5>#3W2a+U31A3)zxH zIAaI{G@nL2HH1RSL6<&J??!!WFq$zY+9N)*{*q%)ahu>;js)+3E=vmPh28upVF0-6jRJ~o6=AgQP3idp2CVhq)Wc*(_zxL?z=HJ zNyU$$CITj{K7pjd6fnTk0Xe+UH{ zw_;%_Z*X=`stjn7HG|(H7%~}fkhwh$U`&=dummn5VLl^$J>di*w1{3u`azAu$xmnH z>vlOShDCCd-&|DA0$B?-p;1@UjI^e#KV2Nd5qoGIuH5gaGKYA~fqBm1St)bsM@==y zDw4mgXocmh`lRZ;tNqdKJE@o&f2G_1p1b~6FYYWw0rF4zM= zA_XTE0h|&Mt$_`5gx1NR!_oi9vvwlFrLg_?vQu`lTq*%>u0}FVf$jkr0kXz?vc*XR zqHZ9)NrH#J8seV_wEr+4|A5Z_d(6i__AY;GKK=D;wLt#ARy5tqr34Zs{=} zqe54EnQEY# zj-p!{N`G0y1S+8M#!?sl`99rZ@CX>ish?HDJ)As%dNvkEXR}P z&#^qrxa*LkOL&OUY_S!7|N7DEN>7lDQkxY@?@6L~4p7Eb9Y8EIMz0;dj49HlE*pAl z@EoY)OL#KXb7i$3=I7W;hB_H%Bx29vV@wt;VY4uNw^dbM7$feFHm|7uBtvhVV^;TtKavZYx0H!3Bd> zw@2+5Z=%oU7+P;mdX6&L_-AU6ZA+2LQFHm5V^yPg9_9HnN3~%xX2iLQx6iW-wckxz|E-YFdJs#Gn?6He)os_5-F_h=Ox>@k64qRnrI}ot_HIZ~oDGDJmcbkEfekgwor##DsKRXH&G?S-0X7|3aheG+ z2U#wz6|8z|$Osey6+x2v!wHHlc0J1H9Qma&r;v@2ALGl;!Tg;cWUGwXF1&(^@E zYBJgiEN&HwJ0EUK<4mm`>IlYKrvNBoF>dOowJPn{I)BIc3r5?3NKS=f$;~X=+i&p> zj`Ow!y)VlW;?#8*Gc6#biux(DsUx6+csEBy%7iW!g5;?Ck_R8?NlfLxpaPMIS%^2- zxE5lIIYK5un!G~4kKtibCfV0gs{Nkx#V#qL36CG|#y`8NR@BeEwoJT0S0IR?oS(9n zQ$;P9OkE|Cnvp)!{RQTYEnqyZgk!MP+) zf*hUSu&N@Wv_$N995|T7?RN!#jq0Hj-i!--YPLW(I6K60alNxtn5@H472IY}pN;eF?sRN0y46xKC9Y?Rg=yX)ZnSJjVQp8S4x(px!2~{Y`a3!f(1&kVJkyjWao0Vs1 z)fT;$xnIWzsy6@dO0iivg;z^5>I^g63GlWJIug#eWdts~5e~z8ouj}mZq)X1D!${f z-u&okqv5fkE9Ig$!$Aen2XTb|X!+es259|0e--KZgN0V`ow0&ML;3miBaF~2FRn}bU|1r42uF*x+YAnUft-(Sq~_rQ zK59b~ug%)1ix}lnQNv<9`0$Z@ZT~G_Iaji91O<2fe##xMhM!?gt?oUGWp2ya*(Ihv zSc?Ole|Hx5P}KzifyFpT7*&lf$uZtc+8QmT`$XlZ5Cx*#C z62f^pO1O}*%I-UnoTRApP*OIq4O{=?ZAX<0-Hi==1jw^(b&uN4j4$V>zPLL#*18tA zJKUiMByb(eVbC2N6??7DpN_OB=3ohctfcqbKHO2hkv;t$U zE$Sp@Z#s*#r?MdU1-P_INcgvS!aRQa)xCYpMe9qIhYJ&}mc@g(6e_>Ta~qoWM%RpV zWy%kf$ph{ofqbZTHz@PJ#Y`}??oh`F@LvdrFo?Z$0$j_~9ksoPuVBtG$A3fgJZNDv z2H=}K6mIh^CWjwzC zGZcohoMU-$=}nC{;P+AriMIdfCfq`EWpAVH2|bo?u~pk?qpX;`^a6}{D#b`IdptJ9 z66&m?bw|MI|L7e;+v=rF01DfEp`{`rw=iUaQECI@Ab_agefpYARY60 zJVdkFh?pR}4fOq?nM?`PqJYNlY5LN&35V_pB)9 zV_pVXZ~pE1#S8PsZ}*vXMaeqRHa$gv(TeYz|8^wKB{KtxrNWocnq$i9HTfKNBp;T(cyaU^J8YIQ>3UyF`e;^@c3GNMAJ%Gb zNJ+Px6d^0T_W62018|{c1fCOO3!?}(UhINpA?5+HvVwSzgopfDRX$ws^^z zLgqx|MT=<8WWfcGuUcN$9>#WnA&bnrj)Sa70-&4J*Yu%VsODOZrp1T3XJ`OV1dJi( z^tJtZj#e&_50#U!)9-78H(B7F4pSwgs25P3q-Cs>Fc~@7E4h|}os=ORUm>15hYn?> z&1;RKZvIIXC7edOBWP6fMVqmYJYxHMufwBa;x$k&65x4PApR_oT~=$zi-0kIyt}iF zXNK>Lnlc2-(|%P5T3DDctHL%kEaP2h;VZ%7^5G!UsjhIwvkY5XojcVYDq5l{T?`K5{N&pp9EzlAiPQK_01wJp7}D zb%qHz$F1WKYSP78Us%(@GdxG?06RXG1p)eXQ&|trtFkjhai4sRuP^VHBP0)bMYiiL 
zj@_>$&X)ER#fp*)x);}<6WlXaz5cMpgcXRbj6e~CQ(x~$XydvqUz_l?>h*A_`B&@l95gfCwXgv%VNUJc1FI`jvi`hpaUZ>4X z7uJq5Cp!yAWcAqNjoIi2dKvlw*k(34D7d_v&F?(VBDy^poV9J>>L-UwDw3@>h)u^b z$;GCU25x1e+!+o+vxB*O(mCoB2X%$D<3W`PAeO-=f0D2~WYz)@GkwqTqeU{jMG#fb zU<)5&p*o5x&E;3Kx;XYgI8&im>Mq{fz-&A77=3yf(z%q_N!wrBPVtdu3}p)5zd+!C zvNX$_{>ni9{au@Xb-~PRe+LZGMe!o|6a9OKg#SP(01HQ;LF_m>gxOgv+IHtg2c^x! zj%Aq!Fzu2rXXg3dLC74}3_PDcVOWJF;F#??I_LpLF2k(U7CwdaXCvp$vXr?PqIFpQ z3z)4g_$~o~&;tFADFR7gwekZT3uxtrjPNh!VP^U#0`h-3VrHiQdI9$Ti4XoGKbxfNyL+&k0iXeflVMaJ zPcHJ)jKyb5;8&OX6gjm<9lh7I;f$f--0ngvA>WgB=j%;^{+a3HhvB1T?RA=dR9M=z z_LLdHCuC;gTm8nz)xZ?1!Hqdn zL`s{2gt~v9H1yEAF9}wcYL)Q?g`M3B0fnM2&$Ci#%}nKDsWpcOK0K(HPh>o8f$(aFi(c}@R61WC0po`t=?oGIEs9p7?1J$Su0IxK` z{tL<`GbHf_HMddIZk0M3q|ts^q9vHA#a1J$8CMVquSZOF)C%i6PD_LU(-F2bI0<1Z z@~pvAPVRM?`rggtL3F_(ke950CvwJHb)3(Y8OFXN*i(+|p{aHwuF9#iULQ^m{%7R)hpUBEOd+uV#_a?!EZ0dO=9 zcTXn2`$2pxtE)GaI~#UhIvobLOJ_KY%dNA0<0_s@NS#Pj%NNcollO;lYEq30_3vH@ zbn9t%nuarPU5sNDof&@ys?&wM%-8f)p|BenehJR?J0#EDT?yA(EjlM>;-zE|J4fU1 zPUg`;-r{~{hNLYQx8PXkmRON9Nn!s$aAdfc!+2D|!r_chgpi!VU3n;~E^Ti67A3ai znl{JlbKxkP9M%^$)gY{=DynaEhC{~>}G=uA2JP+5Q5{7e|N<0^$+$ahuK+@=DkCR$L`-bFq~s9u%DHNdn+9>2?x+&pt-&7A8Z%{Nz|Q zs^~Ww)vi5y_<-(-p4&DIDcx_jybubpK)1E&ZMI>wHRjm5a`>=>n`m6Sm8_RNU%t`a z5fPN(h7S&6iZldg5 zCA~`ZeseSD(A#SY%UaJXe5px$)0kqj%?;+=FN3g|0*YtXh+ZEI5@4?)uSaaWrqwC& zZ31dxq$*NF|Kql`PqbvrHXppIcvwMw+U+EU05|b4|8#{v%oL1?0%hpyDn@`1!pbqk z`O=5#V(!*cu?)u~9~cdX$Ek&~eOt0%xP<$`ExZYb#+4>YXS*I;2KH4Ks45C4GTQtv&QRmCdq_?UB`GWkX0$BMd z=5L#>Kdi?IOWilc!*C_R>c#Xc5j?jeEl~j{o!Kz&tTb;j7QG(>!f9yY0ffJ2rYD8E z$w3WG{OmZhE-CEMwpDa%c+{%cCfC0=XX;`T^2D0)PF7dD*4d(u{88kGi+U<N)tubBIIa$^Qe!@Tk#e99+Xv@3Lt zji8Z99O%EbK8CK6dB0wA|%$a`5tZTW!RU)}3uhSGdK-H%}*V zr8B1f_(&Wb)E0X0QQGt3VjvZ9EzDIgNWg_8D9)=!3PskavSA9rvy>nD z@Wkca0ex9GG+Av0&H?m3efEDe=-aKF@kO}xw>!UKjB><;fymRrh6-K;nMm|gtr*8c z=-IT}j-YNd`7afVOEI25H{*6`+dS2`xEUol4#^4c87J3i7H}ppL64$Bo#;OzoKisX;; zGD_)3LfU6wy)&+s9LIzlScyB;53N)fJJU$etT~VEe?IxMvDl*nrNLEvf}ZEV@<#^G zq;L}HU2S@ptuyQX9C0Zho*+T27V#`rxsVD!ww{4Yf)!mlc@G*@-sTgi_WiX!&Z$EY zG&1DOszpXfVB}(GX#1tt68g$K&{14kHH|S!EQI1F)#R=_7lXISNQIws8;FjrE=YgA z2Y=P3sMHXhD7{NAk}9>m5y$qKfG8jia@4<))({3Ss|kg`g|o(HC3>{$5DKSNHlk$Y zAAmv2qp0MmnTfnCyM>p-WaOonTMc@U5s4(4bl$$VvLj??mfXxGlM}}mVDu_-y-qEc zY^Lx23*tReq&O#tmq}6xf#?P#_N-5cJTNV)39ocBE1hI`J!IhA=Qk0*Q?H5rB|-j? 
zn)w&+8UGQ9|I>#dWjfjFni-c0ayxh$acXhU+Km8Mlt`Grco-6l#gDZDGQ<*G`L7Sr z==S11{pBhCQ0@NU;D3?T{=3F#{_$Ub&%uAlaer}_|4U3`qG$Yf9IUnx{hNb3pA--} za((GT@`-=^FmaeUKPR*br{U1n^)sZ;wsI>MA;O5qzPk6;$(C7n5c^R5Mbg#-0>}_= z?D~B3?tOIqm<8|8l~{*|TDLYJfW(iQma-iw(Yur770zZSc<7Gp{l>MoZ#Kx5M59)2 zOsG40M^wu;&NKVu`-FFi^X%|L6ZGnOE=~ZHI-8e%Hr%ytPIJE zkilcWb!go)EIfJxcx3OP?=`W%Z%ZkyO17#sd?VGa5|F8MW5Ss+rq4W~-}xbUfZso@qxV@w<9mVd58x0i<6Whk z!9;h)LEn923TBl2uK*|NWF}$5m@Q3YCkuuI5Wu^T{kB3~ux_3McNIQhP`^1IW%oKY zlFGP$AtzKc>aq@IvVXEj-3i8XBWfuL*RHGBy#zBs-B<+CEz2^L*z#_c-bCNwdsD`C(=-&{>qh&Foez+*D7%6VFX*0v?-?=^{qS0s40md{3p<5CziwMSd-& zq8@Xpmi7z(oqlfWW~=k<%Z03(Lm^P|mrC48eQ-P3 zLbOLh(`T({Ax&a>17Z>@TJTGZ6@Tqnl7k5 zHFv7*_7F*g)=`blFAh^H(sThF<9GDSrahMMApc>dq2~N0*DemS9oEx#UiK)Bg-mwV zN(${6X7C(CEeD%Dvy*XpwmqP6D}p#K2^}8xqzy$dAcZRbeyVv4iM%9C3Etk{Z6&%% z4r2!uVQP7@-p-0DcRh#}TaZssvTF>N8OYUv&{@+|IH}WC>iEH8J&XCOCX;E(f=aV5 z%3X#^v6S~{_v*0OqP8wwv8({9ZzLqpQ^Z)YoQWoym#L=Q>KiCr*3g;1H(LZbXic`; zA`jF_&&ffIUVOJIT@8ZmIheE(%i41S+M%ya86p~d8*4QWl>uMJB=mH5k?}u^%wSm< zm@9sA`;lG_!Zz`#heM8lfB;PcT@?We_#$FwDC>h531GaSK`!bK&^t@Y@`a+GEd|pq z4bwH4D9wOmVp0CChVKIXp7Trn=a>$#Gwsx^<@#)vUcNxsmgm81rhv11lD z=oA%i7VlZu-RDM>bLe3<4$(673ml#=e)bZ+V)hm^(LP5O0@dG(VB`mLZbFknLocV= zIz0p~wvq`~m{E0gg-@?8I5CpN*o*gAeSc!X9(|)X&&T7(k~m;~M3R;#{bdkC9WZ?I zd&@aEA*lk3W51rygUSu=iJfJNh^(O-;J8V96PVd)%{KQ%^m| z$0aY=J3B9WXY~XP(I)tglaB8vC5(rTl72B|Rr4)qw#2s=tKx_7g;Cm7ml>@~ZCs#M zX{}2z7IZCqh0GYI;k`x4(4se><_4xo#>V7<)sv!O|jew%x+KD ze9EzCT#|TD1x=Pp{2c+`clMYs(jvaqAYMDBoGu*QfJr|rNn;{X2E3VHtd-31D7Mli z-u_6igOe&dqC8&)3-Ok&9r<}c5zIedSqY9tDv=10UuFlJ+v5pj?=o$+_mKXw$?$MC zkDm*m0ViXhjtTRS{l!`VK1chg3?Iy-Ow|#sFF?k?aTBY8MJ}=^b<2HRG#ZA1jn@bn z{JLO=%V+A><^WOQ5Ph+bi`Kw3=m*4I-$vwpE?` zPb^M{_j+2no?x$Jw`zz}>6zLX=@duc5N`($31?yLgEB7R$3RFtc)VJ2wq31S43t$c zz#qMKcEk4ilj{SmOKM2_0L+M;TTl*IdWXr1fNtJd7mp|}MlTxer9X)F6^~qr024bV zLga}0n_7V=WO;G%2?m1>rg@ECY8`>-fH4{Xe}KlHz(It_E=oo3dx^LSq0tw!v2@n0 zVqP_*FZd23*7%UJGjII3cz7-DTn0N^nOs}4`@te?z4a<0)MF~_L%hv9OI)_ApQkb$ zjn|!dY40VF8F2$9x%sybdr41$RDU^wJ@0x`~7g z5tX>`TUF7y0BfM($VwoymB9k%(hMX2>V`^F43oA}Zzh0@Sdq%RDh0}o;j8ctD5(Km z&bNfmJV3!+1Jb~wz^sxc)~Zr4h(NW|8wFO4ab=x*rk&$-2 zXpZ&+5?l%CL=a5wg94(XFG|#dYKI?|Lu>g4x5?~z2YD+`S!W#5KAjCGel}Ro%s$7& zJ}0-Fg{GLa8Wsr;Q$=<2T2Ql*)k(Isu7PGTzeP0=riJmM%T^8j)nqih{soBAdm}S1 zJ0IUEy8TgoHi!dzry(~m+FCYPWmrbkjp;GvqadIsy5^G!XgCzfpMZn2=f?wsG@hK? 
zULAr{>!;YlB7BY{esoCImQOQT%gBqDl*V`MlA7*%+`Qx)%B9ViiujQ-&3U|}z3nq| z=JvG)Jz&VS2bn`0W`!%A&rHp8_eXsdX0*0Z?epM(h>a6BtmmwKgtyc5mCI2Mk4To> z@Ebqh9nHOPWyM`;AD|IKFk7NI-t^sL%8<2#n#WuYn?sUCIG%5tGY3!YD6%g76+SDO zkKXie3ctlD$+=n3zeLBspC^`;&6Zp+BSZmR~*&58W(8Tk8h%$ zDUvOp&%Rt~^A7lJprq7V+oX8LJxW-Y4;QW|Z9%C>I)hUC{a&p)S*JW$ELd|7nIXzWtNO}yU*vj1x-iH*YiwB zmWSkkE$r8uybyg%N0-B41RE65^E(>eV|06z4;)$xUhh_7RlC$-(WU|a60P+^SLIjm zpqWW}9gHO`5-PZ|Yh(+a>JJ8w+)`pxL!YZdNG1u1V z{<>H1H?K{_sq*pnyHIGGvmvgBNFSQ*U7Rym36H7*@gwGEVtKzpSXBEb0ESr$-Lo}@ zwbZAgFYXMC1w6SydiW!c;mIHGfy%Wzdm%buco6PFb}a!J|M_?mO1?C?Zbgg13=E z(BkSNplBK*G!Fl+;&Yag@s{rff0l17>%bi`GLK2js z5&>k@G)~WK0dlrJg_3eYa=5+JTc~9u!h`V}B+=q%07(ED6OQ!t#S*l0ZCT&lI;VZy zav0|y#$mGv_8Bq>hvT_(2DojSwB$rI^^?O0Rt8^e#m?LL^5d-4A!DG}QardwoMuK{ zymZ8L5s(tXl0WpKH?8*(3hgSbDj_1R7&=YQS@YGO)s$=E2?k5MA$`}_sjf~&Muaym z6P3@LwEh{JfD?f{7iC za{Ob<9Dp5n+gqlkZn64Nn8xS0Bf4vpGWlJK5e78hB>V=vX(3~?FoV(o37g{8bAVR zHWL?><7>i9!w3!*Ir7`kiy?RyL66HiQsZMQT1{B4YGwMG?VmL`dTY&8F z!DH0GJ(duzi1sN#L_|gN6GcSal2GeaBv8Z1`}4EP)61-VwI6-mjhB4k0MR-2qwRMm z*&sr80CqZr_+~Yfj5Jh)0Xa81rctH*EEI|roe$i)9P902!=VHMx_qe1I47~kkuv*# zaXEDUHdW5(~+B_>tBTpmvocub&66kH8QIQYo3R^&06^d4eNn-DjV;2s!U1fW9XZvY2?&ATKd`WJ@uo!#rx1a3bNP|f3mg8@NcCXw;sD2r!4qQSQN4P z8;1{ZeTIJerL6Z2fhbR8XRUZ!F!3OhWsH4zJUOEh5?tUF`cwS7sf%~P{opXz9ISu+ zlhLXYvqn4aWH!QK*#rjFt>`k|s{0x8Gd0HxfYvO7DC zR67!3uuSf!2jje>aaYYb6NJCPD{)SofB3B6z&82|B}#mRNSSDQ3fy`G-(X|4Jx3Ye zpp*G_M2MTHAp{gBZacl>R^#g8wQToKHiK+_05Hx~GJyOJ6eJ{AP>+VpL=2a8o`xj& z@+TJb5&8v#UBK?Q8p9P#cOAAH3@92HVwNT3;ZPt*Qr5ygf$lyJ_!>S6 zc=!P-{vXpNRTxiT1W^GobZWKRW=odV1R86lnhb(#hh?8G&mr4S`O(GWAwZkRU$j^2QcBLN>vS@>dfz|z$mNF zLW@5a+h*Z_`Gkf9>7I=HRjbN89(5b2^D!qg1gq4qpk`U6<(;=ucb5vWRHr8c0=ZOS zQ$5d$HYdU9+=YWtk^@yCD!zGxc7#=csfKqb9U9!hcCmAe#@L|~`K=k*lEc})uC*l% z2%rvXUVY#4nVX(kvd4~@>FyNvbBRRo>o~AjjH^RSjFN^c{{&N3vK2!p z11|IUYbs(b%H$z&L59jyq1qljb%NQ%2C;LRBnk>&v9(R3P8!r@QD->%xC3==TJWjUHPS(F-jjd~FRQZ*{cgfI z>dLy|4Onjva#z4V{2+b(;h_2rp5x^D_f~E{ngF%-exgjRNc6F+sPtWKLx}bvf+1-v zlAOufb`?g(ugrElsV$ZohzJ2Xl1Q*jR~yfSBEN1hNtqar7d89>Dp&92QJ2vW&1q}# zFMt5TxoR_VM=R$I4YEOgP<{F=bQlBiO3h2J0?h;KMiJ8+2|K1rI5J20dS2Tbe z!~|hN1s;u9+2&nZMyAk2+o-zCapSJpT{M!*j}Jp3Q`4t5Di8Y!Bo`O`m`tt`)iF0c zD1EHK7GrIn&SFIg0c)porS4g?`!10K>4tpaBOnA%cI(t1nq{6nRS)(vOxL z-hoxk@WHN5audmr9@XzeB*f$f*#R34R~#31z;>lRj|&S7(SCY7S5N!*Fy}pY^QK{$ zckVA20$&=ffjYEw4||Z0b0E=VK2INev^as^du4M> zr;Un?n3BPO28jYNGo}}@(IQ2T_!=2C6T zF4U>br9UO$2^#afkUN1F#d(;8;5nIrsvm8v?kdQj(})_q>qT+0%?4E#^YkbBxWP~6{24l zK*?zgLh>EH?1`{Gg6&r>AAxrqcWsC@EZd27-)9?WeQj2LTSm()=`6NT3H-%o*YB4N zy~e*>2S!i#hwSrjB@zAq{XXUY(~bGZ?u5U66ZqHf{pW5B9V`2vZvySA8a8Lmh~B^5 zn0I0ndLnrwk_=M7#x>!ZwYglrciHW9LS4&2d6hU2t=;CUw=S39eCy8sq9VRH{5TRj zw!4YzG#k&iBvBFPiCvkqfeZHuNTRbO(7WQHFmT>O%9~I;1>WoJle1o;HqZyI%Y+Bx z9zsfgI)by0-H5xXt`>rW$*C+K`yD56)E<-PD|=ExOm?4wexo=D5Y4Y+f2*DRODG;g zYXBeC%B%cYsdOEOj~88)FN+Uo4m9cw1iv6Lwqg5WPKL_`6I{hita} zJujz^2&>eUo-wcDV#<-X=sQa%Z=d5~1P%giuc{+b%IB3uG*IN!h>%O(>G?F@oldEn zUla_^y^}@oruQ5Yz=9JyfWAVh8&x##fTjDk<1GXk3TfgZQo=BdL>pm@qiLu#n|2{# zrGBW+@14SX4}rzv;o*gC#0{&k9gS18<&voJm!4}Kz#?ltz`c0$Adqvd98`Sg&j)#b z4zr~O7qU4>Kq;a=cfpdaW&Q#-%Wp%1JKs~Jk(H0LIaf6!OJFY9?s_GPI^BveWeVIg!Ai;LF#?qk!_8Ff~xmM7P#ihw}T)~w=paXG_ z;M$`_4}YA(=~8N1_s9qJQhJ$0T2Ip4s-&B_W8QD{0nWwYJ^sBSwv9E#<n7|g;C7=3hI&+2Bu6_Us8at%AD}p_y4q-km zoFzz+B%nRqWzx#WZTtE+SOLLRq-b=%WRy%mhf%^%Hm<%tN%T6<)B?W%e%~74SU@_J zIp*nj{i8upSU&C1)M6Ji?Q(mz8(Wo)*vc7dgO*9k!PAH=>#KQL+{#gi{QXG;T(TkCQ4=&Ob8%@NZ}qhxYJN}hE*&af4T*TTgaFt zp%9-*nwI5eEws}qL&;G>HdIUhh9XtENA)CArFL^qxkW1x27-*0r8*lI?46;#yS57S1!VU{CB7$EBrwM7eQRAL4RY8KWlP;Q$LdPt!$`EQG@=!HG1>VWIm(tLQXarZlX(2ysdD zOA 
z&xYi{oBnV-StFmU2rF_vEK+J7AQix7P6~&Q+B*zG}{JiS$weJ4x!kH;bMXdpM^k94yhdH{R{+X+{czB zc0z$jyAuz&OSZd7 zEqz&=O`Q8$HoG{P>6|W5(4AiZBL?@YX z>|*J9c@JmuJiYC-fBd{eM-%I!WOd_DDI@?w3!2M$1uJJMUb+TDxJ>Kp211TT{h&3{ z@u=}Be0f5V1wJyta@ASFpIlr!KL7;_${n!I?iyWlrYq3-xP=5az6^8FDbQm*!@`&x z2b>&bV}xcIA76hO%wZ2Z6uhHrjRMxEPdoiSgLGwp$Ne4PGQ7a$OC&L^c(LI}WJZt^#Vh;-qmi!g6(f<)6`WKzo ze-7Cgm{|YcZgAD7!+rhBQ~V*L{DEx$P>}z7y4e5di2psZ{UMY5#a;d{jf0-$-*>T9 z|92ef?=H4-gcOSna1rxUXGxcEPWHYGEj1lC`ZkPg`jxR*F+&&K@pm zFx)ugwpN-W#f-)M5L*9=)U+#O)`XRD6aR@{ti5}gQ(W(iDkVeyf#9=!O=(y$71qs1 zFMw$iHbUb>7=@OPj&}<$LsQnnUPqsIWN1~K)M9oqGD~Q3;|52x=`DoOhJk0iZPhsJ z7A}!#>xs`kmtd0|4hlz40$W|n3t#CULX!m@4ra6LAyhjg)PqLJu@1J9u{xPIp)CeG zJuX`*zuKZo?_O-&S~eOi6O!;mJ6ZM9&R_P(dj}eo@7hPGxJ0M*9ZgssmsM>Mg%H!c ze2Ca0Uly6tN^2+9(E1G<$?KwC*;e(xF`wcc9th}#(4PIsNJjRTj3;2(Fd2~E5Vzr= zS3!GRsdp zo@Aiu#&Q}eK_iDVe-Og4R_VYh5PMgy`()T?M{(F`h?7a3%oN|tbeO}$cM5fYu3;$R zu~c17UP{(j8#AJ^a|aUbwvXwSk7EzjxSn{2YGk3RQ6oBVcvh}i0;;j-jzWi&CX6gR zJ@hTvl`^U(3d?q<+PR|Ir|i^-MJ2svMZKfCT$wbp2Je~;&Ph9gMsR_>(l%BI*ALZX5Uy)leAgfBz6m*bcLrrH%st*n4*2)O=`}@Okqt5B6Ut5J zkYG#YYD9b)HW%lFHjToMKrD(HN=@uuN;Hlz27bF_9Ns#b;>EyNZWFPAmMZm8BRd)x zzE!@i=p^syz0o8q84Y&0wg6RNWkX)wkG+Fql6sg}kEP)2@_jB24E^SzCoCCH1(#h|&FeK_UO0Y-4{Ej>tg$R5eShaO}?CbsntBip`9R+i_61V;Ih_znIM=J(7 z)||@{R+j)C7tte@lPFyajm}Y}D;yTd>igfz@8gbE3w$N$@vrbJWwB06rf_dmlq;f% zX9Mpl?O?mD$}rOoE-pBWNx4V7WTo620Y7ZS67W9n_adHr76h(ZZ^8TDTbY z9Y5n^G2JJ#^|YldugJOuv!yI+f+GI*$`%7>_q}Qf6o-jTO*rbZ{9wi07pJ$I9jy#9N}yS|#fbGh>F@V|0E$2Hc%^)sPxWLy`SVJO3HPpA8*gXB$iX8Rdqzpink zNDQdkgwlB6mDTw#_?#n`p?OGnvTlXjKG~3Pg-{Fo%r|DG=opap75A_~(yY2flqkO0 zHW)5s9?pu;cS9?f*8tBxy4o?dMfWJO^tOvAR=?|a`pD=DFlE7 z0I}LTR;k3=hbgr7=4kGnv}Gd@#}~yV+WHVfXbNm%3GgWE($%Q5dBSDsGRn~Q_7_KI+G+4OpNb1cDm)Fy4c4j&p6g`8=tIVyPAmD5UBCmJm*s&bZsoNW3FO(wXGN4 z;@e*zhjg3H=zEcZLk@$PenD>>1Xh%oql7 zm_7-VCX^wySbmzmrVIat&QCC@9r%q~s)3WLl`L%{2RdkxJ{zt1KF^?q9+WSEGa+iZ zT|5MkXiw&i>uEm+Ei3FH#oZc1X&8XbmPv`Hx+zvcw>aw=WS<#kc}Lk?e1oga^w524B8ob z!DV{;E`GdgoKw=nA1%*U8|)Gz&7malxe{h$tSN}Kr-|u71P>(vh#7TNEM{Gww{Gp_ z<}TQKwlPFC2@RHM4Hl6|US-F_b06}4b2`YuWIaf1<>>kQwDc4Of<@|>?6QHc%ODa9 z?UcunrrN^R~tjT1)~(aTv47*WqxCwEIa zk!^q<%LiIk{7;gi(Fy(r>e9h`aXBXji^XCBj}v7{b8x%_YEPeF z6$)mh;!+I|)pkQU{Bs`N39;*F#~WxAO(AZA%}dopZpV0*?>RY)H}AWuE?`ylFt+(y+M#s0?f;zXy$euveS~-)jaeLl;cyB zXap^`?yBqxtfH*5M!_oSI8M~_{ny`ylX}YD9c9}aX)#I*pq&=&-JYdh<~R|TZ{MG5 zm7L;|y7Cwh&Mh@!p9K;}Eo0u+vmFu}?yqrzes~d`apRJ7=+1*uIyzO)Q9bmSrG_%8 zdWn;-Go&ZL0ZYnI;oKLkO!)ks2fJ)R8vjdx`g;TQpTofaQs)lr|3~LezZCcFFHiA@ zlJN%y{-fLW--Chw2#xuB4E#f>_!|slXa08>SdaM|0|y^Abm)iYAgWC#VNArV8{Ndx zSjYLt9)opq;Y#q^U(h2w%&lPX09&S8Igs5VEwZ-432!BA6D z*_y1hHfEGYPx)SzeD#w4d_C0};YCO&1H$9^=6aB@ka+Z_8N2cx9+U}XC}}*U(sXab zE1f@d0*}|3);&c5>G=%vCBvqWgEq_SX4or$Lhkyt2Dz>5$eq#~;)v9J=1F+WkebHfW>>zK>EN@BHlToy(64ZRKH4!QKADa zh3m&I!i*4YrbI#|b!l3mb7~<$4#ZHm<@b}bd{m$0WRLis5DH+)*Cyw#4N{yc-caqc z94&vEWJbna!6VQ!ZOLMcm7Li+5mS`2pqDdNZ~)(UdblSxJEs2l?H;k%lQHEZ&*G;t zuq1iRlV6V*r*jKGfBb|$69hI49fk*kEu)^HBSVi@n64L%>KJI9W_bdc6-+FeQ>slj zh`3hLNnhjQmGilHggcZnByoy;jA(6x@6JKTRmcqM<5JE?0~)yn*$+6j`}c0|080?g z;AZAsQLTck9(H?%sh&2VS5?E-#ch(YFyELH!_n?RwMAE)=C1nXLRgO2Qf&))Vv=Oc zYRgmNjpJJ!_8cpM6tYl%HiY#-Ihl>7dLVCN(96rS_s!g|36-yR^CMwVFbDxWVeanc zq-MQeTt&wb-D^KZGoc_MAQ~-@`&YB-x@+Z!_2ud_$Z-ZjGBDaxDItwFzXGzuBMB1&#dX_cCf&7-WHUcg2Me!1fp#O0oc zz0HXn>(r8Ft6!j|6)Q|)rnsdXpa#ACY+p=M;DBPwO^mzF$BaB)ZEl)LTpJ)d963ga zz-!RMxG0qo#=Q%g6w{7(RIpGW7&Bs6a`{$&UM)(MXM{-cPZApQ-g*B`A(8Z z7TaqOmxlAoaj3zc50?|AA#0DUmaRcr@s|D2wQQoQu`DzP`_4U86BA0&Jo!!4=!l`n zKDT#JxD*1a`JfG!%!FJV)c#U0_1Atidx@mZx9Q55^ebEf#g1apJcFny{Ig5!OT_Qi=GWy$3spbS ztWUCBxFzPePY>j$ESQ*Wab@e9m~qT~5|+ps#tUllL*p*vQhh$3dkXXr#l{1;#ds}W 
zV6C?O7G&=6!&A!7b#+sWS4a3kVBZOj`S18D%A!I#;YpqCY@uNXXCFcCzD>Y8awmIw zK-o3%5WVw#VX`sT`YkRLAdYy^M(#BOujdoYr9fN`WN1hD+ezZJ6EE{zuKMTBZy%k2 z@n_>f0<+8Z-(<*5=3a%N(8~eo(N0;62#rSq3sjFBGnfG%AMVna&S^Qi(?9=!!0Np% z{(4M%yrqrm#+Udeod)s^jTKl;NRBkQc$o0}&e%N533@@FALkE_(LfexLI%*` z@%K1OTjxGrG&QKeuMTR5w-??+lUORqcgR(@>S!rZSPW#+c} ztRQ;jT!3(TJoB(DG6hCp_yrjP-nwcTlwXNDDbsD}M zG2AUDV|730>vxJJv*fk7jbjFB+Rb^cq@kvmLUA#;lcWRF6v*h1SZ8c;4e?J2T$lvx z5mqTPQ$N~LSR&$~x@Z8VB!!?=R>_8C%RCfsBJR|0lT405s- zdXoVrT!Soc3h_M-x>_99G6J^K66!-7z9DNu&wz%0?@?pttw7Z z9jrvsY(>rhF!jYmBs62H zk}C05%OPs2yBp0}RfOW7n6Th?>kSRO^P=(PNa&7yvE~`PSTfyy6&{=0Z6D{>VX#oE z$0k4>g0s{jYZWk+S59rg@UVpxSGG>3O({E+6r6oF+Da6?L%0!bp4jJMcnngD;8f>( z%p8V^%at=e9Z}OU#%+ya2})rKR9>tp_U`uTyRMaiT+o-LU_gE4P8q`Fx}w#y#y>gT z&0Rw|ZCPMDnwui&#GJgnTPJ%UmVsTJa#=ijGC@Qo_Rbg^2eB!mVKmL#3$i8-A)dl3 zThwgvmeReHqpg2A^YUlT=zuRq6GU%XU3vqvu94|d4Z%+wq9Tb2F}tU^HKo#-w@)&9 zgTtinxZ%?U2Kb8EtmzQz!9f@==_>bM3Jane@Lcv}9k|B{e81Qc!KOuZ6+L^u&p#Tt z0qigHfC*OI_&7;=%|tllS`u`uCUCjMCt8ob;s=+}+mjCQ#_GDkIJfnBPw*}gk1M|) z;g}+Hz}c6*1gB|p1P;AKjN>eVyf5W$&lA=!r?~pENpq}j?jzY(NVO@u>p$q^v&6oV z`rN)Lr+b!sT~StceCnXR?JW)&C+WENd}th^S~N_)N?Sv*7RRt^u+?|NNK2qGc0fIk zQ8{~;%c^c1A@FGK$=ktU9@1^Yke1od1_g&y!ZR5HURz7@*uRl;@yd*b;?F1t6F@wa z(dje;WaIheB}R!AVVJ;GP?ir?kbLo7dt5p#d<>!K9^2%=^tdyeJLk=jmO2`@+L4Wd zspV7_%Vye^8N6VtX!`Yl#BC9N|0o7;($%`>_ps0sCJeU`qqD)Dz)t^S5O>q69!~^V zNv7*B5U20a_)jG%%QTP*q^|f&t1{MIqo+DCU>ef)Y_O9O3bSjqI`Xm19GKcQ(MJcyLPatZ{d=8Q_wx=|RDvIiMAR91gm zMv)H4jqIgMCd71{n)RyVVwy<_@5-ZWjTUzdtbdRK(J(nn`H3UZpye#nfi z{_}gpCtqBB!o5pf*%u@oKqeLu$X}Kfe{U!MhvJBx{qMp*7?_%5|2fgSRM%7w$|wzZ zWl*-Uz(1z|P|<)-&fDJBT)f`fHzLhj%v@N!JOhXCcatBd2;1#15AcVV@kd?s4=>@L zuZtM|H9h3F(*1Ayidp}sR`uT(yul$6@ zQ+ZkCc)TlqjukPyo;G_<6331I%b+0ycys-}AN18}YlRH1yYeH=?`uDN1750+^an+t)#nB#;wA2>dX&cpU zyvqmm(>mMn+9sd*m+Pur9dh$;du7Kvu_#VlYb>ORF3%>kRni!Adad50^SBFM`1>Ge zNxM=Pe$1GcBN~tGC>W$=_7-}Vbd8TCg}|R^SHZp5To!G}7|)zH4Nl+t7g*z!n`gAu zE@5qw?Il!fu0qC?-O5tq9j@#;#huYQWE|bHd$nezZdKtjPlac-b(}eNEk3;2DdQuT zIpz#)5DE32Txp+Jb2!Sb^BPHL@lbC{@g~ZoR2GIzA<%IbIciBqL=yx zdJFcFaKk$X2SwU*B5r=VEeuv7gya3I00;7w}ltE?r< zQC!U1pzjbDbUpZS03N0Ay#%~5Gpd49?sCn-S?OgqW}D!bGmjKUQMr4Md}Ais{8m0=}0|)Dme8sITLREvnjO>zYMTi3peO_ z!kwXR8m_VRtnxRl7Qs*OMZG6me+l?bCDm#ef|mzXS!udLA6wWXi!P+9kVa2oMWKhipj`}c~;$zt*}U@03Fu2?4}nMs%PFl0ug$*xmjD|uZxRHsxF6e|G}0ez1oR~zKRnOXbqSL!}%Fs}6ldK^`x59eW*PuCOH z-_$9>%(qFx?!iv`TZNc9QNQrxZ*Bw%?QDe%@(rC5yUnz&>ezWX=Ur(l+$9rz_@_3O z@x}R(VA+FuP*N;>BM|_#*)jM|UfYp}0q>vy$)Ct6D6GcNTWjt+^g!AgIwA29TSh3n<=fk&z6h4MomD}pokfMjuZN`O!IwZ5J{MjY!4%d z6SmDPfr6(}wH^Ip{3oR{Z0LVQ`cVj241{E^4P^vbu>-+08_Re0VwJ1N2F8j21 zPS|ZZA$`0-)$e_j(&~MM79p_qt}qOgd^bapx(m8;YpJ{3%7jwk#0IrI*8e=`@cDD# zGC*RnA{^8YGDO6^G?_6ioz?|;muX7JBZCUssuv7-l#6x-p?s^s8r--lEH@z`RxT5- zZX35l`CV5#dVFjih<^7Mn;co`gch}?G~8L5280{a3Zx8RF+V5}o3U;U8O+QW767~M zZXBevFA)XEL=sDBNEATZuNj(JSoPJ;hI)cnUEupzhjjwI3-pylhE~D|UHC%8{oXEP z!(9zAA&4u-2O9*~>s=m^1SyY~57`w!DU^VO!zDrWl=7FtW>97bAz9Ao$n|f%fPeqTPbZjHSYHK{!*_kVfY$*C1bbr1}OYuo5*X z@0GuHm-BYET{HBg$Z$lnnGzWfaA%rKhl%I=f+@bm4jphgN)Odc6@qp#pW_SU*aD}j zjGO$_S(g(cC>bA_t&4^jFesQw*r5x}_Z}%tJ?!HT(l;}%v7hX(udrl-Nj(9FwKq5R zZOi$=tTY&xvj)#A8k$TxcuzY~KY*TQI7rtpvJJy!;A6|-q)0JtR?Kn(;qR4IOPPlO z!)UW1PD+=C?R?~Zl2@?J_K^4PBRUPuZs_Ife3o0e8J2e8AsK%(yHw+fep%Q}E$OA_ zaW(XXt+O=kYsbTK0AnIK*T6^_LiqN^9}%;KC~4)gQO9X9Gzo1)8Z9&X<^yl!%1EAf1L1D1HyuQ|)!v=nnQc}sNcrgSBy!q){hy|N-p*n|REQ)5JuX`XX zUN&1SDzWiyzBzX0(o`r)3zE7NqhB%GKD!jZDe-%eFfb0+dVJt|pvb#UUe3oukFypkzH#b|QFO1~3c2a6)MPzr6yxoLu$!EUCB ztf#u=Qe)0oay0)JHG5S#-f;tXhMjtQ?GGwU0I__zuE5EL7uXkKDbvoZs86D9fNJ9g zw7=pXhCkBI{-PuM9{`U2Kil`(^#2iJ&i8+0nE&5IRTyOAe*EPj{xCKE4dNL7kv#t2 
z0rP+b4Ex&5!-`_CbcfsN@;h$~mqu-R)y@t&&Meif%A6v-FUk|*nyc4=e0fP0nY zMNUQr9xSx7LLQ+SX*u`!a+x9^`5oA{27wA}crL`nm2~BKMC5cg*7YyV?JQ#z);bKb z$njq)<3&7VJnwZ`NLe8Cuz~PCUY)lir{S+W@WYng1B94RDX}YO3#>=HI$Gi_cLz&Z zTCVE^{(UabTL;vUru**+NJ+vu5Ic8Un$#*q!_TP>u7`L|47QQk* z_QIJz(M3&le_eepVP`Yd>Iu(F&=#MR@5thCVBOO{GW)ASYME`rH#Qx79{6UQUk zmCZ5_sdDY(c>;#U<*2+Yo&pSswHV@?rNBJ(y(N3m-y^WV(J!y|7ANahZZE9kg**i| zuSoghC3UBSJ37+>?FAM&v??r~JCb81xlhlnZ`DlFM!4-vy8JL=C&gn-Yjc5^iCYkm)*A62f1*ZcC7D@BrEam;W=*-*|?~0VvEbj(3_O(pZ zZlI(keXhB7dj*-Ce`J(!c+F^upx06CM2-UNKF;JuwWlVjAEEcsH~_+Wh4ynD@byO6 z%OOtH#km9WNrYG9(d8jfH*F#^NM+^VK3NRIeOT3{ZZ2Ofrmg5)r<_EIOWbjG;h8GL zVrd|@ku{U1=05HaCm?it0g#bKK>|>{s8c(Fv8lzbx7q*~k>WTG+e7Y$Pxc;bRFb9B^t~s02+Zc6a95d_HZJz}F1oV1aJx>*P0NNQ zB#6R(V!}TF@yS<-M`^4Y`*+?NDI|+J`Xd1yQmO)1D4a?`W;tlKR8$L^FPp2b8O(r1 zioA;{Y!7bfrM(UI^T$dCcPtbpPP`rhFXK0;Yb7i>ap*Kit^>m=w0peU$4O3$WyW_o=jQb!=o9+N+p6;io-K>~{L=C+rNh}CbKDF1Xx zkf$XzOcO|;_p$#<7*%O~*K?mE35qlVm8aDA8Di`-1>Oy|!3iKx;0c9}mm^#_(5i{m z5p*NQ7@~q=rchPZ>DklEi=)b1uV#p0YRg_PW|!qWh;xfzPpAXn0`w=B)v~ zmu*#;I>f_?z|w}`&^;t3I|YStx}gfFSV+r9(})NQ8m*i z(FnH#VS)w%S!`r-OtJEWK?%QQmbA(yY^#ZjTV5o2gljiM6UlZZFr01>@KotKVANDIre)@ zJVb`+hiRi7p253b5YZrms#?q`%(qcW$0L%R$ES9_ReB^>R1ENaZi+-3-Do;6l*>hZ z_jr54XPTqWN2t87BjQ@R)@gdLz2_~u#get!i(>|q)a&K3;`s%m@;K9yz; zcNo(ihkh6W*=2h+zS6z3sk!Pw$V9n|D;uLrW=o~^FKXTAVJnJo#Z`gWGMZTH;_k8( zOkVpS?5q1mm@qs;aRrXk$i~3v9edv%1&YZtT>p~fh#OhPC-9=Q2c?Q}E@(R=Grfe) zv;+LBVfc1J0rR%6@Z6CR_x1Qi64Y73Jh!dxu3+DgY?KJmKUaQHL*MD#v#1~vNHo<5 z6nRvhb_OH+#xF$hRn>z1wxdra+Uh{$i@o`_0&_qGjQ%t0bZn{fI@6We8hqg~97pwN z+OROpBd-78P`eRFKk@oj88Wf-`-;^D2N@v8$Z&qlBk9HXUL#u+RD=0s9zDdHdNup4 zYMaNqH#G|lL?LT7m(+(Gv*^Dnu%sAwsF=74QV!raz}_f=S@RXxyxF5qYDXjz z1z-i9GCp@L;DR`JOCQ`NaX)g93axc<==i<0QVYvYiAI{3GK>@7dNL57!Rs>FLb(l| z>)0{&)uIP-7JbykWo2ZIrUV3P%&UO~e*_E#I2DzXl@+E4BGo$8=zT$<=1rat)d|S% zn~AJi6;uFOJVEW$8`;_>Ab;r9*Pa2PN4qMd4MFxVbR2hmz-?{s6(Nc$S#%>rl{=*u z50c_}W5B4*Zm3q5lX;A>sK>PQgABX_m+6G|yG~otT{ra1jd3cgokqpZr18a(X5pUm zvgfxwj-yyC(UMnOcUAV_-@rZN9VC~|qZnk7BUnD%Q9F~M4cQ*oeF}X_akM-os!)4o zn=5qy)Le>Feh*2@Pe6rT=?H4C;c|LSuqJSnO2BEgS_bzf*w6*X#a%KqZNMK_;hey- z6rhDGUrQiDN!)~h>$~Pf5PocjUdi=?O{%@AH%mAi8I7c;V9ojAp zVLzRg;vF#})TXzdr=GyGdt?$Ji0qvA8Z6-iZQU6MSo1jMDW|u6^Ve;C8hQDauXZA z%8;hT-6G>O4mgx%B*R5DOPk>5XW&WWKIL!I?)pqb2JIdCj5zVog`C~lYSIm%e|Pr8 zh?sZG%^pSRErE-nmlR@eyCJ%@Y~*J)%U@`(N2f!FKO`lLp@7vD4!xm(wdExh zbT-~^=S~9%)7kIMi27hAFyUSIsY1H=7N!PD9hzYD`de>?lj49!Q;c*9b=>>m6Z2^c zS_C((K(}lqED$65q+yPNT-&z_ygkZ;u+yi;+4^kvdyFq`bC_PcpU2-N5 znvA*0AC3`C<8h=Um)>zy`Lfi$lFc}wWwSw5ssB15{x;z-tPd8mcJk&yJ!KiL?+5#+SJi{@9<=gpQt zP#oS)eec(tl?)a!3Vp&Z?foiTB)u6iKXXXa6>hvA5vPa(VbqI(x4GOrmQfh{p^P}> zo0Wdy?^R%agza6ge{s>;d-llo(KKhxMkAF$(}%Iei%p3qz13_%^i|@!q2-R zA(~rs@O`{AY@J%9!#Pu0>1SOSxQdZPFlRYg+lH`_2`~q{ReCLe1enu=ARmE|6XK@M zACUh5QpA-^$X8{zDADWmgkDF2`yiY25j;+N(50$rB6##(&TH*FO)fL(!_B5Xe>-(n zfyjAR=Yi7~6$@Y4nhhyIXfRdHx`EH+Zt+|+deh`6(!Fu#)nrWN0L1iKsd|=H)x%8} zvSfTA48~}k&#zPQs}aqm%CCbSJc~8QO4QK_<*IoOxJ7i?&~oA*ar36t@$}Z_ zG>f%*593M$J8|6^vfk_va9?`Cc{~QO?F~9V8f~M$N)?|lvuUxx;jdtgU=J76I_AO` zAoj-+ctTvY@(bakuzqaXJqr_`!fvXYko-vM@MOMbz}G1(dl*2>`<9nFr+Y1+#A*-{ zbtKq8pjiJEVmLcr@J;W*;g^M=5O|K0BY!fs*XYDOIK(VKBnV<(vK)p3WOKNBsC0yO zMWl?&CABhaG9TIRH+=1}RuVEEiL6#gHRKKy_^F4)K_h+`5)CYKA3ympE2jgi=3!Z` zE0`8gfr{QfavPCg6VxEj5Vg)XC$iVY`R7r*100bCAdN(RzKV*T=yPes8Jp7PEn+YW zE5!Gw3I^3~KjM+-e(>L=T?%-3mAiL#*fphLdFTex0$77|Yer|y@1pR|Som_&yn`w6 zMNW!dt(3P-9q9DK4-CpUB5+m|%UhtH>pFgs@@bJwdgX|zu4Nx|v1gW$#9oaVU>2m| z<%3*7xHwBaAYROO$ZcNPSmWH|0Drq|{XtYSXyJ|@0>&ME914<$V8E^NB^F0vW_~86 zI>-T0;@ysdUw#Y(dt$q>W14@A*Xkfhi$!6>UtJ#;R8cV(h6dZOURJo(kAZ#ha$trJ 
z`i_UhByX7%vCq{!U$GpyOX>o8-m~z*4oXE6vK>RO*H_eRfMrj(Q+aV*#j#cOwlmua z@W|~RLkt~+8B=or9V?XgIV>sP!ZQ~)TxjoGei^|=_+qxKq-5F(D$jeU9z{<3ybDW; zpjwR}MMkC>^FFQBn`eg+&E*wB1Z*LKlX{9jd`oB^um-&Z>L)=tCR>6Xm1b!OE1Z)L z<)p3gRhHDvsXYjf&jrhaqpnq0740>I-=`im9kpXz{>nMkOQLsQdiNAPr=hBeaP@(A zTxkE?xdx)gF!j(zPe;}16XLl^?z~YHNf(5Bq;aa+)tm7Tvby6bfO_%GO9ZhAhtMY@ z#A2^(B16XfdX#r#diqPd8hSFBxLPw?Vhg8l0e;8u+3sdE5~d3~{wbi$s#%K=W5`cl zqVi@nc5i#vNqowVEma5A7sjf6_OHXU-7SV@ow6HlY_+RvHg^^$G?e&Ec_5hfJsB0p zuf)E>SUJeK3wdE$wqX#yVk4)3z-9|CDQ%B(qDheERtj*dwU=1OdX!?~VS78C@cf36 z_(+q9Mf1c$CnUbtm}u@2g?pVdHSx=CZQt<;i}ckM&8WY89bpV){%4cb67rjLwTO}W z7Tx3nPWSv(b`IwEa_I-chdaVpfSgP*)}p)2StonZdvW0GNvI(%XXVfTXvdP0g!x6g6YCHAHCV9(cSpxS2!$|B_uzdsE$ zs1sog(AR3}gXXKZ7qDzZAaUs6i-hF+7^Yf=Ss()76ATQ^0T_N#7E>kagkkX2aPpA(q^K0?q7J-QZss4F@*}fKTalM z+2r50(MHTm9(*tGbll9J*1|`FsgohliO*%u-EgQSydmSntWxyN-n~}oa!f~#y;Pt} zgILZ?^b2d%#ZJ%~bh~u;D=tubN%o7;In7FM@1}97ulvdJl@^t8l zKY(G?2U>w@UM43gte~yhHo1pa&`-vDq$9;hYds#5m$SGg_A>j6NZvbxedP>+if@rN zG&TmVl}~-W*3#jm==dPdRhbOG2}{+THJu*_E)k>UNZ6lrDqWi_#ZIgei*Ic}!6q2u zUuPfjV|xz+b!d&&RH~;QW0>F7LiGoxDKA&bWu9UtDLryAKtM;t1ZW-u2$GJM=7j0?>e-1wcO zxiVoONRJ#rJk!L^k?XEF+z4Pv;p#ZUQgDNe1k_{|*In2|*)%kLBD#5X_^zb) z(fZRGVNJAKmLcYbimC4 zy!xM0l`;W;u^0LG;uRA!km*+?f3W&e>;?y#&n}&mhm=~QAy zO;Svxb7u(!j7-B$-^wG3cw)S1432JwcTV0FBfR(9cBro7TaS)FZ3a&F+GRPOv{pi$ z2j;?QXnsTAwT<`p5s07Qopeunfm!ZMQGtMvZHw;%NLweM4&crZcgC#R^HGSVyi4hHbmOfpoF2;H{v(D**W=_UWV`fiuX%*Y!L# zji#YfKHCudCQPrx9D4;@Ckd-)*ii+c_r}(Usl0s4PzHMo5YLICCoeRen>0qP3yT`p z;F<2y$Vqg(1@I2fM)kxMS+J`>40@DU2O&puYZj)QWz~wG)bHws7(CuYD&wrI85h)e z`kbC2JY3qupU#MClPMR0)my82u@!kNzKYw4$^MX9vFy^e1yg}hK~QmwqXPM=Rg~YZ zF+^1|`StlF4~{Ta$QV)a>&3Du90JUCD~bxCZjOnR?ORId{Gb`Q5kXt&DxC;qY6qC9 zOHJGaI&tw_q_C+58>EE z4eYbFK;nNA+Y)E*IypFig+IAPHzZP1T8Rawlkjm2$1_iy9(BBD8x&gkM(h;+W_0E$ z4Z#sK&Vc+uRZ)eSOYrNB%%?ol?mEqMwu1&11jhbHo6}@umhY;w1^&XiL=>*j-ZN_U zG$K@=kbY?5B@VZ99Bl^MF^78g>a9Zb$thJ-HV4oTqzlorFMd+`;49_jGo+bcj|*Yv zFJ_!KWq^j+SstsgjGpe_G_qHMwxAghBZ1SOjPQ&lCl^gMO!Od{O4ZgkN2UDfrjP8Q zgX>WCXkE{L39U!T*Q!pL?DaQu=(dWTa4E@etmHeP>i%IYh3GI#wxHP_T6MVkR9Wqr zJ{R>U;gvnHnMfDXafQ)kXhJkg7ul+7jkLy00dy}i-?I2M{Kg!`Q@1#s2wL0$cgdEN zt{k5ZUTueX4SU~aj<~lb?G{dir2){bDxcc=!{W6)>E4}lM6l|tex3PzBJW&`dbqG? 
zl$JDGU=^!=Q5e2cn3hBR`Kr8SVkXzwcG}NzMvbm5*8tkRdGU@0me68L&%STDc*wbV z^c@v(FTf|TE6lAy9;+q;->C8+#D~{e%;`Pp=FzQ;T|3?*7g)B{n<%+U?ADl^zC&-* z6;!Z~Z+t|T%gvq^^JrG7&P;hzy|4i@fzIz_b3l+& zm#TwrTpeA$({M=pBRz&-rz;qWMHJ)oBvhLA`ul?CD*k)F`!u$^rWSfS7!T}zMTWN9 zF(lhwfk#=<4jBLMD`4!41x~hZJ%}ypidUV$d99>T7unB`E!hu@U0@75GN*sK?fg>8HHv@Z88(Azlb$XsT~=jt{k-WGlxmbwTIArs~;icqe;v-=EY;M+d_T75_% z)w|x3WBLifS0=LLLef*qy^M#CU66XcHW&9Hz*CkAo*QftUb0kfFlFK%$@`%|x8XVn{(rjGckj zvq{mafKdGmRhu*`NXz=|hp*eTsMQa*3M$4o9r`4sqJEy}3u;KTlowY>J=N?Dgcx!4~yG?dSpH5c> zic^N>ASbPIbouP0M5UkwjMxX}JEpw!*+Co3dwyWY+p;S$PjswT>fj~%Lif9$ydJ*& z80?JR9Ha*~_yLB#LM4dCls4FR%ONyCVS1k*Lp1vsORiJ6#bJD^$TcO7s&P56a;BhqF@v zqWD-ClM#h_qrMP52U8e50KyN)s_z?sh&a$}jie3fb)vVUzxNp5RIG)a#dCn(f&vtx zA$_C4J@^COac;n>Ldc_t^h~q)XG^;t>#--m#2v_;$XJ&Os444xX~70KFK(9Pj&=#} z&t`h@g67DA*$aACu#KlfsW{)PoS~+;DiX+s86A%T)Btp<)}o(;>DI!Uo#XMqGj@3> zi7pN=ZDDf7pnSnJDy-|CM{_ZH__1ho3>*lDW;XTWUPk_rzl*wNI3JbEKcMm6@Dtpo z2`MdgiV3X+*(&uaB|qh}DE%d`XG(vad+O-&SZ{MZ>O5{DM~9KoY9F^&q3i3}M1%1{ z2TCi47prcRkNLPKA=U0uuH9(OBxn(M=xkH z-m`I!=^5J9uhzLiqou;@pLf^pQ=D`$=d6-;OW7RI9e|~n2>nf?*BpxYYuy%sf5ow8rGo&HA3%{ns z)|2|URC}NJV;rBH_#O>WZ^~pkxH?$~y(x+-xAy3XiH|j-SMnpGS0L|jk9G2+R-RDC zB+VrpcW~ZW@>$?nsJ#*gB~IkF5m?q6VoX2ETxJS_FGz?;a>DcG@i0YkMerODb^>s* zK95I9x_u%-XN0%m@YeAiqPi@hu?fdLor!OC0uUfJ(5wSL19~~P+?>%{b8d$Ss#aaU zh|TuI&0Ak%X~Wn>WQ8ZOCaT#Z+^=Qcym9XbBQjM%s&)7d&5Lvl5IH60FKd+~wB;Q2<{MdJ{dRWEgBRY3%hEX&~ z7O2rJzZkMC5yu9mP)dL2c<-*b)-JfF2*R5AW?eW!{7t4~;Y(zyjebL;oR(JU+X%@T z|Hw9-g_@y*B{or%Bvv|IiPJh-Tg~>oucIznL(>@+L+Rstq)W<+HuX|G^}5xEcBu2b zY=n$hn7WnIcZC;@q*z2jCP+#LGR52?rfL{DdCbQkQiLxoV^XrJ)~+t8Mw_%{Z+lN{ zR9~roJZffXSjb7MOw@Y!r65qLa6sD`vuvifEL3ZzEG&Wh#@XNl;Rq2j4<4If)F8%E zMWg3jtICec1s*(3w3V{i)7ORM_Zoyeb+cDpF^eoXwWZEO*~n1D@iUKfy6l$YZEl|& zkS>eyU2+A8LVeLH_v}aTVosbtsnlZ%k9bpgt0i4r(77_4TgT7P6pLA)TA!^g7Qv{P z697<4OSHrre~`{Fz-H=3YF0}rFY7HGDPTZl!o-f%k2KiMZf%Fkz$b+u_xFp3&s(r# zJkjRJ?eAnevdF5Yo<%1Yw~(tWP#aS*K<}#baQneSc~hE)re)=&Ek@FP$rqdPgeU^@bb8!Bye?fP>Iw4SA9QLcpd#T(weFyfElX+7ab7p*3Z>1@Id(ded@l62$ZiPhA! zrPn1hQXeG6g13Xjs;cL7)%>^Fvu+_yQMB`+@m=hTtrFAnDQ)nopm9D(@#-*p>M|Q+ z&mrl}$+UW5Ef0$abbcLIC>|@kT`EWK(8z*CXo{u8%0am%3%Cygqz;1`~c>A+roXWR^{lZ~_LPvOA29LS+~ zgQgV191oHRqU@1WP)Ln9-;Ju9^mh(zK~9WzYKe0tzV#G_S@aI%yc8-q!${7vYi+?y zWctxxMu*;zKTwOZnG;|_xQpK_ZUVUhDnJJ1`Md%1!X307;DJFqn5V2QJXPw3h*|(< zuSyOT$_vsPaUYnGV8QeEacZuq6?r^G#k-Q92xP%rHs$#;%{CA?*_gcR*6M@iIMKR> z<&Y!}f(##=^bN4bFG*s&YQct5NA$rls_ruIM-K|XBI^jW*VIB@7(nd~*j#lWYPg*o zunLpEiItFro$N&3RB6~X6fPPd+&S04=;Bzw=;uS> zM`WA}tuc3S_!J6?58}f~eaXrMR{ zDsh_Lmq+qQL{!Mg3D!_)CO|gclPbxaA4Rdb-y(ggzUPy&f_s*WgM?dD8fNiy<{Rr6 zwrfKE;wZ+Mlc|@SlCGD~f15G;ed6fy2kN*Ke~djxC5Ml?sKXsfFhsELR;%u8{?TPk zRZWcU>anMQ2GsCy%SZ%#BCE_qTgw~TH?)?D02w(A>6mVP{2=k@?yCWVN+$PDOd4iL z3@k7gF@jM{>4r!X%v6#QiIR~?M#bimiAaegitt56wbvapJ*a+(-F*pyYcG2q)rgnx z(|^xN24MJpv-O~Q&ghiW>Jq4-zDZ@ytuO&%d&#zeW6oR?n1 zp%ol2C*gG4vOAO&^4T{I9e_$PsHWTtF;WN$sJObObdn&3A602pBU0dtU7GFocoJ5= z&!iZ&o;$Ljv_b8gFhAdr`G`mFqrPgLj`)(tfb3|YkXACL_=78(;cAlbgVq?lO>7on zIiKmbo>fSm$Zv6Lif%tXl$Hh2*AvzgN41fdcxrhc6M8)!F!kFhS4YaCsM_7aT;bHz zs9BTF8`-&rG;2IIuUlF*>7FkxlTQk032_Ruhup7R*^|c$Yd!6T8-D2t8(T+z71}|% zfu_!W7B0-QffhG!U^7X+$eqpFH?@Z#sjxW^78e^gnQpH0Mm}>vH_7^-W_qrAIor{1 zkr9gIvk($rl<|4HEdhl3w(72`6Sgg0P2}c#Z*$L*a){dhC-H<*!_$uA)Vn!S$? zwsDK*xvdESN$57oC@9n=xZ2APoWi%{5?g-$0G&q1L!F?hFreZYQ#e~V-+;Mgvf>$f zIwdHrNP|Eo?M-T&I!ii(`djuGbph3Wcj&MJA*Vo2Qb_LQC3{)Z!nrCkoqp#*Ip(p*oa^!oR4WS(D=t{oQARB6M2a9s=^YzHoC!v`lGB)D zFK6qcDIeoTPmH*I`N556v++m2s&hJm&fE8$o!(yGQDD4r`^niQ5hYMX%^|<=3(>!(!+EeI&{7*(6YN! 
z*_|GyWIeP$<@)+rwO5-;HjQr(&d-0DX>Gf;YnViWoJ96D@3k{}ZG?|P#vd>|*U~EEB#5SA* zyR(N(w$wXw7jD+i=*L?pg#8)?FMeU^_D^a>4B_WVe*55I6@ny#dJWc- zv6=cnKIlbHBVp@T6OSf{`89^WzZwL(~cqxWq#TXm4M>H0W9WOO;9o z7}$`0-;T8$mvrAPncN6gQRFc*S;+uJB)(Wg2BVZhpQBN%_EzybADjinu_F^=?Y@m{ z<%MBurXS(%UZL^iMt$Oa(D*uXCE<2a8qAXEW7y}~4%Wc13>2_Pie^NPDtGj(@bo@Z z6le5vM=<-M7DkR8L>&8is14!Pq9W6aR)=$lj||drww5;Id0vjdBo2sxeEy`Ap{<5} z2HN3r89g?zD6^-x_zpJv1!UTpTH0C!nfMvLie=9qZ`dhMj>*~)ZkCzDXMcnpzkg6V z05ceiv}`y;)gJQ&<7$Q=aeGG1+93s_5cRR@Jn#1~-|&dQg;TPLj$vkIj%guGO=!vM zJoMvwK=wK3B|mrVOba_1i2xxaK{-2o)1Z~472@ONO&Q*pM1JP!d*(QMf1yDz6tR8A z$)hMzWd>{$Lc~*h7%m zdEm|gqyR{@E*uUB_F9OW00T6XSqPkP*kONEpdiCM5`qBwJoI&-lmg^|Ah|g>wE!#! zGK2r<8dnSai9f6XRCLg;0ZbeC1saqgDNrO}i&Rwt5$Hcg0%L;OD#)imf`uX~?0|-f zA=EmF^I}67@+M!a4E+v=3tHE|MhJ3}*BmP|&^KhI7fvRyycSr6{Hhme9{xHsXr0xb zFdJsK>-J2p1)?S>uKV~5`i9FJmN?*DZv&h_I2^K~FcLb5J$d8Oh=qYB(8+_IO2Wf8)Nn-EpUXWX<>AOtm_jSFbvg^tZ6Oa^(6%KvCr4o^TL!75Ow*Pd zmTOu>HkLQObFO!;b56Qc-Iv6#if)e1Lei$v->DrF&0s2rM8r${QV}bhV;4i*M?& z5V7THR@4JiE>j>YiIH8i=l90ev)CRVZ(YE`uNZw z&F8Y-y^y*Rhv0MDO}$NdDjX_Fsw*l~8IX*6`c8&x#*<}<-2ku>_{m0&&5eE4e8u9) zF{2weQ6DsVqNVWGo4#4oS=+m@v3^UtSwl|KLQ7OzRU4VP49QQ7>BTQ+adE-3@XF%Q zjV+Z8A`6qPbDm(4YLRV0j$ux7LQ}8Pdragk zAy;erZ2K=xQubUsmpu!88K=uTJiA7l^()(LbQ=fjT6>G9RGT;(YWvsoy5D#w+82BV zdV7p>20i2(OF!(|el}zTF{Y{)&W#~dVjrqsx+k>-wb8Y|yV<)x^ZDp)xQgB3<^8&5 zT5DK)VIB6N?cw@{_xs$r{KYdL80Ok*1?)4HNfrQmEqgcy7WOpOCDlA<3&o9KxNSI~ zh+7570&HY*W*B3wVzD+!Ij^{?tO=tFcIxFOuSL&CM@Da`{HAuQ%S&v5u8GFkC}t)* zn^|dvJxf*1i%E&!AI*gy6N?gM8~N0AX*|nhX53~}VQi|wP~}s#Yc1}I?-gl0NV_f5 zx7eRLAUCkke;kn&=@!ly_ipE-JzXm!CJ&}1#tz0KMjQnLMF?dR)n;O0lKi=edqi6< zR!VYJa)3(Ebl`LXOIBmM1+%xEuP|3~2UWYW0QRHI6}(G`c1R)qHNGmo9kVdAys4zw zdHr4eRfC&}{8r1ZcRNZ`Y*P)lI=!3I$)0EFRnx0HWTsai*)6TdRyjI`jlWKf2aiW& z`3*4-(~WzzQ(8-_cQ`(kJk(VaYiA)+AlyLrE?Iw=u@<-4wLM)m>@a?gl@B@Uu@a>c z-4=Zvso6i*0er03s0_)eM1R7>fCwKz2{dMP+uu`tg)p9ë%ZdVp|($tPnXhLM3#joZw^Yca1DqoP; z5wl8TMTtM%#vHQe(1+4DRX5HSY_7GO-89%0b`o)|`xM+$Y?_R^X|1Y$P+UY<%D+Ck z&WOE@t#kr{tYC(D8=(>U2=j5laVdoE}Tcz8W6__ze zrb(rx%cZt_+K<)`9tRh<`+@I@{c3#k?>9c_6Z-9EX|gguB|bZ@&uyrGe;?bXcm2_8 z*6;c}Ym5EDDa3AjmDp#{eeRy`=&{dx2lgl|GS2h$Cq559&zAVM+Ow>ev+a-Ii$4tt z$oz}N`kx#)f4^JfVEWq?3<(?CZ?|gzMimcx5Tm@2rLwclugBgh)uq@)0NNuqKSPC* zzN1qSHb$CKysJgiq!Ro&S~y~=P#|GpYUgSlWN>XU^vB5Br1SaRu`zt%)+o4wW#MTT z!|Q~@j8iZVUofX|h|gn$=aXTaLOc+waNabaGa+-#U|N& zap$Gh3DiLCjfgQVP13kk-g1O*n*9i^NLlo{if@yb_jEMnpxW0;TO9Fic0E6!yH_%>lph&gT)#0;A_P@QRqC8!WeHDC=RnAc;yH zr$$oC1)$;@HT=Z0)tOYfH(yf++i(nK^SLZNs{QCS|B$;2(;Q6|5%BsguCg+h#8Q>i z9Z7(B@e1eT8$SRV&qOB(BO7WucV6dp`2$e{^Iohg^x10@ekc2f0*rjHgLSZ(E=D6- z#A*^Lc7=pdp9MVy%;nh=@S9sYXa2XlpF*DDf|pi z@QfKKFzUP1T7S2+g`_uv)6;0jGi$0wBnD<`hc&$ACBeYg#aNy|>`8EjCNOb_Y_k)- zgm`|7qJ``Y!BXSM+kvy@CD?$;1P6CUg1?kFAaZ?LI?@n zn2Wgip$70J`bLyHXnPdEJk9++t<1Mw;hl~{CzL%Uq3?{7<>Tt=?OfBRmDjhtg?G3= zE_M^!J5Y?P7v5v~^vT=ZqiOM6qA{L&Iu`EbC{40kt>!=JJ=$I}1@af%bLSPbuT_1A6^zq39Q^WRyY$OQRR7`vcz@uP?$Ht0OIn>aR(Q~#cIFoYt-m{H z{FL&PU5uQ6E;S8Vncq>%f}9Oa44n=Cx$1cM-R%vHtw7EsMj$f_TVAr$Pn~2W7ACx8 z>a4N=S$koSxrKzMBS_g(PQ}>M%9zuHjGqsI$DPaF#@^-y8i~7&wXGAEJ1?1`iJcLM z>*f2;Vn#BO->NuU@sbJtd_bZht3V=b=LjNUVE{5P(HjF;m`PZH49x66b|A-Fl9#|Y zoQwb_MkWq=CRQ#02N!^a0(8$ik znU{?0C!&9Rejk~M@jvj`yEt0^hQ-8~5o8Ur0ogh`F)}eQG5!PiOIscjV=hxWM;k+D zK0|wZYYSt;pI8{}O-y+h|M}>jh& zHncV4C3B}Y0ht=QSUZ#P$yyjY+Bw;oI+Li|Ihv5rzffILM%0ys!Nks(hw=B8e}wpZ z=^qjO*3p0M)6aeyJAw?I?HvCF!cVZuAcsGe{C;apSDg6zcDR?v7HU0yP^HdTmt?$moK%sgnteiB=Yjd2Lv!N z(*xM)nV3{wn3NU3#l%DpVBrD)co_es<}dU6k0*AHDt30(d*SyH|F^sTG8MA2 zf8P1OPrr@LAN&7X6D|=uW0#-HT~dV4#l^ye3t(!aFZ+HSJ&6NZ-aX1h?SBVnjG7 
zl5^c$Lp-lbnfSlketO@SLKrUTDCrO>k@%1vv3w&jQM574qH;BBU$XVehDpLnp2?*mDgB%iqs^oaTI% z{{&%_LYx=2C-_DJx|{^t1Pq@o9^3@J9!fJXnuL4^4j3py@)ZUcFh;^Ww4FEanrdUU zaGly6$uY{mwyhMd3qhUg+Qv}h>hqHIBkwlj=}*b_*M2_#Vs_5N{+B*e-eYwLJpe3$ zDSeNuVS~&*7)2?;svv{>JdH4hrU(LfpYNFmr%T-nkGYk_5b&C*)&n;JM)Gc_MCT^b zH8I5Z1u?PMR`fGM3@J(j62&{f97)c5)uf78qFZ&Ao=`9|@m`8U&sm5n&?0h0_ zaqF+>dP{I2uM1GS(W`>d5zRG@y`tXbuk&!;;t|q4PNTR-DUfTX_ZBa|c0EqNrJsNe`Pu`qrSE=ibxe0+a) zeJv@X%_9g91B$S7yl4!B1eutG*x7)>f-IaIEC3F6K_IJ`FdvVIC<`kqhnT3Cuox55 zi;jUql!;wfh?7l_RZIjZ%*@HB%P47UYWJdV_&pZJ-_<2R7DgpIJLi}3-!t1OTX=$A ze*8^kqoD*cWmMN@B4H+B(*39M#C*lb2n?J`4Cbj6Nd^fiT9zcS5NB*93T-_T&QxD4 z++Gf{K|V+#6sH#MwT20GI#iVXGsGydXt@1}D6SzLbi;cR5qHmSYYJ8!Teu}sR|i6o zy})q$)!+X6h_Z+Ag6ksh!aB3Q{W0m&ofwD~kVzPJf>^@>xF<;L9GP-)NDo@zMzc(J zLN|nK8igC}n;GQJ0<_$%s zum}kQPmoC(pobxxU{(m=g_y1ak?>I2bhkfT{oh diff --git a/docs/development/development_index.rst b/docs/development/development_index.rst index 1c61ff15..a14c51a2 100644 --- a/docs/development/development_index.rst +++ b/docs/development/development_index.rst @@ -5,9 +5,7 @@ Developer Documentation .. toctree:: :maxdepth: 2 - about pysd_architecture_views/4+1view_model contributing pathway - structure complement diff --git a/docs/development/internal_functions.rst b/docs/development/internal_functions.rst deleted file mode 100644 index 22b4da1f..00000000 --- a/docs/development/internal_functions.rst +++ /dev/null @@ -1,52 +0,0 @@ -Internal Functions -================== - -This section documents the functions that are going on behaind the scenes, for the benefit of developers. - -Special functions needed for model execution --------------------------------------------- - -.. automodule:: pysd.py_backend.components - :members: - :undoc-members: - :private-members: - -.. automodule:: pysd.py_backend.statefuls - :members: - :undoc-members: - :private-members: - -.. automodule:: pysd.py_backend.functions - :members: - :undoc-members: - :private-members: - -.. automodule:: pysd.py_backend.utils - :members: - :undoc-members: - :private-members: - -Building the python model file ------------------------------- - -.. automodule:: pysd.translation.builder - :members: - :undoc-members: - :private-members: - - -External data reading ---------------------- - -.. automodule:: pysd.py_backend.external - :members: - :undoc-members: - :private-members: - - -Decorators used in the model file ---------------------------------- -.. 
automodule:: pysd.py_backend.decorators - :members: - :undoc-members: - :private-members: \ No newline at end of file diff --git a/docs/development/supported_vensim_functions.rst b/docs/development/supported_vensim_functions.rst deleted file mode 100644 index 087d218a..00000000 --- a/docs/development/supported_vensim_functions.rst +++ /dev/null @@ -1,121 +0,0 @@ -+------------------------------+------------------------------+ -| Vensim | Python Translation | -+==============================+==============================+ -| = | == | -+------------------------------+------------------------------+ -| < | < | -+------------------------------+------------------------------+ -| > | > | -+------------------------------+------------------------------+ -| >= | >= | -+------------------------------+------------------------------+ -| <= | <= | -+------------------------------+------------------------------+ -| ^ | \** | -+------------------------------+------------------------------+ -| ABS | np.abs | -+------------------------------+------------------------------+ -| MIN | np.minimum | -+------------------------------+------------------------------+ -| MAX | np.maximum | -+------------------------------+------------------------------+ -| SQRT | np.sqrt | -+------------------------------+------------------------------+ -| EXP | np.exp | -+------------------------------+------------------------------+ -| LN | np.log | -+------------------------------+------------------------------+ -| PI | np.pi | -+------------------------------+------------------------------+ -| SIN | np.sin | -+------------------------------+------------------------------+ -| COS | np.cos | -+------------------------------+------------------------------+ -| TAN | np.tan | -+------------------------------+------------------------------+ -| ARCSIN | np.arcsin | -+------------------------------+------------------------------+ -| ARCCOS | np.arccos | -+------------------------------+------------------------------+ -| ARCTAN | np.arctan | -+------------------------------+------------------------------+ -| ELMCOUNT | len | -+------------------------------+------------------------------+ -| INTEGER | functions.integer | -+------------------------------+------------------------------+ -| QUANTUM | functions.quantum | -+------------------------------+------------------------------+ -| MODULO | functions.modulo | -+------------------------------+------------------------------+ -| IF THEN ELSE | functions.if_then_else | -+------------------------------+------------------------------+ -| PULSE TRAIN | functions.pulse_train | -+------------------------------+------------------------------+ -| RAMP | functions.ramp | -+------------------------------+------------------------------+ -| INVERT MATRIX | functions.invert_matrix | -+------------------------------+------------------------------+ -| VMIN | functions.vmin | -+------------------------------+------------------------------+ -| VMAX | functions.vmax | -+------------------------------+------------------------------+ -| SUM | functions.sum | -+------------------------------+------------------------------+ -| PROD | functions.prod | -+------------------------------+------------------------------+ -| LOGNORMAL | np.random.lognormal | -+------------------------------+------------------------------+ -| STEP | functions.step | -+------------------------------+------------------------------+ -| PULSE | functions.pulse | -+------------------------------+------------------------------+ -| EXPRND | 
np.random.exponential | -+------------------------------+------------------------------+ -| POISSON | np.random.poisson | -+------------------------------+------------------------------+ -| RANDOM NORMAL | functions.bounded_normal | -+------------------------------+------------------------------+ -| RANDOM UNIFORM | np.random.rand | -+------------------------------+------------------------------+ -| DELAY1 | functions.Delay | -+------------------------------+------------------------------+ -| DELAY3 | functions.Delay | -+------------------------------+------------------------------+ -| DELAY N | functions.DelayN | -+------------------------------+------------------------------+ -| DELAY FIXED | functions.DelayFixed | -+------------------------------+------------------------------+ -| FORECAST | functions.Forecast | -+------------------------------+------------------------------+ -| SAMPLE IF TRUE | functions.SampleIfTrue | -+------------------------------+------------------------------+ -| SMOOTH3 | functions.Smooth | -+------------------------------+------------------------------+ -| SMOOTH N | functions.Smooth | -+------------------------------+------------------------------+ -| SMOOTH | functions.Smooth | -+------------------------------+------------------------------+ -| INITIAL | functions.Initial | -+------------------------------+------------------------------+ -| XIDZ | functions.XIDZ | -+------------------------------+------------------------------+ -| ZIDZ | functions.XIDZ | -+------------------------------+------------------------------+ -| GET XLS DATA | external.ExtData | -+------------------------------+------------------------------+ -| GET DIRECT DATA | external.ExtData | -+------------------------------+------------------------------+ -| GET XLS LOOKUPS | external.ExtLookup | -+------------------------------+------------------------------+ -| GET DIRECT LOOKUPS | external.ExtLookup | -+------------------------------+------------------------------+ -| GET XLS CONSTANTS | external.ExtConstant | -+------------------------------+------------------------------+ -| GET DIRECT CONSTANTS | external.ExtConstant | -+------------------------------+------------------------------+ -| GET XLS SUBSCRIPT | external.ExtSubscript | -+------------------------------+------------------------------+ -| GET DIRECT SUBSCRIPT | external.ExtSubscript | -+------------------------------+------------------------------+ - - `np` corresponds to the numpy package diff --git a/docs/development/vensim_translation.rst b/docs/development/vensim_translation.rst deleted file mode 100644 index ecc1727b..00000000 --- a/docs/development/vensim_translation.rst +++ /dev/null @@ -1,41 +0,0 @@ -Vensim Translation -================== - -PySD parses a vensim '.mdl' file and translates the result into python, creating a new file in the -same directory as the original. For example, the Vensim file `Teacup.mdl `_ becomes `Teacup.py `_ . - -This allows model execution independent of the Vensim environment, which can be handy for deploying -models as backends to other products, or for performing massively parallel distributed computation. - -These translated model files are read by PySD, which provides methods for modifying or running the -model and conveniently accessing simulation results. - - -Translated Functions --------------------- - -Ongoing development of the translator will support the full subset of Vensim functionality that -has an equivalent in XMILE. 
The current release supports the following functionality: - -.. include:: supported_vensim_functions.rst - -Additionally, identifiers are currently limited to alphanumeric characters and the dollar sign $. - -Future releases will include support for: - -- subscripts -- arrays -- arbitrary identifiers - -There are some constructs (such as tagging variables as 'suplementary') which are not currently -parsed, and may throw an error. Future releases will handle this with more grace. - - -Used Functions for Translation ------------------------------- - -.. automodule:: pysd.translation.vensim.vensim2py - :members: - :undoc-members: - :private-members: - diff --git a/docs/index.rst b/docs/index.rst index 1e4b1c21..c8f18781 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,27 +37,11 @@ PySD This project is a simple library for running System Dynamics models in python, with the purpose of improving integration of Big Data and Machine Learning into the SD workflow. -PySD translates :doc:`Vensim ` or -:doc:`XMILE ` model files into python modules, +PySD translates :doc:`Vensim ` or +:doc:`XMILE ` model files into python modules, and provides methods to modify, simulate, and observe those translated models. -Contents: ---------- - -.. toctree:: - :maxdepth: 2 - - installation - basic_usage - advanced_usage - command_line_usage - tools - functions - development/development_index - reporting_bugs - - Additional Resources -------------------- @@ -92,3 +76,21 @@ You can also cite the library using the `DOI provided by Zenodo `. Its structure is defined in the following document: + The use of a one-to-one dictionary in translation means that the breadth of functionality is inherently limited. In the case where no direct Python equivalent is available, PySD provides a library of functions such as pulse, step, etc. that are specific to dynamic model behavior. @@ -34,26 +36,32 @@ During translation some dictionaries are created that allow the correct operatio * **_dependencies**: Used to define the dependencies of each variable and assign cache type and initialize the model. -The model class -^^^^^^^^^^^^^^^ -The translator constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a ‘statefull’ model in much the same way that the system itself has a specific state at any point in time. - -The model class also contains a function for each of the model components, representing the essential model equations. The docstring for each function contains the model documentation and units as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. +Building the model +------------------ +.. toctree:: + :maxdepth: 2 -The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. The downside to this design choice is that several components of Vensim or XMILE functionality – the most significant being the infinite order delay – are intentionally not supported. In many cases similar behavior can be approximated through other constructs. 
+   python_builder

-Lastly, the model class provides a set of methods that are used to facilitate simulation. PySD uses the standard ordinary differential equations solver provided in the well-established Python library Scipy, which expects the state and its derivative to be represented as an ordered list. The model class provides the function .d_dt() that takes a state vector from the integrator and uses it to update the model state, and then calculates the derivative of each stock, returning them in a corresponding vector. A complementary function .state_vector() creates an ordered vector of states for use in initializing the integrator.

+The Python model class
+-----------------------

-The PySD class
-^^^^^^^^^^^^^^
 .. toctree::
    :maxdepth: 2

-   internal_functions
+   model_loading

+The translator constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a ‘stateful’ model in much the same way that the system itself has a specific state at any point in time.
+
+The model class also contains a function for each of the model components, representing the essential model equations. The docstring for each function contains the model documentation and units as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system.
+
+The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. The downside to this design choice is that several components of Vensim or XMILE functionality – the most significant being the infinite order delay – are intentionally not supported. In many cases similar behavior can be approximated through other constructs.
+
+Lastly, the model class provides a set of methods that are used to facilitate simulation. PySD uses the standard ordinary differential equations solver provided in the well-established Python library Scipy, which expects the state and its derivative to be represented as an ordered list. The model class provides the function .d_dt() that takes a state vector from the integrator and uses it to update the model state, and then calculates the derivative of each stock, returning them in a corresponding vector. A complementary function .state_vector() creates an ordered vector of states for use in initializing the integrator.
+
+The PySD class
+^^^^^^^^^^^^^^
 The PySD class provides the machinery to get the model moving, supply it with data, or modify its parameters. In addition, this class is the primary way that users interact with the PySD module. The basic function for executing a model is appropriately named .run(). This function passes the model into scipy’s odeint() ordinary differential equations solver. The scipy integrator is itself utilizing the lsoda integrator from the Fortran library odepack14, and so integration takes advantage of highly optimized low-level routines to improve speed. We use the model’s timestep to set the maximum step size for the integrator’s adaptive solver to ensure that the integrator properly accounts for discontinuities.
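As a brief illustration of the interface described above, the sketch below shows how a translated model is typically loaded and run through the PySD class. It is only a minimal example: the file name, the 'Room Temperature' parameter, and the 'Teacup Temperature' column come from the Teacup example model shipped with PySD and are purely illustrative.

.. code-block:: python

    import pysd

    # translate the Vensim model and build the Python model file next to it
    model = pysd.read_vensim("Teacup.mdl")

    # run the model with default parameters; results are returned as a pandas DataFrame
    stocks = model.run()

    # modify a model parameter and run again
    cooler = model.run(params={"Room Temperature": 50})
    print(cooler["Teacup Temperature"].head())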
diff --git a/docs/development/supported_vensim_functions.tab b/docs/structure/supported_vensim_functions.tab similarity index 100% rename from docs/development/supported_vensim_functions.tab rename to docs/structure/supported_vensim_functions.tab diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst new file mode 100644 index 00000000..8eb05b91 --- /dev/null +++ b/docs/structure/vensim_translation.rst @@ -0,0 +1,25 @@ +Vensim Translation +================== + +PySD allows parsing a Vensim '.mdl' file and translates the result to an AbstractModel class that can be used to builde the model. + + +Supported Functions and Features +-------------------------------- + +Ongoing development of the translator will support the full subset of Vensim functionality that +has an equivalent in XMILE. The current release supports the following functionality: + +.. include:: supported_vensim_functions.rst + + +The translation workflow +------------------------- + +Vensim file +^^^^^^^^^^^ + +.. automodule:: pysd.translation.vensim.vensim_file + :members: + :undoc-members: + diff --git a/docs/development/xmile_translation.rst b/docs/structure/xmile_translation.rst similarity index 100% rename from docs/development/xmile_translation.rst rename to docs/structure/xmile_translation.rst diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translation/vensim/vensim_file.py index a7c8a315..651e37e6 100644 --- a/pysd/translation/vensim/vensim_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -1,3 +1,10 @@ +""" +The VensimFile class allows reading the original Vensim model file, +parsing it into SectionFile elements using the FileSectionsParser, +parsing its sketch using SketchParser in order to classify the varibales +per view. The final result can be exported to an AbstractModel class in +order to build a model in other language. +""" import re from typing import Union, List from pathlib import Path @@ -14,6 +21,9 @@ class VensimFile(): """ Create a VensimFile object which allows parsing a mdl file. + When the objext is created the model file is automatically opened; + unnecessary tabs, whitespaces, and linebreaks are removed; and + the sketch is split from the model. Parameters ---------- @@ -93,10 +103,17 @@ def _clean(self, text: str) -> str: return re.sub(r"[\n\t\s]+", " ", re.sub(r"\\\n\t", " ", text)) def parse(self) -> None: - """Parse model file""" + """ + Parse model file with parsimonious using the grammar given in + parsin_grammars/file_sections.peg and the class FileSectionVisitor + to visit the parsed expressions. + + This will break the model file in VensimSections, which are the + main model + macros. Then the sections will be automatically parsed. 
+ """ # get model sections (__main__ + macros) tree = vu.Grammar.get("file_sections").parse(self.model_text) - self.sections = FileSectionsParser(tree).entries + self.sections = FileSectionsVisitor(tree).entries # main section path (Python model file) self.sections[0].path = self.mdl_path.with_suffix(".py") @@ -104,7 +121,7 @@ def parse(self) -> None: for section in self.sections[1:]: # macrots paths section.path = self.mdl_path.parent.joinpath( - self.clean_file_names(section.name)[0] + self._clean_file_names(section.name)[0] ).with_suffix(".py") for section in self.sections: @@ -112,7 +129,27 @@ def parse(self) -> None: section._parse() def parse_sketch(self, subview_sep: List[str]) -> None: - """Parse the sketch of the models to classify the variables""" + """ + Parse the sketch of the model with parsimonious using the grammar + given in parsin_grammars/sketch.peg and the class SketchVisitor + to visit the parsed expressions. + + It will modify the views_dict of the first section, includying + the dictionary of the variables classification by views. This, + method should be called after calling self.parse method. + + Parameters + ---------- + subview_sep: list + List oh the separators to use to classify the model views in + folders and subfolders. The sepparator must be ordered in the + same order they appear in the view patter. For example, + if a view is named "economy:demand.exports" if + subview_sep=[":", "."] this view variables will be included + the file 'exports' inside the folders economy/demand. + + + """ if self.sketch: sketch = list(map( lambda x: x.strip(), @@ -130,7 +167,7 @@ def parse_sketch(self, subview_sep: List[str]) -> None: for sketch_line in module.split("\n"): # parsed line could have information about new view name # or of a variable inside a view - parsed = SketchParser(grammar.parse(sketch_line)) + parsed = SketchVisitor(grammar.parse(sketch_line)) if parsed.view_name: view_name = parsed.view_name @@ -158,7 +195,7 @@ def parse_sketch(self, subview_sep: List[str]) -> None: for full_name, values in non_empty_views.items(): # split the full view name using the separator and make the # individual parts safe file or directory names - clean_view_parts = self.clean_file_names( + clean_view_parts = self._clean_file_names( *re.split("|".join(escaped_separators), full_name)) # creating a nested dict for each view.subview # (e.g. {view_name: {subview_name: [values]}}) @@ -168,7 +205,7 @@ def parse_sketch(self, subview_sep: List[str]) -> None: nested_dict = {item: nested_dict} # merging the new nested_dict into the views_dict, preserving # repeated keys - self.merge_nested_dicts(views_dict, nested_dict) + self._merge_nested_dicts(views_dict, nested_dict) else: # view names do not have separators or separator characters # not provided @@ -180,18 +217,23 @@ def parse_sketch(self, subview_sep: List[str]) -> None: "any view name.") for view_name, elements in non_empty_views.items(): - views_dict[self.clean_file_names(view_name)[0]] = elements + views_dict[self._clean_file_names(view_name)[0]] = elements self.sections[0].split = True self.sections[0].views_dict = views_dict def get_abstract_model(self) -> AbstractModel: """ - Get Abstract Model used for building + Get Abstract Model used for building. This, method should be + called after calling self.parse_sketch method or self.parse, + in the case you do not want to split variables per views. 
Returns ------- - AbstractModel + AbstractModel: AbstractModel + Abstract Model object that can be used for building the model + in another language. + """ return AbstractModel( original_path=self.mdl_path, @@ -199,7 +241,7 @@ def get_abstract_model(self) -> AbstractModel: for section in self.sections)) @staticmethod - def clean_file_names(*args): + def _clean_file_names(*args): """ Removes special characters and makes clean file names. @@ -221,7 +263,7 @@ def clean_file_names(*args): ).lstrip("0123456789") for name in args] - def merge_nested_dicts(self, original_dict, dict_to_merge): + def _merge_nested_dicts(self, original_dict, dict_to_merge): """ Merge dictionaries recursively, preserving common keys. @@ -241,12 +283,12 @@ def merge_nested_dicts(self, original_dict, dict_to_merge): for key, value in dict_to_merge.items(): if (key in original_dict and isinstance(original_dict[key], dict) and isinstance(value, Mapping)): - self.merge_nested_dicts(original_dict[key], value) + self._merge_nested_dicts(original_dict[key], value) else: original_dict[key] = value -class FileSectionsParser(parsimonious.NodeVisitor): +class FileSectionsVisitor(parsimonious.NodeVisitor): """Parse file sections""" def __init__(self, ast): self.entries = [None] @@ -289,7 +331,7 @@ def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text or "" -class SketchParser(parsimonious.NodeVisitor): +class SketchVisitor(parsimonious.NodeVisitor): """Sketch visitor to save the view names and the variables in each""" def __init__(self, ast): self.variable_name = None From eaaa535bea7c9b89b9f288e82127971e5b1d126c Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Sat, 26 Mar 2022 15:39:07 +0100 Subject: [PATCH 23/96] Add new test --- .../vensim_pathway/pytest_integration_vensim_pathway.py | 4 ++++ tests/test-models | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py index 1e3ad6bd..fa66af38 100644 --- a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py @@ -227,6 +227,10 @@ "folder": "lookups_inline_bounded", "file": "test_lookups_inline_bounded.mdl" }, + "lookups_inline_spaces": { + "folder": "lookups_inline_spaces", + "file": "test_lookups_inline_spaces.mdl" + }, "lookups_with_expr": { "folder": "lookups_with_expr", "file": "test_lookups_with_expr.mdl" diff --git a/tests/test-models b/tests/test-models index fcfa160e..d770b2c6 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit fcfa160eafe8639de605cfde3be59d4880e310b7 +Subproject commit d770b2c6bb377776b2b855682f6aa210bbdcf441 From ea2faacb24be323bf0bda2ed2e544d5baa2f013e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 29 Mar 2022 12:54:36 +0200 Subject: [PATCH 24/96] Correct bug with externals --- .../python/python_expressions_builder.py | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index c590df0b..caf07b1e 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -116,12 +116,12 @@ def _compute_final_subscripts(self, subscripts_list, def_subs): # TODO reorder final_subscripts taking into account def_subs return 
expression - def update_object_subscripts(self, name): + def update_object_subscripts(self, name, component_final_subs): origin_comp = self.element.objects[name]["component"] if isinstance(origin_comp.subscripts_dict, dict): if len(list(origin_comp.subscripts_dict)) == 1: key = list(origin_comp.subscripts_dict.keys())[0] - value = list(self.component.subscripts_dict.values())[0] + value = list(component_final_subs.values())[0] origin_comp.subscripts_dict[key] += value self.element.objects[name]["final_subs"] =\ origin_comp.subscripts_dict @@ -130,8 +130,7 @@ def update_object_subscripts(self, name): self.element.objects[name]["final_subs"] =\ self.element.subs_dict if isinstance(origin_comp.subscripts_dict, list): - origin_comp.subscripts_dict.append( - self.component.subscripts_dict) + origin_comp.subscripts_dict.append(component_final_subs) class OperationBuilder(StructureBuilder): @@ -429,7 +428,7 @@ def build(self, arguments): + self.element.objects["ext_lookups"]["name"]\ + ".add(%(params)s, %(subscripts)s)" % arguments - self.update_object_subscripts("ext_lookups") + self.update_object_subscripts("ext_lookups", final_subs) return None else: @@ -439,6 +438,7 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_ext_lookup") arguments["final_subs"] = "%(final_subs)s" + self.component.subscripts_dict = final_subs self.element.objects["ext_lookups"] = { "name": arguments["name"], @@ -446,7 +446,7 @@ def build(self, arguments): "%(subscripts)s, _root, " "%(final_subs)s , '%(name)s')" % arguments, "component": self.component, - "final_subs": self.def_subs + "final_subs": final_subs } return BuildAST( @@ -483,7 +483,7 @@ def build(self, arguments): + self.element.objects["ext_data"]["name"]\ + ".add(%(params)s, %(method)s, %(subscripts)s)" % arguments - self.update_object_subscripts("ext_data") + self.update_object_subscripts("ext_data", final_subs) return None else: @@ -493,6 +493,7 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_ext_data") arguments["final_subs"] = "%(final_subs)s" + self.component.subscripts_dict = final_subs self.element.objects["ext_data"] = { "name": arguments["name"], @@ -500,7 +501,7 @@ def build(self, arguments): " %(method)s, %(subscripts)s, " "_root, %(final_subs)s ,'%(name)s')" % arguments, "component": self.component, - "final_subs": self.def_subs + "final_subs": final_subs } return BuildAST( @@ -534,7 +535,7 @@ def build(self, arguments): + self.element.objects["constants"]["name"]\ + ".add(%(params)s, %(subscripts)s)" % arguments - self.update_object_subscripts("constants") + self.update_object_subscripts("constants", final_subs) return None else: @@ -544,6 +545,7 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_ext_constant") arguments["final_subs"] = "%(final_subs)s" + self.component.subscripts_dict = final_subs self.element.objects["constants"] = { "name": arguments["name"], @@ -551,7 +553,7 @@ def build(self, arguments): "%(subscripts)s, _root, %(final_subs)s, " "'%(name)s')" % arguments, "component": self.component, - "final_subs": self.def_subs + "final_subs": final_subs } return BuildAST( From 1386d97c460570c3c9f58f28468cd4107d79d93e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 30 Mar 2022 17:47:51 +0200 Subject: [PATCH 25/96] Document --- .gitignore | 1 + docs/Makefile | 5 +- docs/conf.py | 
10 +- docs/generate_tables.py | 56 ++++ docs/structure/abstract_model.rst | 30 +- docs/structure/supported_vensim_functions.tab | 88 ------ docs/structure/vensim_translation.rst | 100 +++++- docs/structure/xmile_translation.rst | 95 +++++- docs/tables/binary.tab | 15 + docs/tables/delay_functions.tab | 18 ++ docs/tables/functions.tab | 35 ++ docs/tables/get_functions.tab | 9 + docs/tables/unary.tab | 4 + .../python/python_expressions_builder.py | 47 ++- .../structures/abstract_expressions.py | 299 +++++++++++++++++- pysd/translation/structures/abstract_model.py | 243 +++++++++++++- pysd/translation/vensim/vensim_element.py | 296 +++++++++++++---- pysd/translation/vensim/vensim_file.py | 50 +-- pysd/translation/vensim/vensim_section.py | 135 +++++--- pysd/translation/vensim/vensim_structures.py | 5 + pysd/translation/xmile/xmile_element.py | 227 +++++++++---- pysd/translation/xmile/xmile_file.py | 53 +++- pysd/translation/xmile/xmile_section.py | 181 +++++++---- 23 files changed, 1631 insertions(+), 371 deletions(-) create mode 100644 docs/generate_tables.py delete mode 100644 docs/structure/supported_vensim_functions.tab create mode 100644 docs/tables/binary.tab create mode 100644 docs/tables/delay_functions.tab create mode 100644 docs/tables/functions.tab create mode 100644 docs/tables/get_functions.tab create mode 100644 docs/tables/unary.tab diff --git a/.gitignore b/.gitignore index e2a4fa3b..95022f46 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ tests/cover/ tests/htmlcov/ .idea/* docs/_build/* +docs/tables/*.csv \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile index 1529adff..4f8ee358 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -48,9 +48,12 @@ help: @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" -clean: +clean: clean_tables rm -rf $(BUILDDIR)/* +clean_tables: + rm -f tables/*.csv + html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo diff --git a/docs/conf.py b/docs/conf.py index 8c3ade7b..135b5634 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,13 +18,18 @@ import mock import sphinx_rtd_theme +from generate_tables import generate_tables + + sys.path.insert(0, os.path.abspath('../')) +# Generate tables used for documentation +generate_tables() MOCK_MODULES = [ 'numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'scipy.stats', 'scipy.integrate', 'pandas', 'parsimonious', 'parsimonious.nodes', - 'lxml', 'xarray', 'autopep8', 'scipy.linalg', 'parsimonious.exceptions', + 'xarray', 'autopep8', 'scipy.linalg', 'parsimonious.exceptions', 'scipy.stats.distributions', 'progressbar', 'black' ] @@ -139,3 +144,6 @@ 'python': ('https://docs.python.org/3.7', None), 'pysdcookbook': ('http://pysd-cookbook.readthedocs.org/en/latest/', None) } + +# -- Options for autodoc -------------------------------------------------- +autodoc_member_order = 'bysource' diff --git a/docs/generate_tables.py b/docs/generate_tables.py new file mode 100644 index 00000000..d8cd7bec --- /dev/null +++ b/docs/generate_tables.py @@ -0,0 +1,56 @@ +import pandas as pd +from pathlib import Path + + +def generate(table, columns, output): + """Generate markdown table.""" + # select only the given columns + subtable = table[columns] + # remove the rows where the first column is na + subtable = subtable[~subtable[columns[0]].isna()] + + if all(subtable[columns[-1]].isna()): + # if the commnets columns (last) is all na, do not save it + subtable = 
subtable[columns[:-1]] + + # Place an empty string where na values + subtable.values[subtable.isna()] = "" + + if len(subtable.index) > 1: + # Save values only if the table has rows + print(f"Table generated: {output}") + subtable.to_csv(output, index=None) + + +def generate_tables(): + """Generate markdown tables for documentation.""" + + tables_dir = Path(__file__).parent / "tables" + + # different tables to load + tables = { + "binary": tables_dir / "binary.tab", + "unary": tables_dir / "unary.tab", + "functions": tables_dir / "functions.tab", + "delay_functions": tables_dir / "delay_functions.tab", + "get_functions": tables_dir / "get_functions.tab" + } + + # different combinations to generate + contents = { + "vensim": ["Vensim", "Vensim example", "Abstract Syntax", "Vensim comments"], + "xmile": ["Xmile", "Xmile example", "Abstract Syntax", "Xmile comments"], + "python": ["Abstract Syntax", "Python Translation", "Python comments"] + } + + # load the tables + tables = {key: pd.read_table(value) for key, value in tables.items()} + + # generate the tables + for table, df in tables.items(): + for language, content in contents.items(): + generate( + df, + content, + tables_dir / f"{table}_{language}.csv" + ) diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index 49896d18..1eee97f0 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -1,2 +1,30 @@ Abstract Model -============== \ No newline at end of file +============== +The Abstract Model representatin allows a separation of concern between +translation and the building. The translation will be called anything that +happens between the source code and the Abstract Model representation. While the +building will be everything that happens between the Abstract Model and the +final code. + +This approach allows easily including new source codes or output codes, +without needing to make a lot of changes in the whole library. The +:py:class:`AbstractModel` object should keep as mutch information as +possible from the original model. Althought the information is not used +in the output code, it may be necessary for other future output languages +or for improvements in the currently supported outputs. For example, currently +the unchangeable constanst (== defined in Vensim) are treated as regular +components with Python, but in the future we may want to protect them +from user interaction. + +The lowest level of this representation is the Abstract Syntax Tree (AST). +Which includes all the operations and calls in a given component expression. + +Main abstract structures +------------------------ +.. automodule:: pysd.translation.structures.abstract_model + :members: + +Abstrat structures for the AST +------------------------------ +.. 
automodule:: pysd.translation.structures.abstract_expressions + :members: diff --git a/docs/structure/supported_vensim_functions.tab b/docs/structure/supported_vensim_functions.tab deleted file mode 100644 index 53a5ede6..00000000 --- a/docs/structure/supported_vensim_functions.tab +++ /dev/null @@ -1,88 +0,0 @@ -Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Comments -Binary operators -^ A ^ B ^ A ^ B "ArithmeticStructure(['^'], (A, B))" A**B -* A * B * A * B "ArithmeticStructure(['*'], (A, B))" A*B -/ A / B / A / B "ArithmeticStructure(['/'], (A, B))" A/B - mod A mod B "CallStructure('modulo', (A, B))" "pysd.functions.modulo(A, B)" In Vensim the modulo is computed with a function and not an operator -+ A + B + A + B "ArithmeticStructure(['+'], (A, B))" A+B -- A - B - A - B "ArithmeticStructure(['-'], (A, B))" A-B -= A = B = A = B "LogicStructure(['='], (A, B))" A == B -< A < B < A < B "LogicStructure(['<'], (A, B))" A < B -> A > B > A > B "LogicStructure(['>'], (A, B))" A > B ->= A >= B >= A >= B "LogicStructure(['>='], (A, B))" A >= B -<= A <= B <= A <= B "LogicStructure(['<='], (A, B))" A <= B -:AND: A :AND: B and A and B "LogicStructure[':AND:'], (A, B))" "numpy.and(A, B)" -:OR: A :OR: B or A or B "LogicStructure[':OR:'], (A, B))" "numpy.or(A, B)" - -Unary operators -- #NAME? - #NAME? "LogicStructure(['negative'], (A,))" #NAME? -+ #NAME? + #NAME? A A -:NOT: :NOT: A not not A "LogicStructure[':NOT:'], (A,))" numpy.not(A) - -Functions -ABS ABS(A) abs(A) abs(A) "CallStructure('abs', (A,))" numpy.abs(A) -MIN "MIN(A, B)" min "min(A, B)" "CallStructure('min', (A, B))" "numpy.minimum(A, B)" -MAX "MAX(A, B)" max "max(A, B)" "CallStructure('max', (A, B))" "numpy.maximum(A, B)" -SQRT SQRT(A) sqrt sqrt(A) "CallStructure('sqrt', (A,))" numpy.sqrt -EXP EXP(A) exp exp(A) "CallStructure('exp', (A,))" numpy.exp(A) -LN LN(A) ln ln(A) "CallStructure('ln', (A,))" numpy.log(A) -SIN SIN(A) sin sin(A) "CallStructure('sin', (A,))" numpy.sin(A) -COS COS(A) cos cos(A) "CallStructure('cos', (A,))" numpy.cos(A) -TAN TAN(A) tan tan(A) "CallStructure('tan', (A,))" numpy.tan(A) -ARCSIN ARCSIN(A) arcsin arcsin(A) "CallStructure('arcsin', (A,))" numpy.arcsin(A) -ARCCOS ARCCOS(A) arccos arccos(A) "CallStructure('arccos', (A,))" numpy.arccos(A) -ARCTAN ARCTAN(A) arctan arctan(A) "CallStructure('arctan', (A,))" numpy.arctan(A) -INVERT MATRIX INVERT MATRIX(A) "CallStructure('invert_matrix', (A,))" pysd.functions.invert_matrix(A) -ELMCOUNT ELMCOUNT(A) "CallStructure('elmcount', (A,))" len(A) -INTEGER INTEGER(A) int int(A) "CallStructure('int', (A,))" pysd.functions.integer(A) -QUANTUM "QUANTUM(A, B)" "CallStructure('quantum', (A, B))" "pysd.functions.quantum(A, B)" -MODULO "MODULO(A, B)" "CallStructure('modulo', (A, B))" "pysd.functions.modulo(A, B)" In Xmile the modulo is computed with the 'mod' operator -IF THEN ELSE "IF THEN ELSE(A, B, C)" if_then_else "if_then_else(A, B, C)" "CallStructure('if_then_else', (A, B))" "pysd.functions.if_then_else(A, lambda: B, lambda: C)" - IF condition THEN value_true ELSE value_false IF A THEN B ELSE C "CallStructure('if_then_else', (A, B))" "pysd.functions.if_then_else(A, lambda: B, lambda: C)" -XIDZ "XIDZ(A, B, X)" safediv "safediv(A, B, X)" "CallStructure('xidz', (A, B, X))" "pysd.functions.xidz(A, B, X)" -ZIDZ "ZIDZ(A, B)" safediv "safediv(A, B)" "CallStructure('zidz', (A, B))" "pysd.functions.zidz(A, B)" - -VMIN VMIN(A) "CallStructure('vmin', (A,))" pysd.functions.vmin(A) -VMAX VMAX(A) "CallStructure('vmax', (A,))" pysd.functions.vmax(A) -SUM SUM(A) 
"CallStructure('sum', (A,))" pysd.functions.sum(A) -PROD PROD(A) "CallStructure('prod', (A,))" pysd.functions.prod(A) - -PULSE PULSE pysd.functions.pulse -PULSE TRAIN PULSE TRAIN pysd.functions.pulse_train -RAMP RAMP pysd.functions.ramp -STEP STEP pysd.functions.step - -Stocks -INTEG - -Delay functions -DELAY1I "DELAY1I(input, delay_time, initial_value)" delay1 "delay1(input, delay_time, initial_value)" "DelayStructure(input, delay_time, initial_value, 1)" pysd.statefuls.Delay(...) Not tested for Xmile! -DELAY1 "DELAY1(input, delay_time)" delay1 "delay1(input, delay_time)" "DelayStructure(input, delay_time, input, 1)" pysd.statefuls.Delay(...) Not tested for Xmile! -DELAY3I "DELAY3I(input, delay_time, initial_value)" delay3 "delay3(input, delay_time, initial_value)" "DelayStructure(input, delay_time, initial_value, 3)" pysd.statefuls.Delay(...) Not tested for Xmile! -DELAY3 "DELAY3(input, delay_time)" delay3 "delay3(input, delay_time)" "DelayStructure(input, delay_time, input, 3)" pysd.statefuls.Delay(...) Not tested for Xmile! -DELAY N "DELAY N(input, delay_time, initial_value, n)" delayn "delayn(input, delay_time, n, initial_value)" "DelayNStructure(input, delay_time, initial_value, n)" pysd.statefuls.DelayN(...) Not tested for Xmile! - delayn "delayn(input, delay_time, n)" "DelayNStructure(input, delay_time, input, n)" pysd.statefuls.DelayN(...) Not tested for Xmile! -DELAY FIXED "DELAY FIXED(input, delay_time, initial_value)" "DelayFixed(input, delay_time, initial_value)" pysd.statefuls.DelayFixed(...) Not tested for Xmile! -SMOOTHI "SMOOTH1I(input, delay_time, initial_value)" smth1 "smth1(input, smth_time, initial_value)" "SmoothStructure(input, smth_time, initial_value, 1)" pysd.statefuls.Smooth(...) Not tested for Xmile! -SMOOTH "SMOOTH1(input, delay_time)" smth1 "smth1(input, smth_time)" "SmoothStructure(input, smth_time, input, 1)" pysd.statefuls.Smooth(...) Not tested for Xmile! -SMOOTH3I "SMOOTH3I(input, delay_time, initial_value)" smth3 "smth3(input, smth_time, initial_value)" "SmoothStructure(input, smth_time, initial_value, 3)" pysd.statefuls.Smooth(...) Not tested for Xmile! -SMOOTH3 "SMOOTH3(input, delay_time)" smth3 "smth3(input, smth_time)" "SmoothStructure(input, smth_time, input, 3)" pysd.statefuls.Smooth(...) Not tested for Xmile! -SMOOTH N "SMOOTH N(input, delay_time, initial_value, n)" smthn "smthn(input, smth_time, n, initial_value)" "SmoothNStructure(input, smth_time, initial_value, n)" pysd.statefuls.SmoothN(...) Not tested for Xmile! - smthn "smthn(input, smth_time, n)" "SmoothNStructure(input, smth_time, input, n)" pysd.statefuls.SmoothN(...) Not tested for Xmile! - forcst "forcst(input, average_time, horizon, initial_trend)" "ForecastStructure(input, average_time, horizon, initial_trend)" pysd.statefuls.Forecast(...) Not tested for Xmile! -FORECAST "FORECAST(input, average_time, horizon)" forcst "forcst(input, average_time, horizon)" "ForecastStructure(input, average_time, horizon, 0)" pysd.statefuls.Forecast(...) Not tested for Xmile! -TREND "TREND(input, average_time, initial_trend)" trend "trend(input, average_time, initial_trend)" "TrendStructure(input, average_time, initial_trend)" pysd.statefuls.Trend(...) Not tested for Xmile! -TREND trend "trend(input, average_time)" "TrendStructure(input, average_time, 0)" pysd.statefuls.Trend(...) Not tested for Xmile! 
- -INITIAL INITIAL(pysd.statefuls.Initial pysd.statefuls.Initial -SAMPLE IF TRUE "SAMPLE IF TRUE(condition, input, initial_value)" "SampleIfTrueStructure(condition, input, initial_value)" pysd.statefuls.SampleIfTrue(�) - -Get functions -GET XLS DATA "GET XLS DATA('file', 'sheet', 'time_row_or_col', 'cell')" "GetDataStructure('file', 'sheet', 'time_row_or_col', 'cell')" pysd.external.ExtData(...) -GET DIRECT DATA "GET DIRECT DATA('file', 'sheet', 'time_row_or_col', 'cell')" "GetDataStructure('file', 'sheet', 'time_row_or_col', 'cell')" pysd.external.ExtData(...) -GET XLS LOOKUPS "GET XLS LOOKUPS('file', 'sheet', 'x_row_or_col', 'cell')" "GetLookupsStructure('file', 'sheet', 'x_row_or_col', 'cell')" pysd.external.ExtLookup(...) -GET DIRECT LOOKUPS "GET DIRECT LOOKUPS('file', 'sheet', 'x_row_or_col', 'cell')" "GetLookupsStructure('file', 'sheet', 'x_row_or_col', 'cell')" pysd.external.ExtLookup(...) -GET XLS CONSTANTS "GET XLS CONSTANTS('file', 'sheet', 'cell')" "GetConstantsStructure('file', 'sheet', 'cell')" pysd.external.ExtConstant(...) -GET DIRECT CONSTANTS "GET DIRECT CONSTANTS('file', 'sheet', 'cell')" "GetConstantsStructure('file', 'sheet', 'cell')" pysd.external.ExtConstant(...) -GET XLS SUBSCRIPT "GET XLS SUBSCRIPT('file', 'sheet', 'first_cell', 'last_cell', 'prefix')" pysd.external.ExtSubscript(...) -GET DIRECT SUBSCRIPT "GET DIRECT SUBSCRIPT('file', 'sheet', 'first_cell', 'last_cell', 'prefix')" pysd.external.ExtSubscript(...) diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index 8eb05b91..add6b36e 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -1,25 +1,105 @@ Vensim Translation ================== -PySD allows parsing a Vensim '.mdl' file and translates the result to an AbstractModel class that can be used to builde the model. +PySD allows parsing a Vensim '.mdl' file and translates the result to an :py:class:`AbstractModel` object that can be used to builde the model. -Supported Functions and Features --------------------------------- +The translation workflow +------------------------- +The following translation workflow allows splitting the Vensim file while parsing each part of it in order to make it possible to build an :py:class:`AbstractModel` type object. The workflow could be summarized as follows: -Ongoing development of the translator will support the full subset of Vensim functionality that -has an equivalent in XMILE. The current release supports the following functionality: +1. Vensim file: Splits the file content from the skecth and allows splitting the model in sections (main section, macro section) +2. Vensim section: Full set of varibles and definitions that can be integrated. Allows splitting the model expressions. +3. Vensim element: A definition in the mdl file which could be a subscript (sub)range definition or a variable definition. It includes units and commnets. Definitions for the same variable are grouped after in the same :py:class:`AbstractElement` object. Allows parsing its left hand side (LHS) to get the name of the subscript (sub)range or variable and it is returned as a specific type of component depending on the used assing operator (=, ==, :=, (), :) +4. Vensim component: The classified object for a variable definition, it depends on the opperator used to define the variable. Its right hand side (RHS) can be parsed to get the Abstract Syntax Tree (AST) of the expression. -.. 
include:: supported_vensim_functions.rst
+
-The translation workflow
--------------------------
-
 Vensim file
 ^^^^^^^^^^^

 .. automodule:: pysd.translation.vensim.vensim_file
-   :members:
+   :members: VensimFile
    :undoc-members:
+
+Vensim section
+^^^^^^^^^^^^^^
+
+.. automodule:: pysd.translation.vensim.vensim_section
+   :members: Section
+   :undoc-members:
+
+Vensim element
+^^^^^^^^^^^^^^
+
+.. automodule:: pysd.translation.vensim.vensim_element
+   :members: SubscriptRange, Element, Component, UnchangeableConstant, Data, Lookup
+   :undoc-members:
+
+
+Supported Functions and Features
+--------------------------------
+
+Ongoing development of the translator will support the full set of Vensim functionality. The current release supports the following operators, functions and features.
+
+Operators
+^^^^^^^^^
+All the basic operators are supported; they are shown in the tables below.
+
+.. csv-table:: Supported unary operators
+   :file: ../tables/unary_vensim.csv
+   :header-rows: 1
+
+.. csv-table:: Supported binary operators
+   :file: ../tables/binary_vensim.csv
+   :header-rows: 1
+
+Moreover, the Vensim :EXCEPT: operator is also supported to manage exceptions in the subscripts. See the section on subscripts below. TODO include link
+
+Functions
+^^^^^^^^^
+Not all Vensim functions are included yet; the list of supported functions is given below.
+
+.. csv-table:: Supported basic functions
+   :file: ../tables/functions_vensim.csv
+   :header-rows: 1
+
+.. csv-table:: Supported delay functions
+   :file: ../tables/delay_functions_vensim.csv
+   :header-rows: 1
+
+.. csv-table:: Supported get functions
+   :file: ../tables/get_functions_vensim.csv
+   :header-rows: 1
+
+
+Subscripts
+^^^^^^^^^^
+Several subscript-related features are supported. These include:
+
+- Basic subscript operations with different ranges.
+- Subscript range and subrange definitions.
+- Basic subscript mapping, where the subscript range is mapped to a full range (e.g. new_dim: A, B, C -> dim, dim_other); mapping to a partial range is not supported yet (e.g. new_dim: A, B, C -> dim: E, F, G).
+- Subscript copy (e.g. new_dim <-> dim).
+- \:EXCEPT: operator.
+- Subscript usage as a variable (e.g. my_var[dim] = another var * dim).
+- Subscript vectorial operations (e.g. SUM(my var[dim, dim!])).
+
+
+Data
+^^^^
+Data definitions from GET functions and empty definitions (no expressions, Vensim uses a VDF file) are supported. These definitions may or may not include any of the possible interpolation keywords: :INTERPOLATE:, :LOOK FORWARD:, :HOLD BACKWARD:, :RAW:. These keywords will be stored in the 'keyword' argument of :py:class:`AbstractData` as 'interpolate', 'look_forward', 'hold_backward' and 'raw', respectively. The Abstract Structure for GET XLS/DATA is given in the supported get functions table, while the Abstract Structure for empty Data declarations is a :py:class:`DataStructure`.
+
+For the moment, no specific functions applying over data (e.g. SHIFT IF TRUE, TIME SHIFT...) are supported, but they may be included in the future.
+
+Macro
+^^^^^
+Vensim macros are supported. The macro content between the keywords \:MACRO: and \:END OF MACRO: is classified as a section of the model and is later used to build an independent section from the rest of the model.
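As a rough illustration of the translation workflow described above (the model file name is only an example; the API may still change while this part of the library is under development):

.. code-block:: python

    from pysd.translation.vensim.vensim_file import VensimFile

    # the mdl file is read and cleaned when the object is created
    ven_file = VensimFile("my_model.mdl")

    # split the file into sections (main model + macros) and parse them
    ven_file.parse()

    # optionally classify the variables per view using the sketch
    ven_file.parse_sketch(subview_sep=[":", "."])

    # obtain the AbstractModel object used by the builders
    abs_model = ven_file.get_abstract_model()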
+
+Planned New Functions and Features
+----------------------------------
+- ALLOCATE BY PRIORITY
+- VECTOR SELECT
+- GET TIME VALUE
+- SHIFT IF TRUE
diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst
index e4b96314..9b0ea94b 100644
--- a/docs/structure/xmile_translation.rst
+++ b/docs/structure/xmile_translation.rst
@@ -1,7 +1,94 @@
-XMILE Translation
+Xmile Translation
 =================
-The XMILE reference documentation is located at:
+PySD allows parsing an Xmile file and translating the result to an :py:class:`AbstractModel` object that can be used to build the model.
-* XMILE: http://www.iseesystems.com/community/support/XMILEv4.pdf
-* SMILE: http://www.iseesystems.com/community/support/SMILEv4.pdf
\ No newline at end of file
+
+
+.. warning::
+   Currently no Xmile users are involved in the development of PySD, which is causing a gap between the Xmile and Vensim developments. Stella users are encouraged to take part in the development of PySD by including new test models and adding support for new functions and features.
+
+
+The translation workflow
+-------------------------
+The following translation workflow allows splitting the Xmile file while parsing each part of it in order to make it possible to build an :py:class:`AbstractModel` type object. The workflow can be summarized as follows:
+
+1. Xmile file: Parses the file with the etree library and creates a section for the model.
+2. Xmile section: Full set of variables and definitions that can be integrated. Allows splitting the model elements.
+3. Xmile element: A variable definition. It includes units and comments. Allows parsing the expressions it contains and saving them inside AbstractComponents, which are part of an AbstractElement.
+
+Once the model is parsed and split following the previous steps, the :py:class:`AbstractModel` can be returned.
+
+
+Xmile file
+^^^^^^^^^^
+
+.. automodule:: pysd.translation.xmile.xmile_file
+   :members: XmileFile
+   :undoc-members:
+
+Xmile section
+^^^^^^^^^^^^^
+
+.. automodule:: pysd.translation.xmile.xmile_section
+   :members: Section
+   :undoc-members:
+
+Xmile element
+^^^^^^^^^^^^^
+
+.. automodule:: pysd.translation.xmile.xmile_element
+   :members: SubscriptRange, Element, Flaux, Gf, Stock
+   :undoc-members:
+
+
+Supported Functions and Features
+--------------------------------
+
+Ongoing development of the translator will support the full set of Xmile functionality. The current release supports the following operators, functions and features.
+
+.. warning::
+   Not all the supported functions and features are properly tested. Any new test model covering the missing function tests will be welcome.
+
+Operators
+^^^^^^^^^
+All the basic operators are supported; they are shown in the tables below.
+
+.. csv-table:: Supported unary operators
+   :file: ../tables/unary_xmile.csv
+   :header-rows: 1
+
+.. csv-table:: Supported binary operators
+   :file: ../tables/binary_xmile.csv
+   :header-rows: 1
+
+
+Functions
+^^^^^^^^^
+Not all Xmile functions are included yet; the list of supported functions is given below.
+
+.. csv-table:: Supported basic functions
+   :file: ../tables/functions_xmile.csv
+   :header-rows: 1
+
+.. csv-table:: Supported delay functions
+   :file: ../tables/delay_functions_xmile.csv
+   :header-rows: 1
+
+
+Subscripts
+^^^^^^^^^^
+Several subscript-related features are supported. These include:
+
+- Basic subscript operations with different ranges.
+- Subscript range and subrange definitions.
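A parallel sketch for Xmile, assuming the Xmile front end mirrors the Vensim workflow described above (the file name is only an example and the exact call sequence is an assumption, not part of this patch):

.. code-block:: python

    from pysd.translation.xmile.xmile_file import XmileFile

    # open the Xmile file (the path is illustrative)
    xmile_file = XmileFile("my_model.xmile")

    # parse the model section and its elements
    xmile_file.parse()

    # obtain the AbstractModel object used by the builders
    abs_model = xmile_file.get_abstract_model()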
+ + +Supported in Vensim but not in Xmile +------------------------------------ +Macro +^^^^^ +Currently Xmile macros are not supported. In Vensim macros are classified as an independent section of the model. If they are properly parsed in the :py:class:`XmileFile` adding support for Xmile should be easy. + +Planed New Functions and Features +--------------------------------- +Nothing yet. diff --git a/docs/tables/binary.tab b/docs/tables/binary.tab new file mode 100644 index 00000000..d7ec0c2c --- /dev/null +++ b/docs/tables/binary.tab @@ -0,0 +1,15 @@ +Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Vensim comments Xmile comments Python comments +^ A ^ B ^ A ^ B "ArithmeticStructure(['^'], (A, B))" A**B +"\*" A * B "\*" A * B "ArithmeticStructure(['*'], (A, B))" A*B +/ A / B / A / B "ArithmeticStructure(['/'], (A, B))" A/B + mod A mod B "CallStructure('modulo', (A, B))" "pysd.functions.modulo(A, B)" +"\+" A + B "\+" A + B "ArithmeticStructure(['+'], (A, B))" A+B +"\-" A - B "\-" A - B "ArithmeticStructure(['-'], (A, B))" A-B += A = B = A = B "LogicStructure(['='], (A, B))" A == B +<> A <> B <> A <> B "LogicStructure(['<>'], (A, B))" A != B +< A < B < A < B "LogicStructure(['<'], (A, B))" A < B +> A > B > A > B "LogicStructure(['>'], (A, B))" A > B +>= A >= B >= A >= B "LogicStructure(['>='], (A, B))" A >= B +<= A <= B <= A <= B "LogicStructure(['<='], (A, B))" A <= B +"\:AND:" A :AND: B and A and B "LogicStructure([':AND:'], (A, B))" "numpy.and(A, B)" +"\:OR:" A :OR: B or A or B "LogicStructure([':OR:'], (A, B))" "numpy.or(A, B)" diff --git a/docs/tables/delay_functions.tab b/docs/tables/delay_functions.tab new file mode 100644 index 00000000..cd209660 --- /dev/null +++ b/docs/tables/delay_functions.tab @@ -0,0 +1,18 @@ +Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Vensim comments Xmile comments Python comments +DELAY1I "DELAY1I(input, delay_time, initial_value)" delay1 "delay1(input, delay_time, initial_value)" "DelayStructure(input, delay_time, initial_value, 1)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY1 "DELAY1(input, delay_time)" delay1 "delay1(input, delay_time)" "DelayStructure(input, delay_time, input, 1)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY3I "DELAY3I(input, delay_time, initial_value)" delay3 "delay3(input, delay_time, initial_value)" "DelayStructure(input, delay_time, initial_value, 3)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY3 "DELAY3(input, delay_time)" delay3 "delay3(input, delay_time)" "DelayStructure(input, delay_time, input, 3)" pysd.statefuls.Delay(...) Not tested for Xmile! +DELAY N "DELAY N(input, delay_time, initial_value, n)" delayn "delayn(input, delay_time, n, initial_value)" "DelayNStructure(input, delay_time, initial_value, n)" pysd.statefuls.DelayN(...) Not tested for Xmile! + delayn "delayn(input, delay_time, n)" "DelayNStructure(input, delay_time, input, n)" pysd.statefuls.DelayN(...) Not tested for Xmile! +DELAY FIXED "DELAY FIXED(input, delay_time, initial_value)" "DelayFixed(input, delay_time, initial_value)" pysd.statefuls.DelayFixed(...) Not tested for Xmile! +SMOOTHI "SMOOTH1I(input, delay_time, initial_value)" smth1 "smth1(input, smth_time, initial_value)" "SmoothStructure(input, smth_time, initial_value, 1)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH "SMOOTH1(input, delay_time)" smth1 "smth1(input, smth_time)" "SmoothStructure(input, smth_time, input, 1)" pysd.statefuls.Smooth(...) Not tested for Xmile! 
+SMOOTH3I "SMOOTH3I(input, delay_time, initial_value)" smth3 "smth3(input, smth_time, initial_value)" "SmoothStructure(input, smth_time, initial_value, 3)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH3 "SMOOTH3(input, delay_time)" smth3 "smth3(input, smth_time)" "SmoothStructure(input, smth_time, input, 3)" pysd.statefuls.Smooth(...) Not tested for Xmile! +SMOOTH N "SMOOTH N(input, delay_time, initial_value, n)" smthn "smthn(input, smth_time, n, initial_value)" "SmoothNStructure(input, smth_time, initial_value, n)" pysd.statefuls.SmoothN(...) Not tested for Xmile! + smthn "smthn(input, smth_time, n)" "SmoothNStructure(input, smth_time, input, n)" pysd.statefuls.SmoothN(...) Not tested for Xmile! + forcst "forcst(input, average_time, horizon, initial_trend)" "ForecastStructure(input, average_time, horizon, initial_trend)" pysd.statefuls.Forecast(...) Not tested for Xmile! +FORECAST "FORECAST(input, average_time, horizon)" forcst "forcst(input, average_time, horizon)" "ForecastStructure(input, average_time, horizon, 0)" pysd.statefuls.Forecast(...) Not tested for Xmile! +TREND "TREND(input, average_time, initial_trend)" trend "trend(input, average_time, initial_trend)" "TrendStructure(input, average_time, initial_trend)" pysd.statefuls.Trend(...) Not tested for Xmile! + trend "trend(input, average_time)" "TrendStructure(input, average_time, 0)" pysd.statefuls.Trend(...) Not tested for Xmile! diff --git a/docs/tables/functions.tab b/docs/tables/functions.tab new file mode 100644 index 00000000..34124719 --- /dev/null +++ b/docs/tables/functions.tab @@ -0,0 +1,35 @@ +Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Vensim comments Xmile comments Python comments +ABS ABS(A) abs(A) abs(A) "CallStructure('abs', (A,))" numpy.abs(A) +MIN "MIN(A, B)" min "min(A, B)" "CallStructure('min', (A, B))" "numpy.minimum(A, B)" +MAX "MAX(A, B)" max "max(A, B)" "CallStructure('max', (A, B))" "numpy.maximum(A, B)" +SQRT SQRT(A) sqrt sqrt(A) "CallStructure('sqrt', (A,))" numpy.sqrt +EXP EXP(A) exp exp(A) "CallStructure('exp', (A,))" numpy.exp(A) +LN LN(A) ln ln(A) "CallStructure('ln', (A,))" numpy.log(A) +SIN SIN(A) sin sin(A) "CallStructure('sin', (A,))" numpy.sin(A) +COS COS(A) cos cos(A) "CallStructure('cos', (A,))" numpy.cos(A) +TAN TAN(A) tan tan(A) "CallStructure('tan', (A,))" numpy.tan(A) +ARCSIN ARCSIN(A) arcsin arcsin(A) "CallStructure('arcsin', (A,))" numpy.arcsin(A) +ARCCOS ARCCOS(A) arccos arccos(A) "CallStructure('arccos', (A,))" numpy.arccos(A) +ARCTAN ARCTAN(A) arctan arctan(A) "CallStructure('arctan', (A,))" numpy.arctan(A) +INVERT MATRIX INVERT MATRIX(A) "CallStructure('invert_matrix', (A,))" pysd.functions.invert_matrix(A) +ELMCOUNT ELMCOUNT(A) "CallStructure('elmcount', (A,))" len(A) +INTEGER INTEGER(A) int int(A) "CallStructure('int', (A,))" pysd.functions.integer(A) +QUANTUM "QUANTUM(A, B)" "CallStructure('quantum', (A, B))" "pysd.functions.quantum(A, B)" +MODULO "MODULO(A, B)" "CallStructure('modulo', (A, B))" "pysd.functions.modulo(A, B)" +IF THEN ELSE "IF THEN ELSE(A, B, C)" if_then_else "if_then_else(A, B, C)" "CallStructure('if_then_else', (A, B))" "pysd.functions.if_then_else(A, lambda: B, lambda: C)" + IF condition THEN value_true ELSE value_false IF A THEN B ELSE C "CallStructure('if_then_else', (A, B))" "pysd.functions.if_then_else(A, lambda: B, lambda: C)" +XIDZ "XIDZ(A, B, X)" safediv "safediv(A, B, X)" "CallStructure('xidz', (A, B, X))" "pysd.functions.xidz(A, B, X)" +ZIDZ "ZIDZ(A, B)" safediv "safediv(A, B)" "CallStructure('zidz', (A, B))" 
"pysd.functions.zidz(A, B)" + +VMIN VMIN(A) "CallStructure('vmin', (A,))" pysd.functions.vmin(A) +VMAX VMAX(A) "CallStructure('vmax', (A,))" pysd.functions.vmax(A) +SUM SUM(A) "CallStructure('sum', (A,))" pysd.functions.sum(A) +PROD PROD(A) "CallStructure('prod', (A,))" pysd.functions.prod(A) + +PULSE PULSE pysd.functions.pulse +PULSE TRAIN PULSE TRAIN pysd.functions.pulse_train +RAMP RAMP pysd.functions.ramp +STEP STEP pysd.functions.step +GAME GAME(A) GameStructure(A) A +INITIAL INITIAL(value) init init(value) InitialStructure(value) pysd.statefuls.Initial +SAMPLE IF TRUE "SAMPLE IF TRUE(condition, input, initial_value)" "SampleIfTrueStructure(condition, input, initial_value)" pysd.statefuls.SampleIfTrue(...) diff --git a/docs/tables/get_functions.tab b/docs/tables/get_functions.tab new file mode 100644 index 00000000..996f490b --- /dev/null +++ b/docs/tables/get_functions.tab @@ -0,0 +1,9 @@ +Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Vensim comments Xmile comments Python comments +GET XLS DATA "GET XLS DATA('file', 'sheet', 'time_row_or_col', 'cell')" "GetDataStructure('file', 'sheet', 'time_row_or_col', 'cell')" pysd.external.ExtData(...) +GET DIRECT DATA "GET DIRECT DATA('file', 'sheet', 'time_row_or_col', 'cell')" "GetDataStructure('file', 'sheet', 'time_row_or_col', 'cell')" pysd.external.ExtData(...) +GET XLS LOOKUPS "GET XLS LOOKUPS('file', 'sheet', 'x_row_or_col', 'cell')" "GetLookupsStructure('file', 'sheet', 'x_row_or_col', 'cell')" pysd.external.ExtLookup(...) +GET DIRECT LOOKUPS "GET DIRECT LOOKUPS('file', 'sheet', 'x_row_or_col', 'cell')" "GetLookupsStructure('file', 'sheet', 'x_row_or_col', 'cell')" pysd.external.ExtLookup(...) +GET XLS CONSTANTS "GET XLS CONSTANTS('file', 'sheet', 'cell')" "GetConstantsStructure('file', 'sheet', 'cell')" pysd.external.ExtConstant(...) +GET DIRECT CONSTANTS "GET DIRECT CONSTANTS('file', 'sheet', 'cell')" "GetConstantsStructure('file', 'sheet', 'cell')" pysd.external.ExtConstant(...) +GET XLS SUBSCRIPT "GET XLS SUBSCRIPT('file', 'sheet', 'first_cell', 'last_cell', 'prefix')" pysd.external.ExtSubscript(...) +GET DIRECT SUBSCRIPT "GET DIRECT SUBSCRIPT('file', 'sheet', 'first_cell', 'last_cell', 'prefix')" pysd.external.ExtSubscript(...) 
diff --git a/docs/tables/unary.tab b/docs/tables/unary.tab new file mode 100644 index 00000000..9d2cb6bc --- /dev/null +++ b/docs/tables/unary.tab @@ -0,0 +1,4 @@ +Vensim Vensim example Xmile Xmile example Abstract Syntax Python Translation Vensim comments Xmile comments Python comments +"\-" -A "\-" -A "LogicStructure(['negative'], (A,))" -A +"\+" +A "\+" +A A A +"\:NOT:" "\:NOT: A" not not A "LogicStructure([':NOT:'], (A,))" numpy.not(A) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index caf07b1e..f035d2bb 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -609,7 +609,9 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "Initial" self.section.imports.add("statefuls", "Initial") + arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_initial") @@ -642,8 +644,10 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "Integ" self.section.imports.add("statefuls", "Integ") + arguments["initial"].reshape(self.section.subscripts, self.def_subs) arguments["flow"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_integ") @@ -679,9 +683,11 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "Delay" self.section.imports.add("statefuls", self.dtype) + arguments["input"].reshape(self.section.subscripts, self.def_subs) arguments["delay_time"].reshape(self.section.subscripts, self.def_subs) arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix=f"_{self.dtype.lower()}") arguments["dtype"] = self.dtype @@ -723,8 +729,10 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "DelayFixed" self.section.imports.add("statefuls", "DelayFixed") + arguments["input"].reshape(self.section.subscripts, self.def_subs) arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_delayfixed") @@ -761,10 +769,14 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "Smooth" self.section.imports.add("statefuls", "Smooth") - arguments["input"].reshape(self.section.subscripts, self.def_subs) + + arguments["input"].reshape( + self.section.subscripts, self.def_subs) arguments["smooth_time"].reshape( self.section.subscripts, self.def_subs) - arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape( + self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_smooth") @@ -802,28 +814,33 @@ def __init__(self, trend_str, component): self.arguments = { "input": trend_str.input, "average_time": trend_str.average_time, - "initial": trend_str.initial, + "initial_trend": trend_str.initial_trend, } def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "Trend" self.section.imports.add("statefuls", "Trend") - arguments["input"].reshape(self.section.subscripts, self.def_subs) + + arguments["input"].reshape( + 
self.section.subscripts, self.def_subs) arguments["average_time"].reshape( self.section.subscripts, self.def_subs) - arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["initial_trend"].reshape( + self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_trend") self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Trend(lambda: %(input)s, " - "lambda: %(average_time)s, lambda: %(initial)s, " + "lambda: %(average_time)s, " + "lambda: %(initial_trend)s, " "'%(name)s')" % arguments, "calls": { "initial": merge_dependencies( - arguments["initial"].calls, + arguments["initial_trend"].calls, arguments["input"].calls, arguments["average_time"].calls), "step": merge_dependencies( @@ -853,11 +870,16 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "Forecast" self.section.imports.add("statefuls", "Forecast") - arguments["input"].reshape(self.section.subscripts, self.def_subs) + + arguments["input"].reshape( + self.section.subscripts, self.def_subs) arguments["average_time"].reshape( self.section.subscripts, self.def_subs) - arguments["horizon"].reshape(self.section.subscripts, self.def_subs) - arguments["initial_trend"].reshape(self.section.subscripts, self.def_subs) + arguments["horizon"].reshape( + self.section.subscripts, self.def_subs) + arguments["initial_trend"].reshape( + self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_forecast") @@ -897,9 +919,11 @@ def build(self, arguments): self.component.type = "Stateful" self.component.subtype = "SampleIfTrue" self.section.imports.add("statefuls", "SampleIfTrue") + arguments["condition"].reshape(self.section.subscripts, self.def_subs) arguments["input"].reshape(self.section.subscripts, self.def_subs) arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_sampleiftrue") @@ -1351,6 +1375,7 @@ def visit(self): except_def.reshape( self.subscripts, self.subscripts.make_coord_dict(except_list)) - for except_def, except_list in zip(excepts, self.except_definitions) + for except_def, except_list + in zip(excepts, self.except_definitions) ] return excepts diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translation/structures/abstract_expressions.py index d11141da..28ff2bbc 100644 --- a/pysd/translation/structures/abstract_expressions.py +++ b/pysd/translation/structures/abstract_expressions.py @@ -1,11 +1,32 @@ +""" +The following abstract structures are used to build the Abstract Syntax +Tree (AST). In general, there is no hierarchy between them. For example, +an ArithmeticStructure can contain a CallStructure which at the same time +contains another ArithmeticStructure. However, some of them could not be +inside another structures due to the restrictions of the source languages. +For example, the GetConstantsStructure cannot be a part of another structure +because it has to appear after the '=' sign in Vensim and not be followed by +anything else. +""" from dataclasses import dataclass from typing import Union @dataclass class ArithmeticStructure: - operators: str - arguments: tuple + """ + Dataclass for an arithmetic structure. 
+ + Parameters + ---------- + operators: list + List of operators applied between the arguments + arguments: list + The arguments of the arithmetics operations. + + """ + operators: list + arguments: list def __str__(self) -> str: # pragma: no cover return "ArithmeticStructure:\n\t %s %s" % ( @@ -14,8 +35,19 @@ def __str__(self) -> str: # pragma: no cover @dataclass class LogicStructure: - operators: str - arguments: tuple + """ + Dataclass for a logic structure. + + Parameters + ---------- + operators: list + List of operators applied between the arguments + arguments: list + The arguments of the logic operations. + + """ + operators: list + arguments: list def __str__(self) -> str: # pragma: no cover return "LogicStructure:\n\t %s %s" % ( @@ -24,6 +56,15 @@ def __str__(self) -> str: # pragma: no cover @dataclass class SubscriptsReferenceStructure: + """ + Dataclass for a subscript reference structure. + + Parameters + ---------- + subscripts: tuple + The list of subscripts referenced. + + """ subscripts: tuple def __str__(self) -> str: # pragma: no cover @@ -32,6 +73,17 @@ def __str__(self) -> str: # pragma: no cover @dataclass class ReferenceStructure: + """ + Dataclass for an element reference structure. + + Parameters + ---------- + reference: str + The name of the referenced element. + subscripts: SubscriptsReferenceStructure or None + The subscrips used in the reference. + + """ reference: str subscripts: Union[SubscriptsReferenceStructure, None] = None @@ -43,6 +95,17 @@ def __str__(self) -> str: # pragma: no cover @dataclass class CallStructure: + """ + Dataclass for a call structure. + + Parameters + ---------- + function: str or ReferenceStructure + The name or the reference of the callable. + arguments: tuple + The list of arguments used for calling the function. + + """ function: Union[str, object] arguments: tuple @@ -57,6 +120,15 @@ def __str__(self) -> str: # pragma: no cover @dataclass class GameStructure: + """ + Dataclass for a game structure. + + Parameters + ---------- + expression: AST + The expression inside the game call. + + """ expression: object def __str__(self) -> str: # pragma: no cover @@ -65,6 +137,15 @@ def __str__(self) -> str: # pragma: no cover @dataclass class InitialStructure: + """ + Dataclass for a initial structure. + + Parameters + ---------- + initial: AST + The expression inside the initial call. + + """ initial: object def __str__(self) -> str: # pragma: no cover @@ -74,6 +155,17 @@ def __str__(self) -> str: # pragma: no cover @dataclass class IntegStructure: + """ + Dataclass for an integ/stock structure. + + Parameters + ---------- + flow: AST + The flow of the stock. + initial: AST + The initial value of the stock. + + """ flow: object initial: object @@ -85,6 +177,21 @@ def __str__(self) -> str: # pragma: no cover @dataclass class DelayStructure: + """ + Dataclass for a delay structure. + + Parameters + ---------- + input: AST + The input of the delay. + delay_time: AST + The delay time value of the delay. + initial: AST + The initial value of the delay. + order: float + The order of the delay. + + """ input: object delay_time: object initial: object @@ -100,6 +207,21 @@ def __str__(self) -> str: # pragma: no cover @dataclass class DelayNStructure: + """ + Dataclass for a delay n structure. + + Parameters + ---------- + input: AST + The input of the delay. + delay_time: AST + The delay time value of the delay. + initial: AST + The initial value of the delay. + order: float + The order of the delay. 
+ + """ input: object delay_time: object initial: object @@ -118,6 +240,19 @@ def __str__(self) -> str: # pragma: no cover @dataclass class DelayFixedStructure: + """ + Dataclass for a delay fixed structure. + + Parameters + ---------- + input: AST + The input of the delay. + delay_time: AST + The delay time value of the delay. + initial: AST + The initial value of the delay. + + """ input: object delay_time: object initial: object @@ -131,6 +266,21 @@ def __str__(self) -> str: # pragma: no cover @dataclass class SmoothStructure: + """ + Dataclass for a smooth structure. + + Parameters + ---------- + input: AST + The input of the smooth. + delay_time: AST + The smooth time value of the smooth. + initial: AST + The initial value of the smooth. + order: float + The order of the smooth. + + """ input: object smooth_time: object initial: object @@ -146,6 +296,21 @@ def __str__(self) -> str: # pragma: no cover @dataclass class SmoothNStructure: + """ + Dataclass for a smooth n structure. + + Parameters + ---------- + input: AST + The input of the smooth. + delay_time: AST + The smooth time value of the smooth. + initial: AST + The initial value of the smooth. + order: float + The order of the smooth. + + """ input: object smooth_time: object initial: object @@ -164,9 +329,22 @@ def __str__(self) -> str: # pragma: no cover @dataclass class TrendStructure: + """ + Dataclass for a trend structure. + + Parameters + ---------- + input: AST + The input of the trend. + average_time: AST + The average time value of the trend. + initial_trend: AST + The initial trend value of the trend. + + """ input: object average_time: object - initial: object + initial_trend: object def __str__(self) -> str: # pragma: no cover return "TrendStructure:\n\t%s,\n\t%s,\n\t%s" % ( @@ -177,6 +355,21 @@ def __str__(self) -> str: # pragma: no cover @dataclass class ForecastStructure: + """ + Dataclass for a forecast structure. + + Parameters + ---------- + input: AST + The input of the forecast. + averae_time: AST + The average time value of the forecast. + horizon: float + The horizon value of the forecast. + initial_trend: AST + The initial trend value of the forecast. + + """ input: object average_time: object horizon: object @@ -192,6 +385,19 @@ def __str__(self) -> str: # pragma: no cover @dataclass class SampleIfTrueStructure: + """ + Dataclass for a sample if true structure. + + Parameters + ---------- + condition: AST + The condition of the sample if true + input: AST + The input of the sample if true. + initial: AST + The initial value of the sample if true. + + """ condition: object input: object initial: object @@ -205,6 +411,23 @@ def __str__(self) -> str: # pragma: no cover @dataclass class LookupsStructure: + """ + Dataclass for a lookup structure. + + Parameters + ---------- + x: tuple + The list of the x values of the lookup. + y: tuple + The list of the y values of the lookup. + x_range: tuple + The minimum and maximum value of x. + y_range: tuple + The minimum and maximum value of y. + type: str + The interpolation method. + + """ x: tuple y: tuple x_range: tuple @@ -219,7 +442,18 @@ def __str__(self) -> str: # pragma: no cover @dataclass class InlineLookupsStructure: - argument: None + """ + Dataclass for an inline lookup structure. + + Parameters + ---------- + argument: AST + The argument of the inline lookup. + lookups: LookupStructure + The lookups definition. 
+ + """ + argument: object lookups: LookupsStructure def __str__(self) -> str: # pragma: no cover @@ -231,6 +465,14 @@ def __str__(self) -> str: # pragma: no cover @dataclass class DataStructure: + """ + Dataclass for an empty data structure. + + Parameters + ---------- + None + + """ pass def __str__(self) -> str: # pragma: no cover @@ -239,6 +481,22 @@ def __str__(self) -> str: # pragma: no cover @dataclass class GetLookupsStructure: + """ + Dataclass for a get lookups structure. + + Parameters + ---------- + file: str + The file path where the data is. + tab: str + The sheetname where the data is. + x_row_or_col: str + The pointer to the cell or cellrange name that defines the + interpolation series data. + cell: str + The pointer to the cell or the cellrange name that defines the data. + + """ file: str tab: str x_row_or_col: str @@ -252,6 +510,22 @@ def __str__(self) -> str: # pragma: no cover @dataclass class GetDataStructure: + """ + Dataclass for a get lookups structure. + + Parameters + ---------- + file: str + The file path where the data is. + tab: str + The sheetname where the data is. + time_row_or_col: str + The pointer to the cell or cellrange name that defines the + interpolation time series data. + cell: str + The pointer to the cell or the cellrange name that defines the data. + + """ file: str tab: str time_row_or_col: str @@ -265,6 +539,19 @@ def __str__(self) -> str: # pragma: no cover @dataclass class GetConstantsStructure: + """ + Dataclass for a get lookups structure. + + Parameters + ---------- + file: str + The file path where the data is. + tab: str + The sheetname where the data is. + cell: str + The pointer to the cell or the cellrange name that defines the data. + + """ file: str tab: str cell: str diff --git a/pysd/translation/structures/abstract_model.py b/pysd/translation/structures/abstract_model.py index 90ac5804..b5383ccc 100644 --- a/pysd/translation/structures/abstract_model.py +++ b/pysd/translation/structures/abstract_model.py @@ -1,3 +1,10 @@ +""" +The main Abstract dataclasses provide the structure for the information +from the Component level to the Model level. This classes are hierarchical +An AbstractComponent will be inside an AbstractElement, which is inside an +AbstractSection, which is a part of an AbstractModel. + +""" from dataclasses import dataclass from typing import Tuple, List, Union from pathlib import Path @@ -5,7 +12,24 @@ @dataclass class AbstractComponent: - subscripts: Tuple[str] + """ + Dataclass for a regular component. + + Parameters + ---------- + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that must be ignored (EXCEPT). + ast: object + The AbstractSyntaxTree of the component expression + type: str (optional) + The type of component. 'Auxiliary' by default. + subtype: str (optional) + The subtype of component. 'Normal' by default. + + """ + subscripts: Tuple[List[str], List[List[str]]] ast: object type: str = "Auxiliary" subtype: str = "Normal" @@ -15,6 +39,19 @@ def __str__(self) -> str: # pragma: no cover "%s" % repr(list(self.subscripts)) if self.subscripts else "") def dump(self, depth=None, indent="") -> str: # pragma: no cover + """ + Dump the component to a printable version. + + Parameters + ---------- + depth: int (optional) + The number of depht levels to show in the dumped output. + Default is None which will dump everything. 
+ + indent: str (optional) + The indent to use for a lower level object. Default is ''. + + """ if depth == 0: return self.__str__() @@ -26,7 +63,25 @@ def _str_child(self, depth, indent) -> str: # pragma: no cover @dataclass class AbstractUnchangeableConstant(AbstractComponent): - subscripts: Tuple[str] + """ + Dataclass for an unchangeable constant component. This class is a child + of AbstractComponent. + + Parameters + ---------- + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that must be ignored (EXCEPT). + ast: object + The AbstractSyntaxTree of the component expression + type: str (optional) + The type of component. 'Constant' by default. + subtype: str (optional) + The subtype of component. 'Unchangeable' by default. + + """ + subscripts: Tuple[List[str], List[List[str]]] ast: object type: str = "Constant" subtype: str = "Unchangeable" @@ -38,7 +93,27 @@ def __str__(self) -> str: # pragma: no cover @dataclass class AbstractLookup(AbstractComponent): - subscripts: Tuple[str] + """ + Dataclass for a lookup component. This class is a child of + AbstractComponent. + + Parameters + ---------- + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that must be ignored (EXCEPT). + ast: object + The AbstractSyntaxTree of the component expression + arguments: str (optional) + The name of the argument to use. 'x' by default. + type: str (optional) + The type of component. 'Lookup' by default. + subtype: str (optional) + The subtype of component. 'Hardcoded' by default. + + """ + subscripts: Tuple[List[str], List[List[str]]] ast: object arguments: str = "x" type: str = "Lookup" @@ -51,7 +126,28 @@ def __str__(self) -> str: # pragma: no cover @dataclass class AbstractData(AbstractComponent): - subscripts: Tuple[str] + """ + Dataclass for a data component. This class is a child + of AbstractComponent. + + Parameters + ---------- + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that must be ignored (EXCEPT). + ast: object + The AbstractSyntaxTree of the component expression + keyword: str or None (optional) + The data object keyword ('interpolate', 'hold_backward', + 'look_forward', 'raw'). Default is None. + type: str (optional) + The type of component. 'Data' by default. + subtype: str (optional) + The subtype of component. 'Normal' by default. + + """ + subscripts: Tuple[List[str], List[List[str]]] ast: object keyword: Union[str, None] = None type: str = "Data" @@ -63,6 +159,19 @@ def __str__(self) -> str: # pragma: no cover "%s" % repr(list(self.subscripts)) if self.subscripts else "") def dump(self, depth=None, indent="") -> str: # pragma: no cover + """ + Dump the component to a printable version. + + Parameters + ---------- + depth: int (optional) + The number of depht levels to show in the dumped output. + Default is None which will dump everything. + + indent: str (optional) + The indent to use for a lower level object. Default is ''. + + """ if depth == 0: return self.__str__() @@ -74,6 +183,23 @@ def _str_child(self, depth, indent) -> str: # pragma: no cover @dataclass class AbstractElement: + """ + Dataclass for an element. + + Parameters + ---------- + name: str + The name of the element. 
+ components: list + The list of AbstractComponents that define this element. + units: str (optional) + The units of the element. '' by default. + range: tuple (optional) + The range of the element. (None, None) by default. + units: str (optional) + The documentation of the element. '' by default. + + """ name: str components: List[AbstractComponent] units: str = "" @@ -85,6 +211,19 @@ def __str__(self) -> str: # pragma: no cover self.name, self.units, self.range, self.documentation) def dump(self, depth=None, indent="") -> str: # pragma: no cover + """ + Dump the element to a printable version. + + Parameters + ---------- + depth: int (optional) + The number of depht levels to show in the dumped output. + Default is None which will dump everything. + + indent: str (optional) + The indent to use for a lower level object. Default is ''. + + """ if depth == 0: return self.__str__() elif depth is not None: @@ -100,8 +239,22 @@ def _str_child(self, depth, indent) -> str: # pragma: no cover @dataclass class AbstractSubscriptRange: + """ + Dataclass for a subscript range. + + Parameters + ---------- name: str - subscripts: Tuple[str] + The name of the element. + subscripts: tuple or str or dict + The subscripts as a tuple for a regular definition, str for a + copy definition and as a dict for a GET XLS/DIRECT definition. + mapping: tuple + The set of subscript range that can be mapped to. + + """ + name: str + subscripts: Union[Tuple[str], str, dict] mapping: Tuple[str] def __str__(self) -> str: # pragma: no cover @@ -111,11 +264,54 @@ def __str__(self) -> str: # pragma: no cover if self.mapping else self.subscripts) def dump(self, depth=None, indent="") -> str: # pragma: no cover + """ + Dump the subscript range to a printable version. + + Parameters + ---------- + depth: int (optional) + The number of depht levels to show in the dumped output. + Default is None which will dump everything. + + indent: str (optional) + The indent to use for a lower level object. Default is ''. + + """ return self.__str__() @dataclass class AbstractSection: + """ + Dataclass for an element. + + Parameters + ---------- + name: str + Section name. '__main__' for the main section or the macro name. + path: pathlib.Path + Section path. It should be the model name for main section and + the clean macro name for a macro. + section_type: str ('main' or 'macro') + The section type. + params: list + List of params that takes the section. In the case of main + section it will be an empty list. + returns: list + List of variables that returns the section. In the case of main + section it will be an empty list. + subscripts: tuple + Tuple of AbstractSubscriptRanges that are defined in the section. + elements: tuple + Tuple of AbstractElements that are defined in the section. + split: bool + If split is True the created section will split the variables + depending on the views_dict. + views_dict: dict + The dictionary of the views. Giving the variables classified at + any level in order to split them by files. + + """ name: str path: Path type: str # main, macro or module @@ -131,6 +327,19 @@ def __str__(self) -> str: # pragma: no cover self.type, self.name, self.path) def dump(self, depth=None, indent="") -> str: # pragma: no cover + """ + Dump the section to a printable version. + + Parameters + ---------- + depth: int (optional) + The number of depht levels to show in the dumped output. + Default is None which will dump everything. + + indent: str (optional) + The indent to use for a lower level object. Default is ''. 
+
+        """
         if depth == 0:
             return self.__str__()
         elif depth is not None:
@@ -148,6 +357,17 @@ def _str_child(self, depth, indent) -> str:  # pragma: no cover
 @dataclass
 class AbstractModel:
+    """
+    Dataclass for a model.
+
+    Parameters
+    ----------
+    original_path: pathlib.Path
+        The path to the original file.
+    sections: tuple
+        Tuple of AbstractSections that are defined in the model.
+
+    """
     original_path: Path
     sections: Tuple[AbstractSection]
@@ -155,6 +375,19 @@ def __str__(self) -> str:  # pragma: no cover
         return "AbstractModel:\t%s\n" % self.original_path
 
     def dump(self, depth=None, indent="") -> str:  # pragma: no cover
+        """
+        Dump the model to a printable version.
+
+        Parameters
+        ----------
+        depth: int (optional)
+            The number of depth levels to show in the dumped output.
+            Default is None, which will dump everything.
+
+        indent: str (optional)
+            The indent to use for a lower level object. Default is ''.
+
+        """
         if depth == 0:
             return self.__str__()
         elif depth is not None:
diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py
index 027945b7..ffa344eb 100644
--- a/pysd/translation/vensim/vensim_element.py
+++ b/pysd/translation/vensim/vensim_element.py
@@ -1,3 +1,18 @@
+"""
+The Element class allows parsing the LHS of a model equation;
+depending on the LHS, a SubscriptRange object or a Component object
+will be returned. There are four types of components:
+
+- Component: Regular component, defined with '='.
+- UnchangeableConstant: Unchangeable constant, defined with '=='.
+- Data: Data component, defined with ':='.
+- Lookup: Lookup component, defined with '()'.
+
+Lookup components have their own parser for the RHS of the expression,
+while the other three components share the same RHS parser. The final
+result from a parsed component can be exported to an AbstractComponent
+object in order to build the model in another language.
+"""
 import re
 from typing import Union, Tuple, List
 import warnings
@@ -6,17 +21,31 @@
 
 from ..structures.abstract_model import\
     AbstractData, AbstractLookup, AbstractComponent,\
-    AbstractUnchangeableConstant
-from parsimonious.exceptions import IncompleteParseError,\
-    VisitationError,\
-    ParseError
+    AbstractUnchangeableConstant, AbstractSubscriptRange
 
 from . import vensim_utils as vu
 from .vensim_structures import structures, parsing_ops
 
 
 class Element():
-    """Model element parsed definition"""
+    """
+    Element object allows parsing the LHS of the Vensim expressions.
+
+    Parameters
+    ----------
+    equation: str
+        Original equation in the Vensim file.
+
+    units: str
+        The units of the element with the range, i.e., the content after
+        the first '~' symbol.
+
+    documentation: str
+        The comment of the element, i.e., the content after the second
+        '~' symbol.
+ + """ def __init__(self, equation: str, units: str, documentation: str): self.equation = equation @@ -29,17 +58,18 @@ def __str__(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get element information.""" return self.__str__() @property def verbose(self): # pragma: no cover - """Print model information""" + """Print element information.""" print(self._verbose) def _parse_units(self, units_str: str) -> Tuple[str, tuple]: """Split the range from the units""" - # TODO improve units parsing: move to _parse_section_elements + # TODO improve units parsing: parse them when parsing the section + # elements if not units_str: return "", (None, None) @@ -57,17 +87,32 @@ def _parse_units(self, units_str: str) -> Tuple[str, tuple]: ) return units, lims - def _parse(self) -> object: - """Parse model element to get the component object""" + def parse(self) -> object: + """ + Parse element object with parsimonious using the grammar given in + 'parsin_grammars/element_object.peg' and the class + ElementsComponentVisitor to visit the parsed expressions. + + Splits the LHS from the RHS of the equation. If the returned + object is a SubscriptRange, no more parsing is needed. Otherwise, + the RHS of the returned object (Component) should be parsed + to get the Abstract Syntax Tree. + + Returns + ------- + self.component: SubscriptRange or Component + The subscript range definition object or component object. + + """ tree = vu.Grammar.get("element_object").parse(self.equation) - self.component = ElementsComponentParser(tree).component + self.component = ElementsComponentVisitor(tree).component self.component.units = self.units self.component.range = self.range self.component.documentation = self.documentation return self.component -class ElementsComponentParser(parsimonious.NodeVisitor): +class ElementsComponentVisitor(parsimonious.NodeVisitor): """Visit model element definition to get the component object""" def __init__(self, ast): @@ -190,7 +235,9 @@ def visit__(self, n, vc): class SubscriptRange(): - """Subscript range definition, defined by ":" or "<->" in Vensim.""" + """ + Subscript range definition, defined by ":" or "<->" in Vensim. + """ def __init__(self, name: str, definition: Union[List[str], str, dict], mapping: List[str] = []): @@ -206,18 +253,52 @@ def __str__(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get subscript range information.""" return self.__str__() @property def verbose(self): # pragma: no cover - """Print model information""" + """Print subscript range information.""" print(self._verbose) + def get_abstract_subscript_range(self) -> AbstractSubscriptRange: + """ + Get Abstract Subscript Range used for building. This method is + automatically called by Sections's get_abstract_section. + + Returns + ------- + AbstractSubscriptRange: AbstractSubscriptRange + Abstract Subscript Range object that can be used for building + the model in another language. + + """ + return AbstractSubscriptRange( + name=self.name, + subscripts=self.definition, + mapping=self.mapping + ) + class Component(): - """Model component defined by "name = expr" in Vensim.""" - kind = "Model component" + """ + Model component defined by "name = expr" in Vensim. + + Parameters + ---------- + name: str + The original name of the component. 
+ + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that appears after :EXCEPT: keyword (if used). + + expression: str + The RHS of the element, expression to parse. + + """ + _kind = "Model component" def __init__(self, name: str, subscripts: Tuple[list, list], expression: str): @@ -226,7 +307,7 @@ def __init__(self, name: str, subscripts: Tuple[list, list], self.expression = expression def __str__(self): # pragma: no cover - text = "\n%s definition: %s" % (self.kind, self.name) + text = "\n%s definition: %s" % (self._kind, self.name) text += "\nSubscrips: %s" % repr(self.subscripts[0])\ if self.subscripts[0] else "" text += " EXCEPT %s" % repr(self.subscripts[1])\ @@ -244,33 +325,23 @@ def _expression(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover + """Get component information.""" return self.__str__() @property def verbose(self): # pragma: no cover + """Print component information.""" print(self._verbose) - def _parse(self) -> None: - """Parse model component to get the AST""" - try: - tree = vu.Grammar.get("components", parsing_ops).parse( - self.expression) - except (IncompleteParseError, ParseError) as err: - raise ValueError( - err.args[0] + "\n\n" - "\nError when parsing definition:\n\t %s\n\n" - "probably used definition is invalid or not integrated..." - "\nSee parsimonious output above." % self.expression - ) - try: - self.ast = EquationParser(tree).translation - except VisitationError as err: - raise ValueError( - err.args[0] + "\n\n" - "\nError when visiting definition:\n\t %s\n\n" - "probably used definition is invalid or not integrated..." - "\nSee parsimonious output above." % self.expression - ) + def parse(self) -> None: + """ + Parse component object with parsimonious using the grammar given + in 'parsin_grammars/components.peg' and the class EquationVisitor + to visit the RHS of the expressions. + + """ + tree = vu.Grammar.get("components", parsing_ops).parse(self.expression) + self.ast = EquationVisitor(tree).translation if isinstance(self.ast, structures["get_xls_lookups"]): self.lookup = True @@ -279,7 +350,19 @@ def _parse(self) -> None: def get_abstract_component(self) -> Union[AbstractComponent, AbstractLookup]: - """Get Abstract Component used for building""" + """ + Get Abstract Component used for building. This method is + automatically called by Sections's get_abstract_section. + + Returns + ------- + AbstractComponent: AbstractComponent or AbstractLookup + Abstract Component object that can be used for building + the model in another language. If the component equations + includes external lookups (GET XLS/DIRECT LOOKUPS) + AbstractLookup class will be used + + """ if self.lookup: # get lookups equations return AbstractLookup(subscripts=self.subscripts, ast=self.ast) @@ -288,40 +371,119 @@ def get_abstract_component(self) -> Union[AbstractComponent, class UnchangeableConstant(Component): - """Unchangeable constant defined by "name == expr" in Vensim.""" - kind = "Unchangeable constant component" + """ + Unchangeable constant defined by "name == expr" in Vensim. + This class is a soon of Component. + + Parameters + ---------- + name: str + The original name of the component. + + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that appears after :EXCEPT: keyword (if used). 
+ + expression: str + The RHS of the element, expression to parse. + + """ + _kind = "Unchangeable constant component" def __init__(self, name: str, subscripts: Tuple[list, list], expression: str): super().__init__(name, subscripts, expression) def get_abstract_component(self) -> AbstractUnchangeableConstant: - """Get Abstract Component used for building""" + """ + Get Abstract Component used for building. This method is + automatically called by Sections's get_abstract_section. + + Returns + ------- + AbstractComponent: AbstractUnchangeableConstant + Abstract Component object that can be used for building + the model in another language. + + """ return AbstractUnchangeableConstant( subscripts=self.subscripts, ast=self.ast) class Lookup(Component): - """Lookup variable, defined by "name(expr)" in Vensim.""" - kind = "Lookup component" + """ + Lookup component, defined by "name(expr)" in Vensim. + This class is a soon of Component. + + Parameters + ---------- + name: str + The original name of the component. + + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that appears after :EXCEPT: keyword (if used). + + expression: str + The RHS of the element, expression to parse. + + """ + _kind = "Lookup component" def __init__(self, name: str, subscripts: Tuple[list, list], expression: str): super().__init__(name, subscripts, expression) - def _parse(self) -> None: - """Parse model component to get the AST""" + def parse(self) -> None: + """ + Parse component object with parsimonious using the grammar given + in 'parsin_grammars/lookups.peg' and the class LookupsVisitor + to visit the RHS of the expressions. + """ tree = vu.Grammar.get("lookups").parse(self.expression) - self.ast = LookupsParser(tree).translation + self.ast = LookupsVisitor(tree).translation def get_abstract_component(self) -> AbstractLookup: - """Get Abstract Component used for building""" + """ + Get Abstract Component used for building. This method is + automatically called by Sections's get_abstract_section. + + Returns + ------- + AbstractComponent: AbstractLookup + Abstract Component object that can be used for building + the model in another language. + + """ return AbstractLookup(subscripts=self.subscripts, ast=self.ast) class Data(Component): - """Data variable, defined by "name := expr" in Vensim.""" - kind = "Data component" + """ + Data component, defined by "name := expr" in Vensim. + This class is a soon of Component. + + Parameters + ---------- + name: str + The original name of the component. + + subscripts: tuple + Tuple of length two with first argument the list of subscripts + in the variable definition and the second argument the list of + subscripts list that appears after :EXCEPT: keyword (if used). + + keyword: str + The keyword used befor the ":=" symbol, it could be ('interpolate', + 'raw', 'hold_backward', 'look_forward') + + expression: str + The RHS of the element, expression to parse. 
+ + """ + _kind = "Data component" def __init__(self, name: str, subscripts: Tuple[list, list], keyword: str, expression: str): @@ -329,7 +491,7 @@ def __init__(self, name: str, subscripts: Tuple[list, list], self.keyword = keyword def __str__(self): # pragma: no cover - text = "\n%s definition: %s" % (self.kind, self.name) + text = "\n%s definition: %s" % (self._kind, self.name) text += "\nSubscrips: %s" % repr(self.subscripts[0])\ if self.subscripts[0] else "" text += " EXCEPT %s" % repr(self.subscripts[1])\ @@ -338,21 +500,39 @@ def __str__(self): # pragma: no cover text += "\n\t%s" % self._expression return text - def _parse(self) -> None: - """Parse model component to get the AST""" + def parse(self) -> None: + """ + Parse component object with parsimonious using the grammar given + in 'parsin_grammars/components.peg' and the class EquationVisitor + to visit the RHS of the expressions. + + If the expression is None, then de data will be readen from a + VDF file in Vensim. + + """ if not self.expression: # empty data vars, read from vdf file self.ast = structures["data"]() else: - super()._parse() + super().parse() def get_abstract_component(self) -> AbstractData: - """Get Abstract Component used for building""" + """ + Get Abstract Component used for building. This method is + automatically called by Sections's get_abstract_section. + + Returns + ------- + AbstractComponent: AbstractData + Abstract Component object that can be used for building + the model in another language. + + """ return AbstractData( subscripts=self.subscripts, ast=self.ast, keyword=self.keyword) -class LookupsParser(parsimonious.NodeVisitor): +class LookupsVisitor(parsimonious.NodeVisitor): """Visit the elements of a lookups to get the AST""" def __init__(self, ast): self.translation = None @@ -392,7 +572,7 @@ def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text -class EquationParser(parsimonious.NodeVisitor): +class EquationVisitor(parsimonious.NodeVisitor): """Visit the elements of a equation to get the AST""" def __init__(self, ast): self.translation = None diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translation/vensim/vensim_file.py index 651e37e6..715ae21e 100644 --- a/pysd/translation/vensim/vensim_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -1,7 +1,7 @@ """ The VensimFile class allows reading the original Vensim model file, -parsing it into SectionFile elements using the FileSectionsParser, -parsing its sketch using SketchParser in order to classify the varibales +parsing it into Section elements using the FileSectionsVisitor, +parsing its sketch using SketchVisitor in order to classify the varibales per view. The final result can be exported to an AbstractModel class in order to build a model in other language. """ @@ -15,13 +15,13 @@ from ..structures.abstract_model import AbstractModel from . import vensim_utils as vu -from .vensim_section import FileSection +from .vensim_section import Section class VensimFile(): """ Create a VensimFile object which allows parsing a mdl file. - When the objext is created the model file is automatically opened; + When the object is created the model file is automatically opened; unnecessary tabs, whitespaces, and linebreaks are removed; and the sketch is split from the model. 
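The workflow described in this docstring can be sketched as follows (illustrative only: the model file name and the subview separator are hypothetical, and the import path assumes the module layout introduced in this patch):

    from pysd.translation.vensim.vensim_file import VensimFile

    # Opening the file and cleaning its text happens on construction
    ven_file = VensimFile("teacup.mdl")         # hypothetical model file
    # Split the file into sections (__main__ + macros) and parse them
    ven_file.parse()
    # Optionally parse the sketch to classify variables per view
    ven_file.parse_sketch(subview_sep=["."])    # hypothetical separator
    # Export the language-independent representation used by the builders
    abs_model = ven_file.get_abstract_model()
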
@@ -50,7 +50,7 @@ def __str__(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get model information.""" text = self.__str__() for section in self.sections: text += section._verbose @@ -59,7 +59,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print model information""" + """Print model information.""" print(self._verbose) def _read(self, encoding: Union[None, str]) -> str: @@ -89,7 +89,7 @@ def _read(self, encoding: Union[None, str]) -> str: return model_text def _split_sketch(self) -> None: - """Split model from the sketch""" + """Split model from the sketch.""" try: split_model = self.model_text.split("\\\\\\---///", 1) self.model_text = self._clean(split_model[0]) @@ -99,17 +99,25 @@ def _split_sketch(self) -> None: pass def _clean(self, text: str) -> str: - """Remove unnecessary characters""" + """Remove unnecessary characters.""" return re.sub(r"[\n\t\s]+", " ", re.sub(r"\\\n\t", " ", text)) - def parse(self) -> None: + def parse(self, parse_all: bool = True) -> None: """ Parse model file with parsimonious using the grammar given in - parsin_grammars/file_sections.peg and the class FileSectionVisitor + 'parsin_grammars/file_sections.peg' and the class FileSectionsVisitor to visit the parsed expressions. This will break the model file in VensimSections, which are the - main model + macros. Then the sections will be automatically parsed. + main model + macros. + + Parameters + ---------- + parse_all: bool (optional) + If True then the created VensimSection objects will be + automatically parsed. Otherwise, this objects will only be + added to self.sections but not parser. Default is True. + """ # get model sections (__main__ + macros) tree = vu.Grammar.get("file_sections").parse(self.model_text) @@ -124,14 +132,16 @@ def parse(self) -> None: self._clean_file_names(section.name)[0] ).with_suffix(".py") - for section in self.sections: - # parse each section - section._parse() + if parse_all: + # parse all sections + for section in self.sections: + # parse each section + section.parse() def parse_sketch(self, subview_sep: List[str]) -> None: """ Parse the sketch of the model with parsimonious using the grammar - given in parsin_grammars/sketch.peg and the class SketchVisitor + given in 'parsin_grammars/sketch.peg' and the class SketchVisitor to visit the parsed expressions. It will modify the views_dict of the first section, includying @@ -225,8 +235,10 @@ def parse_sketch(self, subview_sep: List[str]) -> None: def get_abstract_model(self) -> AbstractModel: """ Get Abstract Model used for building. This, method should be - called after calling self.parse_sketch method or self.parse, - in the case you do not want to split variables per views. + called after parsing the model (self.parse), and the sketch + (self.parse_sketch) in the case you want to split the variables + per views. This automatically calls the get_abstract_section + method from the model sections. 
Returns ------- @@ -297,7 +309,7 @@ def __init__(self, ast): def visit_main(self, n, vc): # main will be always stored as the first entry if self.entries[0] is None: - self.entries[0] = FileSection( + self.entries[0] = Section( name="__main__", path=Path("."), section_type="main", @@ -313,7 +325,7 @@ def visit_main(self, n, vc): def visit_macro(self, n, vc): self.entries.append( - FileSection( + Section( name=vc[2].strip().lower().replace(" ", "_"), path=Path("."), section_type="macro", diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translation/vensim/vensim_section.py index 8a7efdad..444a728d 100644 --- a/pysd/translation/vensim/vensim_section.py +++ b/pysd/translation/vensim/vensim_section.py @@ -1,20 +1,59 @@ +""" +The Section class allows parsing a model section into Elements using the +SectionElementsVisitor. The final result can be exported to an +AbstractSection class in order to build a model in other language. +A section could be either the main model (without the macros), or a +macro definition. +""" from typing import List, Union from pathlib import Path import parsimonious -from ..structures.abstract_model import\ - AbstractElement, AbstractSubscriptRange, AbstractSection +from ..structures.abstract_model import AbstractElement, AbstractSection from . import vensim_utils as vu from .vensim_element import Element, SubscriptRange, Component -class FileSection(): # File section dataclass +class Section(): + """ + Section object allows parsing the elements of that section. + + Parameters + ---------- + name: str + Section name. '__main__' for the main section or the macro name. + + path: pathlib.Path + Section path. It should be the model name for main section and + the clean macro name for a macro. + + section_type: str ('main' or 'macro') + The section type. + + params: list + List of params that takes the section. In the case of main + section it will be an empty list. + + returns: list + List of variables that returns the section. In the case of main + section it will be an empty list. + + content: str + Section content as string. + + split: bool + If split is True the created section will split the variables + depending on the views_dict. + + views_dict: dict + The dictionary of the views. Giving the variables classified at + any level in order to split them by files. + """ def __init__(self, name: str, path: Path, section_type: str, params: List[str], returns: List[str], - content: str, split: bool, views_dict: Union[dict, None] - ): + content: str, split: bool, views_dict: Union[dict, None]): self.name = name self.path = path self.type = section_type @@ -26,11 +65,11 @@ def __init__(self, name: str, path: Path, section_type: str, self.elements = None def __str__(self): # pragma: no cover - return "\nFile section: %s\n" % self.name + return "\nSection: %s\n" % self.name @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get section information.""" text = self.__str__() if self.elements: for element in self.elements: @@ -42,38 +81,65 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print model information""" + """Print section information.""" print(self._verbose) - def _parse(self) -> None: - """Parse the section""" + def parse(self, parse_all: bool = True) -> None: + """ + Parse section object with parsimonious using the grammar given in + 'parsin_grammars/section_elements.peg' and the class + SectionElementsVisitor to visit the parsed expressions. 
+ + This will break the section (__main__ or macro) in VensimElements, + which are each model expression LHS and RHS with already parsed + units and description. + + Parameters + ---------- + parse_all: bool (optional) + If True then the created VensimElement objects will be + automatically parsed. Otherwise, this objects will only be + added to self.elements but not parser. Default is True. + + """ # parse the section to get the elements tree = vu.Grammar.get("section_elements").parse(self.content) self.elements = SectionElementsParser(tree).entries - self.elements = [element._parse() for element in self.elements] - # split subscript from other components - self.subscripts = [ - element for element in self.elements - if isinstance(element, SubscriptRange) - ] - self.components = [ - element for element in self.elements - if isinstance(element, Component) - ] + if parse_all: + # parse all elements + self.elements = [element.parse() for element in self.elements] + + # split subscript from other components + self.subscripts = [ + element for element in self.elements + if isinstance(element, SubscriptRange) + ] + self.components = [ + element for element in self.elements + if isinstance(element, Component) + ] - # reorder element list for better printing - self.elements = self.subscripts + self.components + # reorder element list for better printing + self.elements = self.subscripts + self.components - [component._parse() for component in self.components] + [component.parse() for component in self.components] def get_abstract_section(self) -> AbstractSection: """ - Get Abstract Section used for building + Get Abstract Section used for building. This, method should be + called after parsing the section (self.parse). This method is + automatically called by Model's get_abstract_model and + automatically generates the AbstractSubscript ranges and merge + the components in elements calling also the get_abstract_components + method from each model component. Returns ------- - AbstractSection + AbstractSection: AbstractSection + Abstract Section object that can be used for building the model + in another language. + """ return AbstractSection( name=self.name, @@ -81,22 +147,17 @@ def get_abstract_section(self) -> AbstractSection: type=self.type, params=self.params, returns=self.returns, - subscripts=self.solve_subscripts(), - elements=self.merge_components(), + subscripts=[ + subs_range.get_abstract_subscript_range() + for subs_range in self.subscripts + ], + elements=self._merge_components(), split=self.split, views_dict=self.views_dict ) - def solve_subscripts(self) -> List[AbstractSubscriptRange]: - """Convert the subscript ranges to Abstract Subscript Ranges""" - return [AbstractSubscriptRange( - name=subs_range.name, - subscripts=subs_range.definition, - mapping=subs_range.mapping - ) for subs_range in self.subscripts] - - def merge_components(self) -> List[AbstractElement]: - """Merge model components by their name""" + def _merge_components(self) -> List[AbstractElement]: + """Merge model components by their name.""" merged = {} for component in self.components: # get a safe name to merge (case and white/underscore sensitivity) diff --git a/pysd/translation/vensim/vensim_structures.py b/pysd/translation/vensim/vensim_structures.py index dc8ae8eb..0f955ebe 100644 --- a/pysd/translation/vensim/vensim_structures.py +++ b/pysd/translation/vensim/vensim_structures.py @@ -1,3 +1,8 @@ +""" +The AST structures are created with the help of the parsimonious visitors +using the structures dictionary. 
+ +""" import re from ..structures import abstract_expressions as ae diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index f8a684b4..3c440087 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -1,38 +1,69 @@ +""" +The Element class child classes alow parsing the expressions of a +given model element. There are 3 tipes of elements: + +- Flows and auxiliars (Flaux class): Regular elements, defined with + or . +- Gfs (Gf class): Lookup elements, defined with . +- Stocks (Stock class): Data component, defined with + +Moreover, a 4 type element is defined ControlElement, which allows parsing +the values of the model control variables (time step, initialtime, final time). + +The final result from a parsed element can be exported to an +AbstractElement object in order to build a model in other language. +""" import re from typing import Tuple, Union, List from lxml import etree import parsimonious import numpy as np -from ..structures.abstract_model import AbstractElement, AbstractLookup,\ - AbstractComponent +from ..structures.abstract_model import\ + AbstractElement, AbstractLookup, AbstractComponent, AbstractSubscriptRange from . import xmile_utils as vu from .xmile_structures import structures, parsing_ops class Element(): + """ + Element class. This class provides the shared methods for its childs: + Flaux, Gf, Stock, and ControlElement. + + Parameters + ---------- + node: etree._Element + The element node content. + + ns: dict + The namespace of the section. - interp_methods = { + subscripts: dict + The subscript dictionary of the section, necessary to parse + some subscripted elements. + + """ + _interp_methods = { "continuous": "interpolate", "extrapolate": "extrapolate", "discrete": "hold_backward" } - kind = "Element" + _kind = "Element" def __init__(self, node: etree._Element, ns: dict, subscripts): self.node = node self.ns = ns self.name = node.attrib["name"] - self.units = self.get_xpath_text(node, "ns:units") or "" - self.documentation = self.get_xpath_text(node, "ns:doc") or "" + self.units = self._get_xpath_text(node, "ns:units") or "" + self.documentation = self._get_xpath_text(node, "ns:doc") or "" self.range = (None, None) self.components = [] self.subscripts = subscripts def __str__(self): # pragma: no cover - text = "\n%s definition: %s" % (self.kind, self.name) + text = "\n%s definition: %s" % (self._kind, self.name) text += "\nSubscrips: %s" % repr(self.subscripts)\ if self.subscripts else "" text += "\n\t%s" % self._expression @@ -48,24 +79,24 @@ def _expression(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get element information.""" return self.__str__() @property def verbose(self): # pragma: no cover - """Print model information""" + """Print element information.""" print(self._verbose) - def get_xpath_text(self, node: etree._Element, - xpath: str) -> Union[str, None]: + def _get_xpath_text(self, node: etree._Element, + xpath: str) -> Union[str, None]: """Safe access of occassionally missing text""" try: return node.xpath(xpath, namespaces=self.ns)[0].text except IndexError: return None - def get_xpath_attrib(self, node: etree._Element, - xpath: str, attrib: str) -> Union[str, None]: + def _get_xpath_attrib(self, node: etree._Element, + xpath: str, attrib: str) -> Union[str, None]: """Safe access of occassionally missing attributes""" # defined here to take advantage of NS in default try: @@ -73,15 
+104,15 @@ def get_xpath_attrib(self, node: etree._Element, except IndexError: return None - def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: + def _get_range(self) -> Tuple[Union[None, str], Union[None, str]]: """Get the range of the element""" lims = ( - self.get_xpath_attrib(self.node, 'ns:range', 'min'), - self.get_xpath_attrib(self.node, 'ns:range', 'max') + self._get_xpath_attrib(self.node, 'ns:range', 'min'), + self._get_xpath_attrib(self.node, 'ns:range', 'max') ) return tuple(float(x) if x is not None else x for x in lims) - def parse_lookup_xml_node(self, node: etree._Element) -> object: + def _parse_lookup_xml_node(self, node: etree._Element) -> object: """ Parse lookup definition @@ -116,10 +147,10 @@ def parse_lookup_xml_node(self, node: etree._Element) -> object: y=tuple(ys[np.argsort(xs)]), x_range=(np.min(xs), np.max(xs)), y_range=(np.min(ys), np.max(ys)), - type=self.interp_methods[interp] + type=self._interp_methods[interp] ) - def _parse(self) -> None: + def parse(self) -> None: """Parse all the components of an element""" if self.node.xpath("ns:element", namespaces=self.ns): # defined in several equations each with one subscript @@ -149,7 +180,7 @@ def _parse(self) -> None: zip(subs_list, parsed) ] - def smile_parser(self, expression: str) -> object: + def _smile_parser(self, expression: str) -> object: """ Parse expression with parsimonious. @@ -159,9 +190,9 @@ def smile_parser(self, expression: str) -> object: """ tree = vu.Grammar.get("equations", parsing_ops).parse(expression) - return EquationParser(tree).translation + return EquationVisitor(tree).translation - def get_empty_abstract_element(self) -> AbstractElement: + def _get_empty_abstract_element(self) -> AbstractElement: """ Get empty Abstract used for building @@ -178,13 +209,27 @@ def get_empty_abstract_element(self) -> AbstractElement: class Flaux(Element): - """Flow or auxiliary variable""" + """ + Flow or auxiliary variable definde by or in Xmile. + + Parameters + ---------- + node: etree._Element + The element node content. + + ns: dict + The namespace of the section. - kind = "Flaux" + subscripts: dict + The subscript dictionary of the section, necessary to parse + some subscripted elements. + + """ + _kind = "Flaux" def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) - self.range = self.get_range() + self.range = self._get_range() def _parse_component(self, node: etree._Element) -> List[object]: """ @@ -201,25 +246,31 @@ def _parse_component(self, node: etree._Element) -> List[object]: # single space. Then ensure there is no space at start or end of # equation eqn = re.sub(r"(\s{2,})", " ", eqn.text.replace("\n", ' ')).strip() - ast = self.smile_parser(eqn) + ast = self._smile_parser(eqn) gf_node = self.node.xpath("ns:gf", namespaces=self.ns) if len(gf_node) > 0: ast = structures["inline_lookup"]( - ast, self.parse_lookup_xml_node(gf_node[0])) + ast, self._parse_lookup_xml_node(gf_node[0])) asts.append(ast) return asts def get_abstract_element(self) -> AbstractElement: """ - Get Abstract Element with components used for building + Get Abstract Element used for building. This method is + automatically called by Sections's get_abstract_section. Returns ------- - AbstractElement + AbstractElement: AbstractElement + Abstract Element object that can be used for building + the model in another language. It contains a list of + AbstractComponents with the Abstract Syntax Tree of each of + the expressions. 
+ """ - ae = self.get_empty_abstract_element() + ae = self._get_empty_abstract_element() for component in self.components: ae.components.append(AbstractComponent( subscripts=component[0], @@ -228,9 +279,23 @@ def get_abstract_element(self) -> AbstractElement: class Gf(Element): - """Gf variable (lookup)""" + """ + Gf variable (lookup) definde by in Xmile. + + Parameters + ---------- + node: etree._Element + The element node content. - kind = "Gf component" + ns: dict + The namespace of the section. + + subscripts: dict + The subscript dictionary of the section, necessary to parse + some subscripted elements. + + """ + _kind = "Gf component" def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) @@ -239,8 +304,8 @@ def __init__(self, node, ns, subscripts): def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: """Get the range of the Gf element""" lims = ( - self.get_xpath_attrib(self.node, 'ns:yscale', 'min'), - self.get_xpath_attrib(self.node, 'ns:yscale', 'max') + self._get_xpath_attrib(self.node, 'ns:yscale', 'min'), + self._get_xpath_attrib(self.node, 'ns:yscale', 'max') ) return tuple(float(x) if x is not None else x for x in lims) @@ -253,17 +318,23 @@ def _parse_component(self, node: etree._Element) -> object: AST: AbstractSyntaxTree """ - return [self.parse_lookup_xml_node(self.node)] + return [self._parse_lookup_xml_node(self.node)] def get_abstract_element(self) -> AbstractElement: """ - Get Abstract Element with components used for building + Get Abstract Element used for building. This method is + automatically called by Sections's get_abstract_section. Returns ------- - AbstractElement + AbstractElement: AbstractElement + Abstract Element object that can be used for building + the model in another language. It contains a list of + AbstractComponents with the Abstract Syntax Tree of each of + the expressions. + """ - ae = self.get_empty_abstract_element() + ae = self._get_empty_abstract_element() for component in self.components: ae.components.append(AbstractLookup( subscripts=component[0], @@ -272,13 +343,28 @@ def get_abstract_element(self) -> AbstractElement: class Stock(Element): - """Stock component (Integ)""" + """ + Stock variable definde by in Xmile. - kind = "Stock component" + Parameters + ---------- + node: etree._Element + The element node content. + + ns: dict + The namespace of the section. + + subscripts: dict + The subscript dictionary of the section, necessary to parse + some subscripted elements. 
+ + """ + + _kind = "Stock component" def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) - self.range = self.get_range() + self.range = self._get_range() def _parse_component(self, node) -> object: """ @@ -291,10 +377,10 @@ def _parse_component(self, node) -> object: """ # Parse each flow equations inflows = [ - self.smile_parser(inflow.text) + self._smile_parser(inflow.text) for inflow in self.node.xpath('ns:inflow', namespaces=self.ns)] outflows = [ - self.smile_parser(outflow.text) + self._smile_parser(outflow.text) for outflow in self.node.xpath('ns:outflow', namespaces=self.ns)] if inflows: @@ -317,19 +403,25 @@ def _parse_component(self, node) -> object: flows = inflows[0] if inflows else outflows[0] # Read the initial value equation for stock element - initial = self.smile_parser(self.get_xpath_text(self.node, 'ns:eqn')) + initial = self._smile_parser(self._get_xpath_text(self.node, 'ns:eqn')) return [structures["stock"](flows, initial)] def get_abstract_element(self) -> AbstractElement: """ - Get Abstract Element with components used for building + Get Abstract Element used for building. This method is + automatically called by Sections's get_abstract_section. Returns ------- - AbstractElement + AbstractElement: AbstractElement + Abstract Element object that can be used for building + the model in another language. It contains a list of + AbstractComponents with the Abstract Syntax Tree of each of + the expressions. + """ - ae = self.get_empty_abstract_element() + ae = self._get_empty_abstract_element() for component in self.components: ae.components.append(AbstractComponent( subscripts=component[0], @@ -339,7 +431,7 @@ def get_abstract_element(self) -> AbstractElement: class ControlElement(Element): """Control variable (lookup)""" - kind = "Control bvariable" + _kind = "Control variable" def __init__(self, name, units, documentation, eqn): self.name = name @@ -348,7 +440,7 @@ def __init__(self, name, units, documentation, eqn): self.range = (None, None) self.eqn = eqn - def _parse(self) -> None: + def parse(self) -> None: """ Parse control elment. @@ -357,17 +449,22 @@ def _parse(self) -> None: AST: AbstractSyntaxTree """ - self.ast = self.smile_parser(self.eqn) + self.ast = self._smile_parser(self.eqn) def get_abstract_element(self) -> AbstractElement: """ - Get Abstract Element with components used for building + Get Abstract Element used for building. This method is + automatically called by Sections's get_abstract_section. Returns ------- - AbstractElement + AbstractElement: AbstractElement + Abstract Element object that can be used for building + the model in another language. It contains an AbstractComponent + with the Abstract Syntax Tree of the expression. + """ - ae = self.get_empty_abstract_element() + ae = self._get_empty_abstract_element() ae.components.append(AbstractComponent( subscripts=([], []), ast=self.ast)) @@ -390,16 +487,34 @@ def __str__(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get subscript range information.""" return self.__str__() @property def verbose(self): # pragma: no cover - """Print model information""" + """Print subscript range information.""" print(self._verbose) + def get_abstract_subscript_range(self) -> AbstractSubscriptRange: + """ + Get Abstract Subscript Range used for building. This method is + automatically called by Sections's get_abstract_section. 
+ + Returns + ------- + AbstractSubscriptRange: AbstractSubscriptRange + Abstract Subscript Range object that can be used for building + the model in another language. + + """ + return AbstractSubscriptRange( + name=self.name, + subscripts=self.definition, + mapping=self.mapping + ) + -class EquationParser(parsimonious.NodeVisitor): +class EquationVisitor(parsimonious.NodeVisitor): """Visit the elements of a equation to get the AST""" def __init__(self, ast): self.translation = None diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translation/xmile/xmile_file.py index d10c4975..013543d5 100644 --- a/pysd/translation/xmile/xmile_file.py +++ b/pysd/translation/xmile/xmile_file.py @@ -1,15 +1,22 @@ +""" +The XmileFile class allows reading the original Xmile model file, +parsing it into Section elements. The final result can be exported to an +AbstractModel class in order to build a model in other language. +""" from typing import Union from pathlib import Path from lxml import etree from ..structures.abstract_model import AbstractModel -from .xmile_section import FileSection +from .xmile_section import Section class XmileFile(): """ Create a XmileFile object which allows parsing a xmile file. + When the object is created the model file is automatically opened + and parsed with lxml.etree. Parameters ---------- @@ -20,7 +27,7 @@ class XmileFile(): def __init__(self, xmile_path: Union[str, Path]): self.xmile_path = Path(xmile_path) self.root_path = self.xmile_path.parent - self.xmile_root = self.get_root() + self.xmile_root = self._get_root() self.ns = self.xmile_root.nsmap[None] # namespace of the xmile self.view_elements = None @@ -29,7 +36,7 @@ def __str__(self): # pragma: no cover @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get model information.""" text = self.__str__() for section in self.sections: text += section._verbose @@ -38,10 +45,10 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print model information""" + """Print model information.""" print(self._verbose) - def get_root(self) -> etree._Element: + def _get_root(self) -> etree._Element: """ Read a Xmile file and assign its content to self.model_text @@ -63,11 +70,25 @@ def get_root(self) -> etree._Element: parser=etree.XMLParser(encoding="utf-8", recover=True) ).getroot() - def parse(self) -> None: + def parse(self, parse_all: bool = True) -> None: + """ + Create a XmileSection object from the model content and parse it. + As currently the macros are not supported all the models will + have only one section. This functionshould split the macros in + independent sections in the future. + + Parameters + ---------- + parse_all: bool (optional) + If True then the created XmileSection objects will be + automatically parsed. Otherwise, this objects will only be + added to self.sections but not parser. Default is True. 
+ + """ + # TODO: in order to make macros work we need to split them here + # in several sections # We keep everything in a single section - # TODO: in order to make macros work we need to split them here in - # several sections - self.sections = [FileSection( + self.sections = [Section( name="__main__", path=self.xmile_path.with_suffix(".py"), section_type="main", @@ -78,16 +99,22 @@ def parse(self) -> None: split=False, views_dict=None)] - for section in self.sections: - section._parse() + if parse_all: + for section in self.sections: + section.parse() def get_abstract_model(self) -> AbstractModel: """ - Get Abstract Model used for building + Get Abstract Model used for building. This, method should be + called after parsing the model (self.parse). This automatically + calls the get_abstract_section method from the model sections. Returns ------- - AbstractModel + AbstractModel: AbstractModel + Abstract Model object that can be used for building the model + in another language. + """ return AbstractModel( original_path=self.xmile_path, diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translation/xmile/xmile_section.py index 76cd72fc..50d87be6 100644 --- a/pysd/translation/xmile/xmile_section.py +++ b/pysd/translation/xmile/xmile_section.py @@ -1,22 +1,64 @@ +""" +The Section class allows parsing a model section into Elements. The +final result can be exported to an AbstractSection class in order to +build a model in other language. A section could be either the main model +(without the macros), or a macro definition (not supported yet for Xmile). +""" from typing import List, Union from lxml import etree from pathlib import Path -from ..structures.abstract_model import\ - AbstractSubscriptRange, AbstractSection +from ..structures.abstract_model import AbstractSection from .xmile_element import ControlElement, SubscriptRange, Flaux, Gf, Stock -class FileSection(): # File section dataclass +class Section(): + """ + Section object allows parsing the elements of that section. - control_vars = ["initial_time", "final_time", "time_step", "saveper"] + Parameters + ---------- + name: str + Section name. '__main__' for the main section or the macro name. + + path: pathlib.Path + Section path. It should be the model name for main section and + the clean macro name for a macro. + + section_type: str ('main' or 'macro') + The section type. + + params: list + List of params that takes the section. In the case of main + section it will be an empty list. + + returns: list + List of variables that returns the section. In the case of main + section it will be an empty list. + + content_root: etree._Element + Section parsed tree content. + + namespace: str + The namespace of the section given after parsing its content + with etree. + + split: bool + If split is True the created section will split the variables + depending on the views_dict. + + views_dict: dict + The dictionary of the views. Giving the variables classified at + any level in order to split them by files. 
+ + """ + _control_vars = ["initial_time", "final_time", "time_step", "saveper"] def __init__(self, name: str, path: Path, section_type: str, params: List[str], returns: List[str], content_root: etree._Element, namespace: str, split: bool, - views_dict: Union[dict, None] - ): + views_dict: Union[dict, None]): self.name = name self.path = path self.type = section_type @@ -29,11 +71,11 @@ def __init__(self, name: str, path: Path, section_type: str, self.elements = None def __str__(self): # pragma: no cover - return "\nFile section: %s\n" % self.name + return "\nSection: %s\n" % self.name @property def _verbose(self) -> str: # pragma: no cover - """Get model information""" + """Get section information.""" text = self.__str__() if self.elements: for element in self.elements: @@ -45,11 +87,24 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print model information""" + """Print section information.""" print(self._verbose) - def _parse(self) -> None: - """Parse the section""" + def parse(self, parse_all: bool = True) -> None: + """ + Parse section object. The subscripts of the section will be added + to self subscripts. The variables defined as Flows, Auxiliary, Gf, + and Stock will be converted in XmileElements. The control variables, + if the section is __main__, will be converted to a ControlElement. + + Parameters + ---------- + parse_all: bool (optional) + If True then the created VensimElement objects will be + automatically parsed. Otherwise, this objects will only be + added to self.elements but not parser. Default is True. + + """ # parse subscripts and components self.subscripts = self._parse_subscripts() self.components = self._parse_components() @@ -58,11 +113,14 @@ def _parse(self) -> None: # parse control variables self.components += self._parse_control_vars() + if parse_all: + [component.parse() for component in self.components] + # define elements for printting information self.elements = self.subscripts + self.components def _parse_subscripts(self) -> List[SubscriptRange]: - """Parse the subscripts of the section""" + """Parse the subscripts of the section.""" subscripts = [ SubscriptRange( node.attrib["name"], @@ -78,8 +136,43 @@ def _parse_subscripts(self) -> List[SubscriptRange]: subr.name: subr.definition for subr in subscripts} return subscripts + def _parse_components(self) -> List[Union[Flaux, Gf, Stock]]: + """ + Parse model components. 
+            Flaux: flows and auxiliary variables
+            Gf: lookups
+            Stock: integs
+
+        """
+        # Add flows and auxiliary variables
+        components = [
+            Flaux(node, self.ns, self.subscripts_dict)
+            for node in self.content.xpath(
+                "ns:model/ns:variables/ns:aux|ns:model/ns:variables/ns:flow",
+                namespaces=self.ns)
+            if node.attrib["name"].lower().replace(" ", "_")
+            not in self._control_vars]
+
+        # Add lookups
+        components += [
+            Gf(node, self.ns, self.subscripts_dict)
+            for node in self.content.xpath(
+                "ns:model/ns:variables/ns:gf",
+                namespaces=self.ns)
+            ]
+
+        # Add stocks
+        components += [
+            Stock(node, self.ns, self.subscripts_dict)
+            for node in self.content.xpath(
+                "ns:model/ns:variables/ns:stock",
+                namespaces=self.ns)
+            ]
+
+        return components
+
     def _parse_control_vars(self) -> List[ControlElement]:
-        """Parse control vars and rename them with Vensim standard"""
+        """Parse control vars and rename them with Vensim standard."""

         # Read the start time of simulation
         node = self.content.xpath('ns:sim_specs', namespaces=self.ns)[0]
@@ -125,52 +218,23 @@ def _parse_control_vars(self) -> List[ControlElement]:
             eqn="time_step"
         ))

-        [component._parse() for component in control_vars]
         return control_vars

-    def _parse_components(self) -> List[Union[Flaux, Gf, Stock]]:
-        """
-        Parse model components. Three groups defined:
-            Flaux: flows and auxiliary variables
-            Gf: lookups
-            Stock: integs
-        """
-
-        # Add flows and auxiliary variables
-        components = [
-            Flaux(node, self.ns, self.subscripts_dict)
-            for node in self.content.xpath(
-                "ns:model/ns:variables/ns:aux|ns:model/ns:variables/ns:flow",
-                namespaces=self.ns)
-            if node.attrib["name"].lower().replace(" ", "_")
-            not in self.control_vars]
-
-        # Add lookups
-        components += [
-            Gf(node, self.ns, self.subscripts_dict)
-            for node in self.content.xpath(
-                "ns:model/ns:variables/ns:gf",
-                namespaces=self.ns)
-            ]
-
-        # Add stocks
-        components += [
-            Stock(node, self.ns, self.subscripts_dict)
-            for node in self.content.xpath(
-                "ns:model/ns:variables/ns:stock",
-                namespaces=self.ns)
-            ]
-
-        [component._parse() for component in components]
-        return components
-
     def get_abstract_section(self) -> AbstractSection:
         """
-        Get Abstract Section used for building
+        Get Abstract Section used for building. This method should be
+        called after parsing the section (self.parse). It is automatically
+        called by the model's get_abstract_model method and generates the
+        abstract subscript ranges and elements by calling the
+        get_abstract_subscript_range and get_abstract_element methods of
+        the section subscript ranges and components.

         Returns
         -------
-        AbstractSection
+        AbstractSection: AbstractSection
+            Abstract Section object that can be used for building the model
+            in another language.
+
         """
         return AbstractSection(
             name=self.name,
@@ -178,7 +242,10 @@ def get_abstract_section(self) -> AbstractSection:
             type=self.type,
             params=self.params,
             returns=self.returns,
-            subscripts=self.solve_subscripts(),
+            subscripts=[
+                subs_range.get_abstract_subscript_range()
+                for subs_range in self.subscripts
+            ],
             elements=[
                 element.get_abstract_element()
                 for element in self.components
@@ -186,11 +253,3 @@ def get_abstract_section(self) -> AbstractSection:
             split=self.split,
             views_dict=self.views_dict
         )
-
-    def solve_subscripts(self) -> List[AbstractSubscriptRange]:
-        """Convert the subscript ranges to Abstract Subscript Ranges"""
-        return [AbstractSubscriptRange(
-            name=subs_range.name,
-            subscripts=subs_range.definition,
-            mapping=subs_range.mapping
-        ) for subs_range in self.subscripts]

From bfd574fd85157b832b9edb1eccf2e95ea5e7196a Mon Sep 17 00:00:00 2001
From: Eneko Martin-Martinez
Date: Wed, 6 Apr 2022 12:10:07 +0200
Subject: [PATCH 26/96] Remove todos for 3.0.0

---
 docs/images/abstract_model.png                |  Bin 0 -> 154975 bytes
 docs/structure/abstract_model.rst             |    4 +-
 docs/structure/structure_index.rst            |   13 +-
 docs/structure/vensim_translation.rst         |    5 +
 docs/structure/xmile_translation.rst          |    4 +
 .../python/python_expressions_builder.py      |    3 -
 pysd/building/python/python_functions.py      |    4 +-
 pysd/building/python/python_model_builder.py  |    6 -
 pysd/building/python/subscripts.py            |    1 -
 pysd/py_backend/decorators.py                 |   16 +++
 pysd/py_backend/external.py                   |   77 ++++--------
 pysd/py_backend/functions.py                  |  119 +-----------------
 pysd/py_backend/statefuls.py                  |   49 ++------
 pysd/translation/structures/abstract_model.py |   15 +--
 pysd/translation/vensim/vensim_element.py     |    4 -
 15 files changed, 77 insertions(+), 243 deletions(-)
 create mode 100644 docs/images/abstract_model.png

diff --git a/docs/images/abstract_model.png b/docs/images/abstract_model.png
new file mode 100644
index 0000000000000000000000000000000000000000..6302193ebc637df4cc1648260fda81449c930925
GIT binary patch
literal 154975
zTxgL0C%8(lcX1+L*$!BJd<>SuekJzf$|>}3RhB1y!vQH>hD}zMpv|`S>;k@^+e6g; zEH5l(Dtvdu_IGhA<|59S){>GLJA-%cOtoqc{K%a8gx7Z)fBk}`Q@dqoPS_;E0=41a zYvQl)Nn8O<1H?OVtBuhM+NV&qgwc4uESJY8~MzW6o}hoK=9(H7qj@+!XmG1 zAe=BZNP_Vc)PbJ)*d|Wxz6OpZLWc5#Ajot8LfFr;nz>5PbSN7czr8%Px7%Eg+^Q&T zY8rL;!TR>lOMoR$JqB!`IGKG=55FOL6DT-%*Y*k2jWQvgL&1=sy*%TD|L;N3sxU}OT5`5u zy4?~)EV7aluOca`c7q4U$lLQc<%MvN>t9(9J1FfcHfzTsB|Eprr+V z8a{_TEi8zFkrB_*7u*{%9OzF*)Xl!M-uGIF znye?#)B_LgH{1ugzfa^?fOy%7d=j5(J{#kCFe9-wH8iG}qncN95I3Di5^lJGl1HZ~G7doR=7^JNXvsqBB~ zoI+0}y5@O3u9EzGDPXNnOAx_QgYe!y!m#$h5K2%!m!iVj*|jROTSWi%4N3JBwG(8X zw7tE(JO7mJGkuRb-0fxrNYLG8F6-(*uS)9t`uuRvosm)Ah$ls$M43$qKV-hm0b1}`^vps~`0t3WE|XGpcAR{_6E?$`B1M@X zX4v%`N3B#m^)03m%aNae{g_?!EkX($^Sf;mAyEcxYjc@X=3z+Dr>`X`&3#Yd z>G~hoF(mdMT3F@(q3SE6s$9EhHz1&NN`ul!H%KWWAs{6nxe=s0q*Gc-P(WI`yIZ;& zq`SLw1N%Ol^L_Vz_Z??2{NZu4pLacL%{Au=(XpASg@|&2=%qj6omGAKKPP%4ZM!mX zpZ|U3@sUhEwd$z~F;FqzoR2crl0Ymp%NxGfpL4h+yv0Oqr;g6h;AL4Z(SIc&Yu#y5 zWD+vIUqrUSGnM}Q6Kj;ThRRy|8d6JWA(%%rLrpuCa(JmZ zvNfXi`RuSA$H|SE*#geQiBvs9{d68RB_fd`XzGJ(KX$EN^;RI%QRB~b*NYwf>@13% z`s==3{35D~4lOE6w;koMOiLueA3DfS8yG)~lE&bxWb_AH3PcK$V)4^e5b+zoly@XF z-u%465vNAZHWsic5$m6kGxK%3Ew}oemg8TeS)MG3!JWqjbowE*%2~n0^M@to;Vnh{ zcRrmg;B*G2YjD))`GN_k8>N}dRWKD8BXifYUU^jP7s+zRR2$mRy9spJ;(@;sw5IGU zM=?q)_%gSDlDF)`ZreUe0y%_0rdQ3{yyrt+h~o^oK19*zt@$6WhS>oB0gd;*A}Dwt zGdNSf;I6_b4kko;jzb;IzhvdUOTl6$>U<&Ls*zU4<*I+8bm{Ug-e4Jmj*`eFlogyj z)n5C@8(~`HzRF~T8z=ZHte2dkuyYoy!0n=HCZnVChj_OI!v8F{d0h$93%LcU`_%C( zZIgG^4iQmOj?jcP8e=+uEa|tN7#iAz+x`P1ge`U}oF!jh*VVKTbOY9)1gdy9-y;gL zfpu(ujLMM>Bk(hwu%5I)sX)G2R+bL&_VfFkqCvH?3R^y(6zlZGUO)qsY*^TrtG)6| z*9sNJ)}!G!@WA}yBh;8*WVto{NSO%HY@K*N2?nhE_x^k|adS1oP&g0|Uw1&zzfzAb&0Era0vA zONGX%YrmIz;d@?|gS&?&4ELet74bIQ0SXAtjM&L(2C|E}xhPA`-w#FgS$(%}X3Twx z(wZKM_hzv82A5d|J?CK)RHLuRUx+;AY+CFZA<85`<*x4hLbrMrm|$%8)UB-gu5dQb z#rD^k$sTm{9$!>Ab!Io-g2SKJQ^mb7v&tjB8YWp?KgbktXO_qI6|JO?)glWm}0!)dYjdig2{lwq6-hH#H$-+2{s$QTd^ zjUF`Xsa|?u?!L1s<0H+6yz2j1kvT$?O?j}8by41?VE5D*50W9mar1QW^N$HHRflRF zV~PzyD+3v3hx*;KUg7h=rbKRG)9H_bAL|=$1`iDQ$xQVFvK*)31Hy+P<@<5%k^id1eHZR)8%Sk%r-Gb=m z^x_KC0I@cI!uf3#LQao1CeDCi@jg zA|F|d+B@lgLz@Y}Ul#;kgcR_=2%Gpsu|a_GM4<9&zys0q70@I;bCCd#@u}4>#CAf} z>n-3{2?#0dh77@%_k@v)+=v(G)Fbas7PnmNlPM`H2US(6P3=4S`7_}X#d-r0AA^jK zbhS(2{KZtl0R2OH2La;v<;P-)!%sqK7wK#NNRt0*`m9E*H-VQ@K>~jN#L`4H6cPU1 z<{w01OB%_zKDlh}pU12?mL3#I90u%vakVYVi0+iIFZZm9$bCCEb>}0>0$Gc7L0zjN zi~1_}j;5A7D&kt6?~kz|!q{2210vWnl^aS6WZsp3tD%~uC*ChSjD>6hu1NrGnD2ur5vOJZCp3`JY5Qc8icK5y?yW{Y7U~A}mN~5GO+D572i7 zS)$O+uyVT7uV1+MG&(nT-g2gKe<*^!&$?kNvfujUy0UgXwyYtBl^5FuZBcLO^T*Cs zAs1vwz@~HUvGeZU31NNtj|1WhGpI3JiAg_a-Ql~#b&AB^u3H)hw_T#?^QvDN*R_kt zW5(0FZck^l`d1tGcDUeqmsoRpY}N_WnFxI!T_{6Ut@#p|*Kn4NIidtMkn zbaSo$z3oK3*PN*(&-(*M$(Cets?lYh7J(T{jg5D2$^h4Ilf#s{oRF?yqh_pq3G19k z#g43Ox*N@Ny2L)<7rJz@)?mW$AFu@uQlv|$hRy5=jnO&ekB}g;b5P5hHPHK5RvjM= zJX-U8ep(2C7MG5t2{vBDe@>MzNo`Ni>IX*|{QX#?3roXxBg9&SYNU&+t9;QX;Uh=< z-pd|_Nf5|@QjLLaQ`;tS1+x`xFl343auMDx zb4i02Y}-Spz7i!{B)lvtoP>^u`!owf;Gn&o0LUE=LS53)FMy>#{c(ZsTSuVIF9L-N zqi)|DLBqq&@|M%-K<9Neh0q0ciqh76aA@ek#8h;z&79+GtM>&~z_23SK@;rLoX2IG z=>?e8czUcnc80ALudycf*tai04KcRi6(TcW=c4>CQCRP$*sVFG*2*|9+(#rJD zb>CwV0P}3$er`Wax4lF`-Xi=fqw-m0L#`@9A-A(i|`@855O^Ub?HI!gTMCh1O`qp(^UlP?g$t(5Vd9GmLt zrE2?BySVAkISWukZh6!K=FdFeO!8e!;fT;xC)FR&S7%Rft8ERfO!v zxEljyHmXZY1hMv1i%%^^N3yT>Od?cJ+)I%=7WCO(@a+!QMPP%$ zp`@^kjpq7`_X{n#v`Qx;oGPPgf!OMk&i(M0=IO`!5RRCNe#`5^vN^2`r`YSsZT6Ld zL1*yY0x-X6=J%^mMRf-36k_hbFv#U=n<#oPN?~LV4ehPYEvDQ>Mtg%@gx_DBbl`sn zRJ|&93uylM@)DRFSOlx`NGQmQtn4?a!tYkqs(%l8|L6$3&mxVxGGE*BLo2};nsrw5 z%6Z;nlVMs@t0@McY>aDT+5PT{v9k~nCS-+Un{$1*rZ|n^Oo$pScG{V2$gEn;aMy8i 
zz_wx*1q89?o8oUzGucT`EiPHmfi3px=lob9++_9$KdVmd0kSa7t)YQsqO<8DA1-y^7P5KM>!d||F43i zFh(sS|At0_mVxG>*eg4JX=B%=Sj=~l$@(8KiKRp4PN&U^pXj6*SbMUS96X73o=!`et~Y0eDHA; z6X4@yG{|l4K~vM{ZG-W8?hBpRYi!j?1PXnJ*JFs6ntR1JFV?F*L7gw~1#jTf^@kkw zMfJK@i#iI{W3W)L+DxY`X(~Kq?J1(4-}gs|8Lzft9S>m}D2X=5jTw5n&u|_;pYwB+ zdA6^N_{4T@1y8n=b~Z|qn^b<{%+7WG?rzm2+$h5|%4lt53+6&8CTa-AjNR;uXl*7g z>Y7hrN?8?qxUUWA-m6-&d9g#DU#v{A$IOqNC7||NJ=&XGvy%BX$ zBf;#fwg2&`YKC)a&`ThIhRsvV*24X;6$7h?bWugV+2$xDwpyL_bghrdG^Y{Wu)j7;@k1##p+YZbOXWrtsiYpf1Ao-^|6zj zmnm<=8cQsEls5$b-Xu8P9X6CtyW1>I8sBGny|8uP;djZTr>35%YbZml?{oU} zl0WSDjF?Lj#W>Aa@zDBJSTzcwfLTgV5TDxSs?5zp^F8?PolmT-UW+5KQbxJAKi;U@d!<^ zAbVd&h&?V^iz;_m+*DSfZ117S52vlmnAV3E-fXF~x&0l153xS{3-_~cE_D)LQl?3fSh^qH7qh~9 zlx=nv>Mu{fve5giU?SNoU{N$Qe7qU&pCzJnix-DGx`=z#y6UJV&qci)98@b_^fH|mkD>=pH@t=GAK!{aB8!*8{1-R z%K8@`zUGu&gfp&{Kfmuz^x@xx!1iZJt~WA0jlfROA5=1uq0aXgqhD5%l2V}Cb9tU_ zU6(&gc4>>p0SBd(070l+(A;BP4D`nH-WMR2jaarQK-Hk>g5ctg1Z=zCN;9w+vqSzw z-nfaKoXK#cDo9i?2`GcNxkbNb`sCaYbrk}!q@P7wa?8K>g>Hx*SYc$Txh?`52`M8k z$E)9bGybAy!5 z#KPXY!;o*DG0Fd}1<;+a;k`;3z-CxPhD@K9zG0R}z=ou0k%E@mx%wGVd*9yz#E~DCex1Fy;p95FLuprYu?Wre$wyx3l3PqI? zuRl?Hm@JnT^MZ+3QVZ;+&^U%eQ1#|yTLoDQzz>8ZGERJXPQo?js(!4E7n(<9b$4kV z{05kX&@zFw;uro$$r@Ig*ByxU%-M!1FZGPhu$Zm)hK)`a{K=3rerwiTrTgj`h5l{* z@S;RBG?u0|bCiAUI`~W!qOcfb?o(WDF>zNz=9|7f*5T$~5US*M4GYHSt|!WxkEJ>I zkv~6&2)Q3T$zJWE$3A``C!zJ4E%vRjr$_$X&sg=A{f&yX3Xxhv{yJ)%U(Ee0;;tAP zG=b%%l;h6kiigF4j5Y<>mLk&@Pltq652ex1-TZG=(Rb(;?I%D{jIaE8r> zGhQq0PvqnY)tBnP#%3I>z-sN#=Ia6L)+);yZ8lJJ+NR!6y!$fQIn8+yRI_!8T+WrJ z+SY>FB3v|o=Q3-gdQW(_Ri9lQ!YiD8dynSxJyS{lAd^#$Tm=b<3Sz`(380}PUclt;6 zM@iL|{B?jAnOUoyFs7>wn#^uZ)$H6FPrg_+4?f!n45^i&QTkYk`L27uSs_^8iY=uO%u;qFvMU|22te}pDX^da~fmdO%Qz2I}Me4$y&2bhsO|Fg;e4syb zb!$=8#i;P)AZs|zKcgvb+>y3D5D=TvpFC2b0q z6?okOIORyCFV$7=Ac_sRuP^d92SOK?e>s}$oo0(}gKH>x6@Ih2khHwzx`VY>ye-qn zXWruTS>n_Q`_4=C}u8n;-Sf(25cr<=3yASE#0Td`k< z{(g%dsjNfk;wrL_CvwuCMSITN$ZZ~DiWW0tva9y_e5(6+V!_El$^oOEr}AcMG3ox7 z!OVT-Ov%-isl7`|wuU_9&zjhUu=cOPA^}(E#S44+X6GX+wLPt9>wzn`gg3D_*4oB! zUM)L0p^_t>VV&BIdl7k`+^dkxD|V82eWNpFmP~Ih&jByyc-7h?f#cD3c7`VY=WVtb z6A@3wm4!xUq6~;@>+cUYo48NvZOA+R7UVe$-J*_(oKg|YkNV-+UAkzAGqhduC2KOh zKD3VLfAG-pRPs$okkE`OvHQm3yW`kS`RQyd#`Wa#dmTfD-UBo8kEX6L^VFDPPv3zK zjmO{angyu6dd}1JP>Cp+pHIfO>CDTg(mrbWSkn^-qMH>KTaSNHnN|OWF-HuE)L=i9 zyb{`{1e1_(P;Gw-4YT1)wQhMSbR?Dxc3$G(T8h%22ZNaSSSXvBCPm~Lm%Gr3y@9@4 zU)iotBS*zm?i2e@O)3G`c|!~SNFX>>p|Yty#= zyo0%na@}52mM7!rVS7sb(S1lC(qp1otzyEm^BK8|maEHb40ZoYw+U-=cvLo>L&pG) zyqMQg3VrjOQ0#VsSGK{_dPnDLK5p08xRLsk^d6^vFJqnqOUygByNJWuJ(`7R7|dUu`Cu&(lhOCGKo4Y7R0vm!c< z=mc@OkWW%Js9(*#BcvsK?OO-srnZJm?^iv~&CJa|&GHfWQ*CeW$nTzhe=#n7yvsI- zA$;Rbl`byfVDPUadn@Oec^LQd}tHi-psZ_&qAN8?r`46Av`3!cb zqUPYANuB%2Zwm6d0~M%Coe`?WAqzEd7xGF3jA%auCDdi3KU4$?;f(1M z%V54K=Q-1tw3kBFj!c+Lm=jD3kfmtE=vyyB(v&=H1GMhJnBhpQ>W0zVpqub-^ z`h=ln112GHj0Lsu+Yebh>kZF>w|A#gNFB{_kSVItTOG%b=EI6>^Mq^73yZH~vF|Xw zn}GxAs^Enc%Nw(wLt~2a;c4o;kCcYVXKh=2eo@K#3lF5Wo6QoB)sB`D5j-zH(~gqe zFFiHt;H{R8RIW0bK=jV$<4^;*eMmCN066y?C*!MGNE7BPuD%q57I*Q$oXW3W;kBvo zKFniekw^&=rSmB7z@!G{E&ysE_#4> z<9Tu3a1dlw>%bEBIn}K2B<}PWjfFc*43xjlyZ34vKK_Zy5k>l1JH&ubEF>fOm23&& z^Y9r4>a-AY0pj;z22F~IC4+G+{b%>w5B{8=^xyOJP8uVuSCmuyW@V~h=_d-=`Y~Na zHVxLDuKQmfVhBMVF&kG1#IqLieW^*Gj*8~ikogE~I4d;e!m z1%=2+Nt%*uQRn>cMJ%+~K9eKK5~*b8zPznR8BPd4zl5H=AnFfYz=CWiUneDR3?EPQ zZb~W($**;!~Sdj;!%^(@&t zxM|tqPG3y%GF$Tk=fgr?VqZt?s%lX9Vmzo`@!Z#vAC9^0G3JI;$Pe-@`&ugqyiW7_EH}S_!{I+$w*n@}**T%_IO z09YM6lY^gGrs4!|Yv!%|#Giwi{=BmdxKTZ{0A!*0BYxHh7}|VTf<t*6dCob ztgx=#DFZ&eQX;sz^ZcNuR#fZYZKyHKv;WP$y^N*5zyDdnfcWGW@Jm!wRvu%!R5tT> zb?TiUu4tkmJ{qrm@#+1BK_~|9NLZJ`#>36>PG(_A9@Nw>EJzp9_vHGr7-o@@-}?h! 
zq&Fyu-%%iDHNnl**=s^;=6%rz&x7sdyG*7&{$G62wdhr+6BAk9Ls03)-0_Q-UPI@X ziC zp4+z$lUFe;j^uOdf`X@E_kvpGFDUx3`aSKJ26w3TosZ3C*Gw^+m48V$BSX+uw^8>J z@7Z@{N}v%ZGy{zzn`{ykZES%^G6}uN!j?O#cfXbwp?6@5h|?&o%WXe{KjbJtfi4f^=FxRzpuMO=@VzY-6HKHQ>fcLEt!|Cl8#3GGiP zL273^`NjI+*daD+-MCloO{#rjmvi)tD%tO&2Yosu zlq8Ie88o%Dit`3PyzpOGSTNkx&T;Arr`X!t^J8R#pUSX2Z7VeZCDdSK&s4n=dq6;d z&Hb&*uPluh41HWbZt`nD7D+SZ{n0Mm=Q*`&QV<3LNceV#R5ylRGZx`mnXAcra+I!& zpW{P??;N$t^Q;}nL0wXNIgX8; zw#KC78oQwcrPt4)_ebIBll4R038cx0OJFR=z#@r(71Sd1r=n0u)4VBv0 zef2+IL!K(qw7xNmD32RQM2|q$kZU@aOg6LrtGexh09kq!f7f{pEmfW=dy~`&Pu&b8 zGLizKRocGPQ^Sf!TEisU-m-QrC76i{X9mwWA#GeF&+2kb;sZe4v$8l8=Mi{e&W#In z=?$mX!n!W&glCtR*3M+WAtZaap2tWIX~p<+8xQ`juD;9)wG~YP?NCiM zwAV$9Xzrp^v@%~UXSeqEK@rc*A$Ll0awljiD#;l1kA$O_6HHZwwY0n@7jbov`Rd7= zt&k#VZGA|%PSLeY)%R}EivkrX1n;IW73G67H3qo&yVovB&%rlvy?eqoqAb2cLX%42 zhh%$V-q@^riC=&J`FLjCaUqJKc^s_fS-ui_{uqT^sy5QH;4bnaJ0Jkxx|BD+bC7I+ z#>@5uo(s#>NwJ+okqsUIi7m%y}hCL9y-HjUIpedtZLnVC=c|IBQvJHoELMXJ9vrgm!-reiW$~MBbK%SIe91Zg z-FZVjr}c{bzwivcXcuB{91q8{H!)Tobvpxv#cXW+=g|u^xnt#G&>1iDtKFg(3I``Q zI}0PnNvUFjo+%*UQH+I$!c)eY)g6uJH+!k9L_A!k&!(?;uFwszYsuai=P!)jyxYc9 zvgtjvyjrwhB)QfR2v3tYTo69XJw#>p`h6babMS4N+4Fn%cJFPu0gT&YgeHx4L(6mZ zLX3TXb*+DJD#W09YZ~|7@mdFy-DRd#J+x^O@CCu_$@g!&?A#&0AL(Qh4PkJcuP>yO zaJeQG=il~xgC*!jer3Bmd8u41$4lQVc5RP7G#}Q2ueE%ma$0ra)o|pm;v^75!7=7~ zIh>MoOo3FLu1Ys2_Z1n1KUF+m_`=H$Nj+A*{i!)dW6KJ zq_+wR*k#i!`lZCKuC6gjxt~h?nQnF1h+e*Y2|6M$WBZWXIB>!wuLH|4P_b_C+oZQi z9^(u@H4~2$YX5_S+c@}Z9PFu^p>B1W@UT1(3%mqbc-Fa&0n~Ya%uZ2++4W~mp_V6i z?!Kd(&|?H6J@xCwY_|>AbJ`1Npu^b0N6!9jLdhFaW1%IW=`O7^b1+c>=WE#MuZq~d%ZxA9p3>)f>% z$O%+Q#O*;|ES2Ae=a<9jK-7ha!v`uB>Nv#j_2CK=@b+Y{#hkmpcjE`mRIoi}RsiLl zSim?s&%Keg<8984Vl1R8lZ-Pg;L+c~uBCa9_{SZLPZcB*c>-JjH14CM7(yj@J$4{1_xko^&* zPJyqZj8>IC6;Hyaa>Lf;sQtekAxd4hLp!c(_bw?Am8Y%RJBHp4cqN_((7BIMba7=px` zZd)k4gyH+CQV6IxddI!;fSeMM`RW&=hu$l)PW%q*bvX|PEh-M1-w2+G&x>3PJWEr` z2vls*tEF$|b#9Hi4o*%qIFFYm)I%;`U}In`VOl%41ysJC{4zc{xe7*n%l@rWCqUNG z(NQGGgK)BZSJz$|KmFGuU2|x_gs% zE*S0$P(lpe4d}mpqoOiKxSp4nhZ8LEu$7)Bw(paL1qLfC>tlbl_X3-z*UDr>Rs|4< zs;YGz8I#qc+0u~|;@V%ssEz^gIG@B*f=p!f9ynT)5JoqZF&BfPqwTexZ74XD^{Mb# zX_LV3H9W)G-#aAM4sOI^+YTJ+jPEU6rghe@|U3X!;dG+t;H9U zKo3wUU!=Sp88CTcV?7|#lTSOAz2P~?_VHZvxm(9dU=i9-NSIhXy!`8nDYDwbtQ4`M z^*HN9Dw)QYr(=MDYqKTbPIm$@-@Nt&WY8z$H&T2R@Ooxwq`R5$-OL_)!MIoE+-YM# z%Jij13!T-+QB}q{JtEd-Vrj=Ky zb<7(-vG-uzYm+ykCcq96ae$qbRk}~C8zL(We@;o%ElQpNuyRCoKOtB|=%!q4aOw$a z8UVW{Ay!8pm19}xCtfT@U9-L|j>4yX?q*BSq(@ZXTK^sDs3Z^WwVLqU2~mr$>S_oy zcFr!1N63Fa$4JtepW7A2r7BSLZV~;<;r!ion6N0>adqKO7fi)wxv;Z zGi^xMSute2&SD-bIO;pm2d0A}>;^T@n(LhWnFLn3Ohlh~RWD?SU9GuQZQX7C3x#f7 zb#jP%(z=KF{%mvhj+T))OcYXMb{8Do8%(fQbx?+wrh;pfm=3W?Pv({Q>qs_b_9MQCP|ZN?`7Un#+gu%#;2yos0BM}AWAmiwRKn-i zjS&c7%?5XO>NE%9eb#gLacjr9*0j)s+ObeVN6k74+(#eNUeJ%DSzB*mevv||e_`}j zC8eJ8m)Tvh_0@-ys}CwSXLA&;Jd;+tj@`D;-mx*!yJpm6vrXX(jykwk8WKE^28{Lf z_ah)9o}c!)^5j-Bsl*go5>R|A(~WLr`W~~ktI=|qJCdIdLpxb6VormfT=WC1rM)M2 z!@|oVB94V;CZ42>ryj$bHBVpiBHtjn>WDcy&?!;(z{vmM&JDRX92K@-GSs6HDbG8; zFD^k-i?e+B`n5M(-FgnGEjB9lUvf;!ocNEq>^}3+i>l1-n+ShqpXyd=+bB$^7dEJB zqdfav^m6U(aqXGEx&PV^uKZ}sz1&2!z1-W+p0p$~DM{9qc6HE`T4kT`vJro}CxGBP zY7IqAqwa8-ct9ifkAi^ZQ?G>UF$(&9RsADvMsO^S$lCI>hL*@zg1-^~iO*bMaD3$Z zY;}rr4_-4fv)_!&#qkV}5lP9&WX%VM1_u`iWIWn9-ld-rzSSrn0LhOz{W(}L{VpOQ zAz|`fS62s9Bj!{5M!d=v^S4&q>s>v;r#CBax=CFCT4?bT#T|)h%Mb;b(9=~@^e)nw zR;W&4Xg%MrA~%Jjo{8R{^QvNg^*e!o>lf*Eql;eyd3AT890s_bM&-B@Si%kD%u+4! 
zc0ba^%WZ8JY^yrbo48u1DJ%{E)a66=i?H-X3xB?zoBaF$1F8kb{vclv^HEoJ>tY5y zb=I7JPckelulQEl{!o+Op}<*AwI%%FMb?&EU1?qx1B9E$U)_YZG+}=5<~Sf!)w(z5 z;To#j7yWI_hV|svp;Y>I?EJ!lF`!m-t4`!BWOqL6(w*{Y;Yr|ijWA*fcg1x+6TD;%{>g14F z60E?<#OXi(;=vTH<>ge_HDo zmUt~8RpbA}1Lz4s53&MyG=C&jugNXm&OS|c)1qM@Aq1I~RtO`3)P8JjA7>P$_}}hj z@eRbX*8_yLg@Z(5oFs-nP4cBPl?6O*cvgKRNB8Bhhgt- zuZq`3N8MCevge-WW+!VNPq8$S7;v_6s3j7%>^q)Dd?F06T+w!aELaYa`97@r7cHef z6AchT)c;WJfTFi8Uli~Ptbfi^Ig0VrmF18GK0DT|4^kTlj?USJt?xxq1Kl_3uPx8! ztoRAu`lK;tj?9jjIH*YW$VOEdk!UoXo!&|=D3-)wwAJK^>MjgT>}VZayMBzR)K~T@ zjC*8d7+{&k&KJ~>`TD7$Lx9$=VFv1%)_>qEHFaIbI$OpCKscTz7g|WZX0@B4c(w|` zc|WU90-zD=T4&R9I^pXCn@|G1==sSrjUYs?0)T_-{DZ#+XT41BURBFH^8LW#PzPeO7smn}vrlqMlJUv~jKU$rgZLIQe!cf(?3A{!1vCDxf zfym}$(IQnVTaPH(<#3^cnjOB}yk30x3)7hJrYjsC*+3TQt#1{r%=T(X)M*(PRP1HRHDuOR^Zl=Xk*dZYYpCzMC|^ChFd;&j3Ab zW&No&2Aa)-Nf1U<+?(q;brAjJ8sZ#bl?PP?I?f8;+bhWJk}keT+^4SdTfrytq%@%h z?cjefd4fs|(;xb-kG6>WtbaQESzL+Z7v@Vb=OvZEHjLp;JQfPO=cRNrWzJ_XO_{yu zzaJJ&wjJuuYRby&22K(<-%&9xXxs3@uuy!}8=9TZ?lq6UkfTmpIF7B>*CYi?=nqUI z$c?ege=gm-W_kQLkX?sj61T7mpK#7v5&=rXgNkBN74_8lHT<0C@FnK?f|0Ixlwn` zPLKc%xm~~1aJK2HruYl_1NaT;fI!;X+Hzj@MQi=H7NDr8>6oE>^_4fK)KpXXBLWso zkJ9H`RS2reBZLbG)q5+*F+p2K*TUU6prvO-En9L9S-p~fH}Ap-S+!)^D{X+GMj`}w zBX9?v-ZkH~e%DfJrkS}Ypn3U~CL~E`bPRjnbFYX?()ey|j~uaWC~adad*tZsFzxnC ztc8%Q^Va9GMXf%c@LQ zWGtB3oSxR`9~dBvsANfRYVw@8das~RT;uLz8V}f=#!p?1rKP23HPzKl9hnFiZLh!@ z_V{yNe*Rb^x?d?NU7&xcZf02k3!`o7yC_o}Q%(BjbJA%`7ZIk9q&F+*Pdt$4Bh~27 zf`hEMfkO+DFuZt63T4K(ffkUb<{t4EM{)@(-_kLM*s*;^#*0YmK7|2wm+t-GzU zbk+!pWZS-r=TZc_5_+3I(Rqc$Y!8Edq*7AqCst_$_{Q5A8@RKVk_0hxOHN+yjWizm zP+B#8veYu~b%1=QeUg-vw6(pB0D0g_^z{)_6>};&VdDB1AVCzc-{}9AKEVGijd7~h z-q_UdYH8_NXIB>}qxj8;#ubO5oJ&C7^_mv%HK?*UIywrdPOq;g0}H1*0MrMBN{}=y zq7p%*9DS@@CKSw6{rmSX;qLSUf(L=-w?G{ntm2ViB=|}z(`{7Vy&1v`)PTz8lo7F6 zrHgXFJ%a(+0*-sc7PFoG{TRvaCo+J9X<4^>o-!0W17^M>yj9}h<*hVJ0=zZQCAM~V zf&LvDs`vTxqZU0JkP4?U0+Hc!xds86f0)Q5@{?<6A?qg-p3X&~>lhX)zV(L-fS~F3pOE!{0Mg{SPo~O|A&axuSIa2VwpcH#mSUgH zFWw!a2IOJF#>*o_%opArM*+5%Bf>Lqky9RU}lbhXAUj zo+RXS0A7nYps>I@2GgG3J&%6JswGN}#jcUIR42_PB|S`B+X0^$@Co9EMf5yaMk}l727DHw|mZg27#L*Tv$`oq!uk! 
zNgon-M5w%u*5V?BdCQYvY-S|RX`4$)qR38X)~a3RaMnbjH2RVc9C$2B5PA$K;?nuWC##IY!kd~ zLBbaH4i0|OuFna)es>v0^I`;82mFO0gN%#}7@`l@F;9#4WUCHKRTbO~MJNgW~F{ok@8SljyZs*ELerGCa1^;_^Aja0avogJRNy*)5pMFM1iNdY4nBteby z=#z}L+Djw!7tG9Hjm0I+(~wU<*@=_O8fVamSw6kS!9aO1{)nz}3yKy!n*saJbTJHw z^2e{_b-hiV{-$StH99tobZ+C9V9dK38rfFc&cQPKRpIzIO@;3Ru1z2>Uit6)kZ^dT zO)HOOx*(M=n6|sHtzttbY0GNvu*qB> zIdvCKw&2)T7E@r{^qogno`aaB5Z)98a`_1Z`o5gPLY>Ocygc)m+IYUg?0#2PCl&8B zMiW!h*FZ}8GO>T`JiM^bWEOXFa>Bc-!R(mESR~Z+%9b8XS&;<#l&-3_8qH%wu(NS; zB4r)ukNG=o>lV{t;u=EZ{RC@s!od32L{LS>A88$1T$?M*bOV@7TxHP4>^4kd<^6E=uQ2c)EhZT&M@$ z(yOqDKire(Lwh8Q)3v*K3fO#lo^XXWADcE9m!RO2E~W)P;QMYp#S}E_Djy@By`o^5 zz~<#e85v#rhWY1AwdF_^;cE$9LeePrmO|)Hyc?ZJ3ORsrEb;$0++jMc5GfEt-XGli zw*9DLA}?EZMjmO(hZzKRh5g=)_JP7S) zR^9akjsq}KJ2KK|!vv~s8Z zmGz&a;{M;G+TCe~CUHO-W{v{`yWYyz4B+D0=q-+12h(6`I4!STdATvcU8@uZ2ik;Y zk%la=$FCkg5=bbclKRf!v~7%pu|?>i7nk)PMSipY#>{`Vwt6h#OBr zLcNTFu7mLbwY|H6FB)$iM>~m3PwBxj~Sn}E!``RBmY|2Z(rk|wE?Z-_x$;J15tCYR=+ zl{emFIw{UJ$~cL$HBL{la9iBr+kRjjCFvi!Ab9!uZ zdeCh$JD=}NV@@lS?LYs#d+4kq{vdB;Wzi?QN$f5?rasdX0amZ>ikOz$LMVdW21OcE zcWIl!Wo_sDNgjxV`bix}=*<9*3k??TNL9!GSeT`tM}#P0NBzS2_W-v_-F9`=*;&w?>)h*^Wj|9Fw%ZkE3t`*7x>r)|NG;c-|+%jzYF4>w~khF~EPdGe}^2 zQ+JDyJ!#Puqx%o`;+iC+R}jUZ3hFwT4p9p3$Uu7}0ReTPB3HR$S^4)00^w2wlNOYz zJTH|C!GwrUJR%5Pwu&2A>8#_AeI~J~(<~MKsUrU;Z3{ACCc7zQ_;E+&als_rYKl;` zFOY+Sd7V1g72(i2O+C%hT;enWFPvyChm4(04&25=dl8dP*X;)= zvciD>5IGM}u5Mc3|K?Gze%UVtg|j$1f`qr`^2eowl>`zk4f^D5E%^8@pLMh}#RY^h z6)cFNiUG3))3d70u2la77lPm_rat`FsSOlWD+m_oW+#XMV?XMu+o_}H;wG*a5O4mg z9=!2N1W8W90bT#WZ~W)6dY3JFCQ)@bV%${EVYc|x@B1evY(OK4RWUVtpoJsz@*=2P zfG^#KYiN>cWU~BJAPNxzOesU$ZhC)}Ld?%%AAwRh&2lhCn^D5C^+*lzD?~%Kl|lr& zr@OE|eB_Llz6`7d(X^%`y{p7v!5v}!_P^gemo;?8X{lczjYgnsYI@!ECUridvDSj- zl4jeqW)t>&GaUjMBNT5cK#CEn$ViNDDA~J+3*e`2T=k^nNYnCv_UG}qIQpSCYcCE9 zMbXr=pZyq10{K=ivE3W^40`I`VcwlljqO<4bY9JY{tN^H> z8@Ah)x(&?JBo~-eueB5(=>HjYUB%e;nD37rax7~ReOIwFD|E#Fs>tc+74H03;A`p| z9+JJNj!**GkpL;`#?xzzt;l~tTa%f?;2^Dg4v z+{*&tztvpw*?w_Ln{AutDg2E(>Q4b|QHZ&rQvr~oAd`kP=a^oYX)rIe@INUA!B(o_ zjBi5rybp@~kzS2*gL@FmGO_%pMbWO{=dkt3Bp2u{kmwl7c&J4A&tu6z6Ct7nIT{`8 z!sZvq`jY(weG{)O=j3NPlOnn;MbDQOzSNA)=jKchZ<^fUub3sOa8j+8iYoY-#xcx6 zwX>p+wH!}@{0j`+!U@OB{SmkT(d7P_QooURITPdmxIf2&m|%^k&S&IId^!H z_az)p{UK$4Oucd0OeFa`2Gu91Xz7(VWsYyIGj4(_vY?2snbND@4P*!p6v5X2M-fyJ zIU<)}%d)iWc4 z)rfUqx=OL^gFbu&MfmTlp{~me%0f|ae>@Q5<7!Pu-VtaDrE_IP+QdsAwp^WgQ6<;z z*bHW00cXuMEa8Csk0|@Pw_DuLd$1eSm9@woGV2ob&U~t+fk1#c0N3<2m(v*>&&j5G z0Kks_^=g<{Kj+A`N9=(UCV%{$Mo4kim6*`S^)b%(@i76B#fWM5vmNtTolM>mXxwhc zCA|XIn#2cv!KbbUz8y@L&wq=QA-$gPGJLJ{^{JWPQwu+upzr;{bET)>i_@3+UghsO%ITrqRbIp@s$D+m40?o{o3uz>BZL-EJtSWwMo5IC%@Z}4xi&|p?i zylT3`FiG594i2KEkq_QaSG(*}cP!fdi?q@#iO0Vd?%YzImNoLB(&Z=BARDBf3>a|6 zN6K-lVTBx$OhixyngZn`;xiq#Z5)i1<>#n)Hy_r2{!{Q;Wx$!m5xOm)KX^peD@un4 za3x3Z8Th=%#B z{Pjn>L{>yUdT>P@3Ys7M`>?vALH`a~L7$*5XQf22VIc-{8*2w2`V zex5LxBPrbuz{EA(;5=@_)hHWl&Z`E2;Lw{;5En9jJ;&gakHQUJr!bpfyL=neksJcF zUZozT6%?@0W||OnLb~SxH>^}xhM5KQl!y;F6iExbn$HxF1Z*J^a>L}nCBulpQHOX5 zjPVd$P!s9G#Gi2oHwT=ut-niaV;tT*aK<0!9?NVy4rG$_@CVQXI5kvBi3W!A-^crI zm!`u<)_D#VfTN>_B`%jUOOuYZ9>qNDPe26mJm@7Yq*P$GM;pZG=)&~2nkG&+YNGY|GNuPasD1Z-r76v zx@k}#;XwyE369;btm-0yQ5Y}`uqELIQbhS}v4Ry;FVvW|6*E`|$}F5qo{WTN50_#< zeSg9Z>W0z5=n8cHn5ztQYj)=S3k3%syp&v-rl->LT=k{RZ=Z)!*ayT%%cw-k@EtiS z^ta-TwZ0Fd_5hGF;yE&P9toj{{|rv+bmJ~4z^;Y0RX2rT=QCMr%>-;G4mlQ3GwwpZ z<3?yCMg;yfHG~s!?{rJOp>8RW2jxzV2rEDB!IrIpS@`V*2*NDb?@7s4`PED#XH!VU zTI8639@(mu4jnQbpSOyzZzr2tcp7LYl0v*OvSyf~>5sM>0?~bf0XW)kOUPP7j>#+J z87xgP3l6k1=8T9nUi*C>u2En5XRSTYNT#|LWA7ar2B3wVa_uLeL%e1FUT$h3)`0dW zY4@FgJdYbHLx4~{j*@A^+A6?Ha-pjmSOem?OQG6Ej;Nkf9e4Lf^-=7NabW;m{Vx 
z+jb0uc@GE!qhpy^^dvg_4s}3kmt~5#(%6hE~kPTY!nEYFV`HyB4 zv8bc$w0g_X03~gpJ+0&O2g0o)3v&zLT)c71Rv#mI!hhp(IT^#=^xt@;rYLLFhj6Pc z6HWB$Ddp>w0Y+J;cgVzXrx{Yrb6sf9jH;II7NQ-xrikdgz!DC<5I6+?;&gU63 zk~+{Fa3kphizs(k_~WYoUbd>QPt)sFlG90dT$StZIvQhQw^A5cU#}0}Nv&2(%*o;! zikS2vryP3Ok&OK%lGqT``(SF2CB)n9>nuroy?ajYKYZk`u^VNq<|#tTGAO871}Y{% zZsL`u;FC-v7Q6vW*D@`Mkr67{7eQQtAiTzI)xpNo-v!aL$ijM3>bM_t#_g3R`B(#e zC|vVIL*E6-4y%H~`M$jAVu!8;AwooH=F<~Y$1B@(-2JEUdLH{T2+j_9Q0x8IrA<9r z758)4@ew@_zc=w6Vj}3X(n|M!3yKClzVhrUFKxJU%N>t|PI+b3QP_)wQ>-#)Cdzo} z?zCQ1h+6ZdyUzWqjnyX-647&WU_j?7ORS=8r(jf66sY@3S*)I>aK zDL^xO=rqaUSqYy512>BRrWYBixc~*b<9Bt)LJn15e`Ca;6B0hmk)mL z#KF*^Zu`Orld3t?i;cSL;fgOL{^{@g3kNwTYHVn|t?T&(6`YV`4y$a!T0bQeEeNG6 z9N@shOm5B`cx0uH?wf|qN|BPhrl2tvfiIbfecO^3t8FwLH59$qPa_!mS8}{?^}_Z+;6VXD&vo`oWsL*Ge;c?t0BG2@41WgFMBycOa8w zf;mv!V`0E$c{6$; zvESb~Rl(}{eD0uTX`3Dc|Gr5mwH1EIkmi&ohq~US&7|EdA4dG(FG{7%T=gkU#Yyb0 z4oI3}a@eh*HsQNTZ;|)jUNjeH#lvp5w<2yHbESz!u_b+CXX`UXR-$X7xQJxP8OjMh ztA-{BI6uN&0#9mhA*i7|Ob(okp!kg}Tun*e7w7%%80{ReeV_Bw?@uWPaC6A7U2bo0 zVV2!OFhV}JD`$xoG@)sxhrQqp!sA2cU@~dEo0kqVGk-JBG^Pj_&om7fbgzP#`b$}Q z-Pux{@ZBdW=I}>i_$noW$5{Gh=i81rvm7I9nG-P*>wdF->}fMdp_%&q_9fwLA%>5C zFj9L3iz^;{CIC4r4o=QwtrlNEIyZWsVL-=TVtFd5@;ncme~Esfd=e!ejKx4xM{}1` zj-54wkt^OTI72ybXc8lVRC>iqS}`@i*jX@li9I;xc|_o4oDSN=^_pWGEt~_n`MZar z|2~6^wF8CYVArD8iB{+3jEI*pLj&nO3w~^HMrplhKZ=}_ojc_i=l9_An0O5B2MkaK z#=bip;coj&V#J-{{Fh9`h#$PyXExATdweOjqv`n<6vYq(BW(=+cTH|y_;kZxb&pci z@kX!3V)8;khK9@T=JWNP!WvTr#w#2zIAz@tAK3B*Y?|}(Vb{!mSQJ&=Z7s~++)uGg@2@j z4&zxkCO+J9yG5>tHPoHy=q|lmDYIK0K>YKT0J&sL(Z=#E>5ls{QpnDt`g!ClPinR8aVQDiHeCptR z+gLv00oSdJN)O*rc}bnouPFVdl$s))9vlQd0nYXP*ST`HO#jZumK}<}d15Gh%+@4< zq`mX^8ED6Z84?mQcm*nOwoZPnIA@lWkUUBxgbUO+H#ITj=H|Z7RK>{u>PCYl1;Kw5 zo^^Koct-lpi-tKIqsp21fo24=rTLw2ku0q&1^pozN})reB!cAdEg$ zW_=hCIa{u-ok!9#=e{bX&Al9lo!^$kfpfjB@yA~nV7jUzt?;GDsoJwHPxpixa2L2B zL7-5=S>unF!)_4p)KBkz`6$chYEFhGEd;2 z>kX5f#+zQn-M3W4#D4^@;zs;gryZeW?w#{z)THqv*u3+fYlX*bCcSlu^n7lMkYHr7 zxh;E(h3AKs^I}naz0jHRaSa&tD&Tx~2AIu>cMAleacpc!15M?2w9auVPn(S=gT2LX zI(|K)?(FRB=<50fM)+mhR0;_Ss{RzXHqrq(F23YX!f(lc1U4493dmtzzI^$SOi;J%~p}FC8U& zEqJ{rdQ808Okgd4?&$G8cldu=09KhUCNkOqe+Eqge}ylH?l2DuL~ZLCAtZ`xO>yOM z2j)Y>nTyI-{G#c93+@kb5Y9J~A!wjd;-lA;VQ6GOewC1$q=MI%b_wi-K!FYx#-RnQ z|1D5(489x|Rrge`+(KX_qQd#sC#KNz?Bri-VM#_(N#S;GSo&7)t25^YS&&<{0bxgx zKhpe18*sDPh>w(q*|#rDbSVJ&3`WM#LY#N~9UOSx3(uY#vS)=$GC^r;Efw7HN-M#{ zuL%R;;0*}OGB(ydjrZy$2!xvFNfXR7H4Ej<;b9yR3r|TvMwl&5s!%IRsX0WN8MvDc zvx+7ABL0Oo4TU>c;RBp;>h>JLpT3?4vC1Z61XSJT@Or53!5xFGKk$B>Iio))Lpe`~ zQ5r9KfulgeGZ`j|zp$HRR|qkL2oPS8!HJ<7=@eok|9_?~xS+d_&&Dx+uuFMT!uKRo z>Y9Zcc9bB}ti7J2gZ&3N)Mu^bmp|&=qce4)Bk@dTQ#di>t2Ln^J#P~cknrOVEpYH`PNT=CpXftkCJ4jHfwM+!b^e6 z3!_h?@C})qXeB!Rhr?hm$IzO}#8G@o%G6p@{~9gXNbRI;K~bDvR#$JYoS&cHFX7ab zl!4JvY^b21fXeX`9%yh1GG1WZOt5sOcyy;3ADC~#bnhPK#l?ksQSOC0xY=Q(j5z&_ zZ^}X5TMn1)w-Oi?r>i?za2(fbA>->SR;+FFp0>M!H(5DJ*!96badCHcMW&GM>jR)5 z%N{~pUmRtsbEvLgJo~B&lE|PnuQYjXFlevN#Ls^RSPRAH fhl7wLr6BC!|eP4K5 z(qGrx(INT#IsOosb2gj4^c3`*Cxk!?`QT)J%fK~HkQL4R{+(ESzPv?{OS@Q7Q*)?e ziqbK&Z;~7P`0n!pMNW)vx_! 
zk@Oiq>O7i<@u!WkAHAyIyxmVPmSQGLr-e7)6NCy0oZ|j5_mLos>6{NMe z=JKNXXB(!_uS0r>p|yPEG+&0s>amv6kL~?t1=xQ3WWNODsJdKFjFW!gFL3QZ(&+Pl zPDmr1g=#vRs|!0V{Sjt&#ix#s+}=--QIXX|{M#$U?OJMx5cf@qKY2Cu3vzxvQFoS~ zi8gJ?U9VQXF&W8G={S1wWwIA&lW!(9gx;Mw@emz61m4-HTT4er0dw|XUlg=lCIQcC zJ$m9`7dp!CA}n))5Ify^6qs?O0J6IR6ZFh@7$9?OT>cu&@UUH8U5!#gpPZhOO!||A z-TP^a(SO8#tPc)}BS;xPEj7Od^enuoyDjtfI%xQl?N~4_6xS){mIGULEHFlXiy;`-EMHK~wKi1^VV_8|ooS9b?f{8Y<9=c3AZ7 zF`a1YzS)rjEZ%K$>#vYF-Z%{6V()x*fz=*(Nt9D{%}89XW^q1WV#2V*3Nb)k`_KBr zJs+mR^obF|9Zd$vsOSH0FPt@9C48bv1B9&$qKNmpB2yW{rvee9!?P0h-iB*4h|~EFS3h$?&Qr$)=#%_X%*ltWaD#d`Iey|Y?vLBswq|B#lHauzaE*bg1(Xg6 z2na~B#bst?v6wUlG8qHaI@qTHD}j!_zECiY5a5^=;#TuBk%0x<5WHJBxg_SCA#s8k zj63e~p&2Fw9=RkB+(lmZ5vr)s2maI#9^qsHuDV=Z-T&|o^LHtNfe*lT|;IvY% z^KZ@`wc{j03T{aYi|c1xz!rh(5?{?78v&a5_;^rWEE{wgYiHU5w#QtT+pvLZV`F1* zhnkw2gM$MBg#V!7Gl2$qxj)*E+o;J$YwRNxl^oS>VxMd;zv#d3o7*t?z5u zYi^mb$%zRn;&q#9J#M`!E5t&?moIeW+vz4PyJs_f%@t4aWAat z`qRwDv)}%Hg}c^>7h&_iiTTHr!`Eo>^J=O<1DenA@kCto=noNCR(A=omDyhYs(4p@ znRE8a+}u1J?pLf^Y0eE1tDTVi)ff-h)HO=FKLO3aVF-{S-CFg|tUqqrqPk8oK;i7>QM=h0n+3eBkP z(nIfl`KMz};BdyZ+=cAJs(Ml#NM)-3R@q?$gFS(5F9&6Z zkfgWwRSy0pkRmELfV0g;l#m!7p8*PY`=1OOfuicCQ6CM}WXyd5unpAv9y-6rs7(Q{ zsHj*XS%2f>CQvWJ6u~ky%E=+P9Mn=sIS^gP+pWS*6v1vcB#LE>32zLFH)24!OElj9-+3kDOB4QTsQ6o7I5aguLX8aBTd#)JpditH93R(fQOj#dEX zJEm~XG?5g@90NEami{?dki`px5?|=O4K$yW;=cv7ZUmrJ@GtvXQa!fQ@|Fax@ISmn znlE1tPELMM=LiPn>afDgd!fT^ElG`FNrCKDUWJChwJzUHQUa$`IW)$QB)9S|Fa!kA z)ZA-Q12Jk2X2E36w>7|8wi7SBQIk}IhR}rRi7y~gkzdGKNt)t=N=I)rhV?}VDw$G=nRsD6RG>cn z{|;%-&h?> zO(+vTfBw8xFWWGGn3ZT#A>kDSh@?m0yNGlxx8ZS;OuSa=M%jZzMUhBXuVSm69V{qG zV1r`-4624vW~-R3;N?(A?UqXqPUGvAQNHtjsFlgoSO`M*=h9Nh(tIcfv6uaL`a%1` zQ$e~HoO>9KOZPXkQwM}*(fyF_?H?!Q>DPvaQNMou5}yWIGN79SGs1AC$UyssSSy-~gP7RZ z008&DEI0u!r68CX^?pnV*1}eZ`xF`3J4AxhU z>832PBUjMC*qGp7!9OH|8+#rJR_n)YILE{41^s`6?IZpcjg^p$(Ta24vw&2v-la9FEz<=UNj=Ncf>Fby-d#4wgT%vwM zU&O?ca z5CgzHNl?O1M*;%w_ik?TFY83WNVVB|%8wKeg*n&w+)h1NT8zv3kA!b!-!)Ua@8%ga zp?k$XL<#JoUo#`54!X75gSrEoGdjX`OPk1!ar0q+rSfq~DlFG$j^BFlLJ|B~8I*|9 zsu}-Pso_#_2_-QQSD8i!UK$x4UHRKn=X}YZM;_mzTe=4|hu8UYv8ky`xGi$oW0mIb zr6C^AL;Hg8(U_qF9vE3?of`#Qv5CVyP`Csh4yNtl`Q50us{u z5m|3_#(cM|2UeVTt%u6K?JE~Q5WTaTf$LoyR-WrZ8YP^fzGmCva)pk3V`)TqdOs&Q zUC@pO1gTh3@LBt?Nn3dfi}@Vm^1^=8pz$C-Qv6RoaySX*H*N&LKai2g1fq8kaA8BQ zxhc+l#H|EUoc7WuT>s+UA;QV-*UeNX!UKJl(JTQ6<>b2AhGbYEg(FS=huN}~bd(bK z8OOSu3k%i!qa3W0)LD97Z##%1Lt=SLD=oM->M4zbZoEa%r%w2%WINCGVs5pVi*E(* z;?lhUN!vyO;@;9%&?anzp}naPvR|*YLo%6Pl_FE01uhlimXRX1_mM};->;hNZy%S^ zw_{wngx2-%K!<9}^*nF~%yS|{;C^$Wv=VxcW_Rt|KG-P?4=O}2XLz?=Elh&ZS&qln{Di(d=gwCgSa#ZF;Ja>Yf>WX9(-oVbIq`E?9%C4(H8w zcbt1(b`?GoEUi^1r@jmEvP{Rcm`J>=7ME>w`TM@>{+Hx>GRxIRvg!+t18(G3whH@B z-4^=lWwtZZ*@IT<>gw<|`3)OYJG+Mnm;dmI}wy(-Y{w zKa8!o)n8$qn7*th72-!6gmV+daC~m~^Y+7>s)`Cd86D#uik1ZMiQ`*~j!&8JFl+^B zL2R9|@+^wB!T>~n>)Y`ODOL^+3Vwd5ABNqtehLB^>FHl0x9{IWhVf(RNG@0GP9G=l z4BlPAh~6NINiP2V3^;5Nl~<`h3&H@AY)|hmL8TZTBnS#39yuS19O^~g$J3%Q zV@A{01jUJ@GON^JQ3G3`wGpyPsFT6 z2+?7I9)6XNdCL;iAebANnIV!Q1CzH+%3ojhjE!$ZvHscyWkdZ(z82~2@9gav+`2BA zuh`kB<|GOjL}*bSe3(R(?M^qEe-o3%4D%c8QtRT3Q!?vUap}MTf*;VDb#N_)qhNkw z0#AzU_E=(ETo9;=U?L}Iv8ephd&!$HG{VV?7$XNmnwV0BBid*zM=!?U56TqD zV~JQ3^ceIFEtds%EWa_S^8j4P;lG>rC~3qj24gOj`Fk_!X%5^GmV@vhEf9BGn)>%n8U^vbDivI~98& zbKg^(U4NVpVjSoW0XAlw{fV1t3}qaQ$)L!J&eq`a1BXG;vUrm@>d0D8PR4Bdl&yWJ zTzuC%tXOxCzRFL+xw$9l_#z?wp%1sAWOFllB=>P*5toGTS=(eYhmy{)q~O24(~0S5 zGNXo3GD0&Zz(iKwjF!ZyhyAB_QwALM)THH0m1)uFJ92WmtIm?VyuALOyP&62VPPSZ zb2463w1l>`5KMIfkTbZQAVhki*&iKKIP+DNy;ytW&{UTKCn&Rj$+=Ec*kyk0NUl37 zWOMV~4S1h;6v0A5_ms@{k=}Pfdv$+-D+hs?GgfS>zmm4_VyxUD(mM*)WxR`#3Xozuzqm_?g&8*|$J 
zK88tZq)hp3CBMA=i22Aoj{l^$akVh^N+>O@xpXGyKz_HmdOoGGE;FAD>f=_i`)M0m z9!Dv=y1}Ezh0?Jznpj%=WXxp22FCdsBvn2W&90A*bd5T;6sqOka>j8H(^pI{{^yLF z>-p?OJz<19E5R@S(Z2%M-nE`IkXF(^+!DfKwSLC4Vsf$5&u~V|H20bhUn#U)DcDv{ zdk0<^KFWzJ=e8W;6zS(wn?_bv=4nN*U1(N(`r#3Qsw6Gi`2!iBIOdxA_OuHlR69(%0|q{uje?YU~eM30aPo^AfuHX}vAU0FHZyof9<&4V`s+&M-#B?6d$p{6Z{<*dtc3=v+<#!K+yen z{ltqZQl5$krvP;DBEkeoCom)H^z_t94;dI32-_OL98x|~Eb|22Xn3iJ)Qd(3b&=qp z1A;|7)9*1&BuH*dRG1SI5D*v((14W40w_V6nW6i-K>~rwOgaG-e-P!m?Myv+fE3aL zMP7hC2$-$UO_#K>vAM}MUtjxJ34q2@Qc1lQH+(L+T#|Ag^Ykly{ZDCW4DVfCnaK$j zi%OfM_4H`Ki3j--tYqa-5DJ%$6Na_Iibe^dmfFN#NqlfDyjn3CgqmdDq933pZ0lrr<=dJfSkH%-e*Qlie95QDFv=(p zgo`e&cXN6mA=Zg@Vry-b%%UK$nZb+uVI!s6+YR*4GWXIO?F8qoZCl?NYYJ~YgUdbC z62f7|PaGFoUD=xF60)voOYmIn9wR)ht=ECUwK=_i?~rmbT>m&|cjFmfYy392+Zsi& zwn#UHYFFn{N`nnJz6q#mPVQFD{%8qwnn-<=I=G zI2)=SPNOOAZSnA-gr0Y4;q`-JP|qgH71C{eR0S@%2O%%FGclhS)8AISDQC0UN?Ez!qh+d!G2#} z4a4TwQpRe%NY0me8HI)TaPo!To}N!7r0JhOhkzJZnVp&s*(WJ0|}!zkp}rv4Iy-MG}So-%+%9OsCv>R7)M7(t*orz z91*^K8d=DW-?1+9p{phH>6WHJ0dmGk@52v0PZbhUfBKBg4i}MN7XlQtj3p8Fl2Ta@ zFd)iMUkJH}AyEd#!OJO3prA8=#$X7ARvZcKV6agY*^okMG7Y_i3>5R8!d%rr28yqw<_Iua)*R|QFuWss)%-z9XSL;^RL!*dYE_pr%s#-D$yZ%}v zwDv#f8>|lGcmR&`aN+W52Ge~{3?Cdwo{ZW13ceEBZ9zp^M9zl_LyI48%|YpZco&G@ z$#GF3c1X?TEItTL`M&Two?2|>F%Uy$d-Uhdtw!PmTbi@B6HnAWLk^xke5gt*P~hJ? zPik0!DnL%hwqWxm;OD)&+p9#qbcQ8uttydD2<`MCwz-T%&$m=+d+HsEF}5)0Tv{9I z{-*_qk@4BKlzXh3O%yp=Za6vm?8y+C2otqi`LUU5$JDmiB?o=D^Ya@M&pKedKH@}X zy(%*EUM6b2ke03(ZePLne5^~x>~it+VWacER`V!aNh0Mj zYo{YWeIkOw^W39jI?X^7jlb-p3K^S=PlSUBKp!xk^ad%Q`ztWtT3T8}Td%VR{cmw{a)O^d=oilZ z@F8&kV!-Gb7-Smr&p&~n^B%05DoL3wJ|Tex{2Qnr^u076K$}wGfDB$of4>6A8h{T2 zm`OMvi26Xg0ZDCb3h;rJ#_l{b)YQD)Sk|sP#v^eW&LZeqDpt?;8}G9y&norj4Qu*W z-R{d;b`=_=!0a7lXP}lXVf+0cTt{z5;s9co5 z&Uv!H&qPe-7&<*cVC{x>MEYQvGt!uN%RneP)C*j7JZq*vY)O$QVzO{YjMx0~YkI-) zQ~p8*r@=9un^7(isDn76sGeJL9j9)`X|;=1{a0Mwn%PF1+W_QOP@c{W?yN+>GB^w> zPyB`Y{4@O@TxT%xi5ZqiUj8CMMf$GNYVqZr9Zu@{qh~{Mr;i^b;KJL51a-vA3ol5b zDC0mdXW3-?M%VP<(LR>va3JTU`&i#&j>3?w&HEC#fTJz^ zkq$4-NVB7t*Nm3)n4vY>k5A?pi7G;Fzqwq7w-mH}^lZ>1P={_?bA{+mwF1v0q=29m zI{)ELZgh#mbs;;#QrJhoMa4m@)b9+}#gI1osJglPPH$fJKb0kl%~2s@-%Hj7XoBtruXDFYwmEE$G-AA%?xZeY#z&sq7SiA|@No zi&_b7B;{k!{(?DL$bEOlr1s&HCwD(m^5FCz?O*s9Q&}L6x;s0iR8{-Rwv<&>+lqCE z2ukVNnW>0?8lq#x84wlB{pHe`iXLMBl(+Me1)PM{`}wCuuGEEOA>NrguX10>STa7Z z_eS2@a00*SJvk}lOHxwnHlObMR$RE8@Ok0qmt#?bDy1NSXe45@nlzPuRpHv&(l_UV zt)ZobEJbyFHYY@3L1}O%5%i>3twP{t9p^VO##sThb@e~DF2|2f{cKbf$wncjh?v69 z#XBoD7S|TK&y~_=e9#VZEwR8~)(VZC_|QUbfbg-T+Mj~*bc4vwH45cLBRmptI!9zl+>lCGMGP*4;QHs*2&HuVOX+*g zK0vXQQQ4uO1QKg0$rIV5V}skxgMNNM1eR@BDxC?J&di|fyZ`y5JNltQy531)(|9x7 zd~RFp_sy#44&vPmo&w}j=y={^ZI+?eessm!3+&QmIqNVWcbBPB3Ih2 zsE2!=7EYU6NX*Mylv)C5G& z6SHYwvvGv?R2o(J8j&&MGR=9(?=midG-*ka-KNKSTwCKq67)4-0UNvr5?n91my)=X zIsk;9XiM1Wcd$d6m}!eZJ~n=1VINK_Jp;`J)yiyNGfL_vU=FYs?5&-z^GMQ!_$W8c z;L1qzF`3g-m9i533yj!UHQ>K2LuY1ja9Ct{@iq<-)|I@D=F(sX?l)CTixWZTLk(`$ zk3A-BwcGOWVY_r!n3lVsm+_JIzr?lpf7E7tqc;n#QXLtsgD&79SMkDm77Wzm`i>7^X+@1wy6e;Vv*yLtC?-sg3@-0nw~q&ee`-v@8gIqwr5p6sEHdpMt% zdKzGPKK^Nyuh()J_CVt7Y=h&0W3Ib=T7Y2e*7AiJBYV#5l@#(C zmxp^&u2k{Hvc)t}^$~uqhjFui553Rv9&i04Q`hLoGe8GLy;z-1rQLq@(T+L9>v8ul zM38~)8*lBg0I1SZZZwbVgp4_uC^8TbzJU36SM$%lZhnC}AFUHtRkYbWeHdXoUTD69 zM)iHd4bxL=Y`w)1HG>+PDOUN=H8$ll_~$CmlGQ)f%HVvbw=X`12|MxRf#*zy=hsn6 zvC}GBgPG*Z@2dU+1`5{^7cHO%996+Cetz2Ti!uF@5dVS1s%&;4jeD* zu7erLb=%zSJ*S$D8Z0i+q2IPXv9$gKCf%R+%hKknZ8*=i67?N}W^t<9TBc63uPoU1 zCdRgVFyWLnRkH_!CeypDa-rYHA9c(%|3EBc=&;?;P)&VQcro?UnfpqNL2~jqs)=DS zY;nS2fRe<2Q98Bjh3w)I;!ZdHijR{U*g>xRlGj@7Rd;Q^wq4fVG6F_i6-*M7$mmLxwH6Bs3o;HUpu5|4?cPF@{RY}q}(*2<|t}$Bsadi 
zr7(GFNj0@pyCdt$EAUs$N0q(~)j89FF1zw+VY(LQu;^6232`-lm(A%SRF!u^Z}(>m zV_;1#9(rFN8BLqYS166?e#xAzuMjX_>wQ5OMO1T)`7e)GsMvNQy=V({zhFdq?Q`{V{f z^6h#^nUSQ5-7TM1bxQQ>!nXo)-5|Z)SS6 zgZ^tt=m((5DCN#O*YE4Z`;)QlUYI_oC^vWT=j_{BZPheS`qzuDxT5abfuhehZXv&7 zecQ(eIg_PKnz$#%W&M_oI*Q=Pw6Xv|F^==d4Vt6#d8B#b=nmpSA|de}CM z4+te+^p{s%5FWk9#DN;>=Tq)H?{`qv{4I8Rg=-fQJ3T490v8tk+A`9yBZnPY>+tNJ z15)u)l{qZN3O}Ipoqbw~mqYy(%SMCKOJO>{SE>=hyi@JZ#~;+W&ZnLT!fTD@Z909= zGR#%0|A>4a-bn8|$fAH6zQmyVzkZcC!tp#wAJ?+a(BTFaXV;$Ar^yuG8i+Gv;&Za2 znB!8o*Mo;f@j^*4AdgoBfj_0g`B+^^z?VMOxsScqTsGrxk<1a-8(&%6$xYc`E&jtc z|6qZU)eU}`rN+)JFo3NmNwD>Be9)fY=n3X=v%qNx(+D3k3g%;@x*D74_I^JLFPTXiRUb{O=5MdE(pE0I zn({2~^ofh|4m5X$g*Ca9{-pQ(tgoc#*T%&Gig~Nr&7qTWLyuL#GMvo$+xogyz_6!n z=TKLdZtI*7U~AR~TYYObPH>i2DLyTne|Hr2zO)6>z)%mFV@#pp>(A8e_1F3`$p_}*o{b%ko2TWtd7YqjKW}0th83^3Dmc=)IQs`m_ z{>0ed6xgH#r*5h5xTrr`cnVnBy|FA;y88P%2o2W2LU%ZsQct2?7$j! z{HQ~X%B=Zo%pX0320A9wESn#nz|_=Z<`3Iq+huQ6*!7mYXts6s^ODcZv@8>zqDKJ_ zsr&rhFG=K>g=Lj(fYjGAThm&#CH#F|C_|@H_;2s+(i)Gf6P4bPHhT4eAz#}XbRg%? z;2@G2DZ>{!A!fipzDmiQbROA!g9B~Zct5J~4oYq3%1Fz3dtsEW)37H(SYeYWQGG<( z6ca^J?dzQ_y9<1p4Wm&k-a3R($7jK|xZ$Xi;g~ z=BQ4Pxq0@Gj(9qy(2OJYI~&sB5E)g@3OJ^c&K)YLl&_4MxdFqJQgpfF-~f|3i)!x9 z&D_&|h#tu^w*2r(od_qFViG3~S@>#t$l+?hwc*4Kx!aSh+l;z~PI#G@wGf(=Hx|N7 z^m;im#hj6@!j};u=W9D!_>)Uol9FJ8TBUpGoy%!`2igxT_<7ctZF76n^u$pPVzQ)0 zZ|b&1$(nE3%~`>Hz53KEFCT`mARzej{HQlItxnv={%+QKGkg?kWq1%3&h=e>{?sc8 z`w`Bp*!X)Cd+&epMO_?FU+nP`c|bQlwa7@^W5=U5lS5^RpN(tl-dVjbGmVE7-ZOJi z^!1W2#wDcw#@lKFEGW^nSXEOKP8J7=4H^todn+kvls^onn=8#6j*jAn700c~JoWM5 z0i!rnXPmo7_pf}x5^jw((h;w#C2Iy>?Out2VGhI%hh1SFE1yZ7cIy%cu;th&nXz(< z@E<5Vq+-U?$dhHtrE7`TBv=Q97s+Hv-&B~F8fo3SL3uD9QG3U0-;NcrX=ik@dAmWr zeofaQqKm809iKTL$nb6|HOzYcg+8v{_4X7n`8K|Q`u<1y-aCQYR$f>0#Sf6tpUeXx z$KQW-4>r^Ku02j}SPop7Kn6&&I)aV0`~D}E_jQ4TOxG8msR8fjJv}>EfnA6hbO};r zZlwYl0O^UYsi@Q*^9)by3?E;%Rw73uqPQC$Of>i0<&VULg+l{VJ8$+oE?22rH(JNf z^qlMU173Os4BJqe)pGMZN4obs({)`P7IApDeQF(A*~}CkWRY3n9Tai%wPAodTw0kG zf23#cm0*wXY+7r~Cd?PFT3IY4we$4o+pC>bD0KuSUIPQ_A6>T$8OnU`13ed0y(VWP ztEZ^_d)I2xItLdQv9a&o_@Ev~JgdP+m@1T7U7VaCvt(EC7}YB@|G|6L^Zup#wB6Za z1Xqcio}PSFkB*V=2MN>xN@Jt6U;K*0%X?>Pt<3A>Ns`c>fR=WjwD}4*P^S&c$*~c8 zX3)}1qpW@+W7@Ecrj+-;k^gI9?xKV)y zl%862HQ%Q_o|}btdbX8X+UR9}I)!reb^cAG;-7A#&Pp`oeG?|O--R^4p=CLdu#8SW zTME>dh5+j$iISSsr-sTH@b}-dG$<@3rP3!+on1e6L`wNu_2Tm;=O%VURIrjHe8zm% z(%ja5emi{KqR8vP!&LM6^n$gzvDL3`qc5()3{YLq zbVZsuzgy0?gN((|?26i6S5diFAI87Ta)rdM{!@0fvyEid!YdsWe!>2pp;P(3gHYl6 zgQd;giieR=!*A$Oo=XW}0eZ#w#9;@7ne$sL z5(1q&%did8GLDxl89bpGBQMv0aZ4U>{x)|M@m~t;KA^xp>SqtdZ+i9okU@V`saYJm z$M@GHT)9FyRH&X&+)`u8AbrT)L%JhJPRbWt<={`r08}Mjo4h=t2TUpz&ac`vO9|h3 z&p%gxKl?6MMAY(}5BF6*GgN-(g0(7s{k*lOzVG2~+}c^$pF6}2i8N1$KSUwNU@nh% z!j+&7MQuT%QEeB8GTy_ldqW#-d8p{p(yb^45q4HTFVw^&N^WCQ3YT8gePyKCXDJtn z-3Sv;rIZ~Fol3J28uL4``>tg1s&itFiN^|qS&ETb7J`z6S4<|!lJQmZ+!T!M>+kWp zPZ$ln92xVF@}=$$Pxa$NPdYUEWGo$praj5R(~Y5gFOu>#anH)l5H%6X#tZM@RaPXE zlWW-9Hka>BY!)s(Z+sg4cqa-mGk%)1)>P9@6!~RGgUgI0G+>?Zu<*wDaTdfsZahoV zIcE2|dq-LoJKZ0JUt@Pg1UUj@x^UQC8i@nA9}A!D9?wt{$AdpMZv!t!U;jMbmO7jL z>H1P2-2Y-_JR>{%$>oGGupBU~+}xK-IL9!_QnePl+S#yFhb96D;Y9bXh}3wlft7v4 z($IkiVtS^_C=>@V-}x#!Kn!d(nKXnMJi_Mq(b)lBJYm4@5*+_!Vj=K;&2-Z`x9SF_9$wP843>{&yBB4 z#g>2iv3sfOfIKU+>%O;@+N5E5lEy%t0fSf&;Yf^2=Y)eq+G-;|Z>dvuA?{B4fiw9? zzNf5mjYY%s1lluq@xWrxIGs;4iEev%+3W2fadQVfpYu?H)g z(X}sjvKK*+QZ7O5oEIq{A0KmEXUh?DRgK`UwIk{5LAK)j%uI@wjH++Ss(s`=>HaiS zp+Hj%%1Wr;OQey4@AfBz1~!g=Y~&gJ{Z^zc&sbUz-b|oktL|UMH+Z(_x&=l`SWE1;t4-mmG9?rs4QDQW3ODFG>Ik&;F_W{9CnLO{ACrKD>>Ql-1Q zbEqL_;Jd#6-#2T)S`4hq+;h))azA^gg4h7+js(u_N?u%1thD_-eBCz+g*-oZQ}>LG2Q? 
zwQgW9KU~CfzZxB$24uHlo_NkET3g4ew?B4I_@gqjz21*8*qQn6duVJ+-Gtudt2-&N z@%8pLIz-edXDtH7wa?M2dsjkGZSlTOi%Jscyw0eO*p>34 zDU~*Pq54~zDVfWcUuS-61ru#-O`~P10=cTDdQ{|(9g{RZn?6^j`dP2GYWZ?fEulW| z`|G@1PQyB#4_~Y<@4Rrk@d*|E%~|*2=HvJF+4LVZe9u+OI{lpWb)%IjX@68@k$YO~ z@=?lc5JexYdV7%{+j&vUcbJqvM4%=Ay&S5 zuK5jC#{w=3*?dLy17>BV21<}aRST-P_$^9MNXQ2vp(%M{jHziotDPZV+{1mQkF%eW z6Eh_+ot%bCD=HX0I)U&*Ss}FkV1x+lBO%61O_1dv^q!nD72mhLOqUEBzI_graoU52 zZyr=onFl=27-#O@bla?Ib3Up&MqC(d{=&}PudDq29hFGZ7269IF9SLQm|qnwEx7sfkC)ReJ7cHbyEY!n0E%ORXc%gf6qR z?e=Ii=K(NG%3gC%p!C^{Ig^bz0O3O?9$H!?^zbNU$r_{EF~@bBwP4>tGDcz}{I0}rth$qQ}; zFE6uPcYAx9z!N4ZKMc{;017|9Bb1_T+~?12n7O&Bq@=v)U)5s+gESf%p3U2-6=!Rq zqaU{a;zc-WdBWuYxocBkunmYQ&d;CZy?YMKED*k(F(eJez0LO!m8bMjt7d7zLJbs9 z%*X~x*jwbuKzrEckj>VkF62Wkb0nAhZ>ss3oUh*)Ew1?WGB*e>ZK=;9eU1vrxaAQM zzQ^Ktg0P4BV80aNm*3vL{lBq;FP47x{)-;k7=thIynn%^A-gYbEQ zj*aX6ac8i6zO1LGqtsa_4Cwxy`h9;Y{PG(;;3%=3(wI+g4$U8lI{;0(iu`E~CIc|? zstFvsV)nC$7#M!EQ2iwVbENTL>RgRzJ82c59pmH64ZnwhY1!Ea791Ng&brV$yVA^^ zQ3l*f%Uc;JC?*Thaj-7ttU8sSk$1yqA2DXl{Mqoa?EBII;6}{%WL{S^S;&%A_{1bu zBpOikwy*1A-$lb$TL#9jwo*Q}U#K-1>NWfta!wz4_HjET60Jgh$j*Q7h{5;zM-x#% z6FCw3z|R?eSy~K`=^>d~>ds?8MZ&+8G#calSC0a=d-%RZT8Sc%*<74^F#UL@QTyLg zmiI(luXjEMGN8PD|B9l*zA-_Wf|Va=d9e7DpBqQ_t79WSMNRIE$>c523Lda&LcEGGROFRYz1qeWL{7)Y1yjLvU^*os#m+Tk) z8#(4vS0va`b@udHg;4QML-?MAL=v;S(IqD*ey8|W&Yl%X!2adS7mNvi`GXUVMgLqs z*!@6@;Ad~Ig&D`SfE?Q#sk+ZQOP@!@n}(1Ne_^bR%j4ty$4#rFfH`Z@8pBR>?#fh_ zJ0I@b+JD+;Dcacha_pUCDh54GvMYOBN(dB?EmehHJyT096~C@rivM(Hf+-Q5WT7{W z2`9>Zr7igUCTO7Yh;X7;JyYtc+Wjdz>7Ux(@zW_L+!r&0U{sO7;Q=4|(;zc5Oi7BO z5tB8M4ur@lUyD%0i;G~UgKg2^G;AM#+%7!77K~;!O;nUolpYi!SK&OY0B1C_iBUIr z*D(52!slQ-6j4`BW*!N4(x@$m5eZRsFs6@EJVcR|=C55wzg5C=LV5J!)}nJ;wQ>6W zcp>M_-}`HroSvR~@frmD^Vk2!0?1p~@5{ZCkU(L1Yy0sdDr*2V4ITT+^`LC`Zbjy5 zj?`TBAs4X)T77KuLttM|iO#%MRkfJ^dEFXt;;{fQtz$C^$p_ft~|PDhgN^xkI2!f6{u$Qq_9kfs)GOpC)l~!(LXl&$7rUXoF^8au(Q< z$Bv5K9Nkbyt9EEfPP}san$5LumpIQ-6;M=BJa6Ezkv%;_f;`nBWIixMpTyu&N_&588a}bHfWY&KNR(7 z&}Pk(SyQG&>S>7z@bv$EBKP{+)1IceR!=FEiyfW~2-%KzgbZxEM+;O&tNB!q5 zRovU#KDk9fzY+hAOH_CF@Z_v4c{Me``|EotxIq;nov+gbw~IY3AY$VK2}NFQZO*~L z6BI|0OxWXkW0TXOa-&ll*A^5IoC*^hxuHxm^W!-?3(b^iZ*0u?`7$wDn3p$aQd5G@ z5m{@+`4WU62ej(r2e&^Q&cK1#(B}7DNiGOAnpNq26jO2=nehdxy0hiS!i6N)r&I;s zHn-OP-NzPP%z5E=!nKPGY)zWNZ^OFedc)D43z`*yds_tGuB*!5rM92^@U>evX++(Zf_8QZ`e`({3%=Vm}HP!q!Iok zNvY;7gRU{zIu!$1JpS;25X3SaBU;J@mM2V|%;Mw0l#yH~Q|#MB{HLq3LKF_h;sS69 zNS@2fP4%IHoL6Q~?tll&nz;87`C(hSZR5T3Op3mS-Up#R$?B8D+t2m0yn>;W2R5_O z8Ol4p>pG3Fg)8CFNS zV4qGiSLepL-?Rwdts_vH&-!yc{$Mgy zF{}n!D$(%p3~vO7{pn5Cg8mj9ZSVf~MW)sMyW?8=Z_BP(^!y8aG=QzYhrv65NDq#G zXDBKvdiQsJeq0KBi7J{FCv=@l0^mSc-sj;q>T--jJLVqz%P4Y_Z ze@B&+{p(j1Ksr$7er2Gy_Z*lsVfYF1Y*;;!N-1U96L-0s!Vbu}D?hN@>~BCEyMR&h zO6~B0(`K61+V3SFb=UujO@&gx`=r3xlecy6lco$3&AAi#N=T!y=%Wl1=tjggH}EVk zzG?Y{NSYepshJc`@veT$jx`UL(NIbYw?4rq5p0D z*Ub)l+tvO-Jf1|n<qIcfvnYU+MzytZwRs6;t z?ar&OIaJzceq$*!PtZ)E-;;<~t0YtB;izH~q6u3bJdeb|43nj+iCIzM`i&ZnuAhg$ zF1~E+4wr7&yC0h$XgP2EM(~umK$t$#y^7=TerocVK||+qvk&WNazKDwzo%9Y`{=kt z*d#BPuYI!^zSjI)QE~Ki@4}pF+dGY={UQQg6qSgr(D^d`M<2IzwXf^1`DcH1n6-oP z#_I8gGUSI9V!Rxp8_5$I>)F|zzg}*n{m&}=E!J=_YCP&N19uh|ClwcC>F7?jAv!C5 zOVhA3FL}G2iM^o2b8cFsx&p7XD-C70)u?qeHtNUIb5V${*r=tAJ(@OA8QomFpY_e& zC?W~yW&m1iC4K#oyf|NF<>G1+Gc(F*?{+KVo4_ax*>OojV3ue5bj&g|6Xbp-{K&Rh)zD8F{Td-PksI%-RE`O3x( zJRVx$@t7x=X9YBHb|wZhp&Fq5B1QE=$wkja=~PfeMe)I7@v)^o$iZ^Nndqm$k(2y_ z+ocusnWW%HBC>0<%N&0ZUs+2D`F3Shf9>hoi#12cb)$HmCL1eSH)#PCtAEe49(oYr z5>_ns2`cX|rf(=~c*qMrf=4xFNKK1&E*qalSLj`{GFx{%mNROVJkE}?7it39W9&~J zJo!i@6JnoKmYS|T4`xhfs?e%JI)4$g7>oyp(w0rkj=1c75+YXBYNU#<3&y^5mvUuX z$J~YOTOC${zdpSVK72nt 
z&rZFo2%+eAruHx1U`@nWiY;dZUAF;V00zA$(Uf!py<0`DT=Nz|&2S=t;kAwCP0fSz z7VVEj_#H$Rb@R_&&<{T#{d*I*W5NL0^iJarpmLLEzp-niJ#P2mLr6qL!i+AH#oguR z??JlPN`02^^sM`$zng$=y|JB^1`=`s=8{`Ptx0+*~;eXY`~?;8s}vi*VT7i5(=}7t56Za@>;S&W$=+Iz{KXYYz^m2PG(lYA%)0^&zIsnggT-Wmg)I>V1@j>``c&4R0Si7 zkW>Fxt_11zxy86;R};F*L; z*=UlDTUHC}$cJo;xvNZ;14TM*78=VhuCLP@MCdG^UgG>Bz=;irm)V{8rmEdJ3!%IU ze5%R^`nL^$6DZ@-0e|U#9i97(j6;2UBc}S-$Cpa*<&6yiX>IT-G$-Z`WjFWHsnKN> zr)#RLj_U>ea;2uwhd>Pt3Ic+L4I2|}YEfsMIVd3N2xuUUR8&h7bwyJ!5tf(cW@U93 zy`G$}wG!k_i}79!{8~}f%JLEEd8n_g&+#*qor6Qb`tOStANIV~+6IO*|J}?V%}Fsp zOz^k%KY!6HJDx;wwSY}|GSGY0C9k1Kz=#vgK)Ksl&#pG2m-<3``?nepGj&@;-U)P_ z`L*DZe2^g34ZbEWsC*I-bb2F&A;jE^7{@ug`v4KGq@r`JGkc%_@tr(=Z}WD?lb^f( zAn&v2r;Az7*1MiSoZm1R^!f72io|U08od+eClLoi-i={74qFyyeg$uow~O(EWbxtV z>GzeiN*ouLQ-4hJ!h~6zwFI1BKFsMfW7TbN-@qq^aW$b5H0y3ollLCjr_`rq(%#i9 z7^Ck48Ntyw-w11{E&0buKHPT>@MX1L-ujWP<1!`?>$*%g`3ra7-$RL=tl1!ju*Rl+ zt0uYJm`##IA{%DOs}2Wy+pboDbB8rszxhho;o{vw93~}m0aX!O?|$dLgBbO=qFW?Z z(+X#G(=A^R7=vb0HyF%<5!eQz{}EGz8@AxCd|6E~9a%==7?_U;ikvkn6u^xpk=S z@6Qds9kha80|jZ9sSs&d?Q!avYIA}$L45uX)blUnjVDLm>!>a(dqSJO%#9SPx zdu(m}F4#fb+5%oiBXb7e@=|wq|C<;0xSeJ`8qd(3degEC8hy=~{RRZ`mbe0VMHmA; zYGFuBDib*qlXjD?Ro9_-4#!INXN+f&zsS;p*=kYLCnU`OZT=*Sj}ISi4Y~p|`jHJp zJn1D|e}%iOqh%Nz9`juFE3NzZaI28O8Tq4a$q!=B3R<=DWmxZ-_CNEpY$INW$q?$; zGO#WooIK!&Dit5v0cMB-vDu-PO6~IM2Gyk~TH3_&W+08c;)CI8zZS9lXNza4<2MR1 z5qKRixHBQ=LKRld(`HMQE-*aMH1zb76rLLiUF0dh@~=xr2Gfuf#V8N?flNEY zyVGKUSQoAjICp?E_YQ^`K2<>@%*Oz}nL~74=^yeqyAC8Ri+mFU9=cy1d~?&kqE82A zHF*<x?8T=_E76 ze;z><>WtXtOSqLh){?%hch0#=eD>nfwiS;YUQxkq=SUvhwy{Y@E&lU*{aIn=gbPc>I)Tlu+8=q0PJ7%Xx>^s9Ust|yk*Cu%83I=5A^gz zobEzYm^#vuNE2`QxVh7q5Nlp7K0x?gj_06;OBsQ)RPxP9EIHX7YiECdP#@!+OmXH4x&N8yU%_P3%lW#OAG{RIFC@#<#6%`J8o>TAMYz86nf&3)RUD4iF#*e+H!+D)w_44E zMpv$x#b=q}Rjo5y@>aE6GGesLM;3hfcWZ4BLI#=ms`as4B}HW%5idUKzlCt&0KdqF zc6^q|P<}gV$|6mnaKe^wcwO9D8I`jFH53=nXOH%7Uha*FMxzNlOpW6og zG!c`~L-H`|o7}MLR(qxOuvTo%Z_Fw!14GJo{!DB+1K6AD3fT@@UkL0+M2DvJLTfrK z!;g_rrUidfmqXLml0>Za(*5NXHU(c+EQP7jmnnyZr7~z>jNl?^$0p9(-x@a^sltkk zS35YXx4+*qE)E`F8;o39Y)TM+Hjb2ujl*7Frhl#u<<65KQkol}RS-9{yI-ZA>C1+U z+7p>#KFYcYch- z-v;rQ{vLY-bD!#@3`|=>utCqz3fY#(*XfDv38)De{QT9wqh3?*JoqVo41v-g=Ewh1 z_pO96-O0^eZ@@Pm8_K!b92npG$F?qa@^V5mMv5D>IFd#5dRi2`6ABDH@spN=kfU3 zlQuToq?`*&f4dN!c(rkHoh@#F;a|-^5^b9@5de{J_a+H)Xq1mbT3W!a!>;`{Q~>-y z13k|W9&~$!Eh;uyV`(ILy_5+=x^Tl68@3famL&xRv5g{+JV)JOwigND_JQ80$*m0} zR)J4euMn(frk+U&o|j=&QxZ4u%_OooeH#Uk;fzw8AS&W98czv?or7&w>JT1vJ6tHo zB0`N<`G1~He+1Qm{>)F@RoM<xy;TIUGLx;S3Fhy9D@}iD zgxdWHPw^7csWd*TqaZ$pxxs;8z50yff^bct{_70EZcePdxfp=4w?AV%zT0;RCE~kg zoj~hl#XzXxTY>pl_2k525AXJIs1_v^eA!rq<%JyblGnzW?t0$tMBEVQKByWfgshBw zO$-l;QOYd~_$ZgJcX$zY=n%B9rrlzsonpIRP5d6j_duunUP7XR>3uVnlCOf^)b|z{ zb8XfE6IL3@9CHKhchinq7|kHfm6i4>!*9_8QFz?7~m171@e+n&{ZBR zP+1w~EF<{xr_cSdu-szH(a%Hi>m@Z*Rn3#Yw`l@a95;<5WMou7SIi#|5*#zWex>rf z2i(P^HIsTV6tBpoy_QsCvcQ=eAh~Wk@r%rKZcT1*FnFu)^##bUNy(D`3^V+~Tpl(9EOcZ_ z+FQR^X#mE!YDs)aR7vgIim>D%n{5fZ1~r4aQ|%gd@<9^~9L53t38k){GTi~JAgF7X8*bXJTI(WkeSktW+P zR*dWGD9trk)HA>QGZP|&5+5h&!tRlg43doj0jBGDC!~KU{nQmgEr4# za*-p-&@h>g5qMzg2Oq`wJBz2vEug#mr$%U!VUh2M6J#`cT+T{FBd?mH>#<@_IAO}A znPKJ_t8x4u`@>;uI@x&WGg;S$S}YzOF}Nf+kIZ=V++$g?wCVlWu_ zE%ic03D{bURN@sU-Cwg3qyk@Ne-k|I{c9XuJ>8L5z#rfv*I7?do?Ty$KQZz0ZV6;! 
z67VkY`1rxFat1_)w@UQTjxY=4V`q;8s$8YBi^r92;U6OzOBk^mQx(#)H%f{=MfH3$ ziT$MUC6w)UVZ*(p^auHaAY;NVUDKnmu&{>*4imM;WO$Yj!!QCM2Qp{K9G-z~vSJ4Q z8E3wRZc7BWHO%t1HSJU^gmEaf=<7B|{?;_v@KJ87*;xI($iW@7-KdVFFF(jIPSoyLDs>C=aGty?H8;1s2Cyq3B#Dx5K6y4rQW=2DKs|b62pOG5cBgQmXcvK<`^am3eXfQ8W?DYdS_zsmgoKz-e#}~3Lzo>qr zN-(Bp@mPTEd7o6(Z9E`%O-|s8>SQt&Qyb&)ndFaQ;Xw0vxi&U?(eM`)xj{XO*syr%Tk~ zQZZwtp*MNWR+jm|_ES_+K4ALQTk@>eVIG8;Z)|t)P0IQvV*aHcSSJIRl>P*bGd+1_ zcF&qOKNopJx6(fX`nglnQ9;pw?ky)5*K5LPpy_0z%?n;ee}SIL07CROr^h#61UYEe z*G1-~ah3VKA3m4!ja=r0Lk&F`VFCFNBy^RF@d``_ai9c*j34k^msaJ)nX zZ!P^IZSTl;Uh2A%ZiE*-(3TIVB~asQch0lDUb)}FGxXAc3aFA&mK*aSd;5MNj?j&!IL0$5-L8F)89N* z2sH|oG~kj=G+T*sy<}89?@CLu8q^ju^Xmob9Q{jkQh_@4`rzegVog4|t=_BXS7|Kg z9*-?VoRFiS9>rLS(H5lRt>_@VavH8RpWIw#~zWI9;zZm2th)w#QU*2LD_*QP;7X$0UmVJ}(n zmJ(x4`qxA~=B5TkVh8TPLHAf|Vy|I+cS(w}RF`R+L^RV>;c2u7)p^ z_hNZ_FRqV0w9+K!KC)fL2(6)XPfDbKD62>9QCTNo%6?R5qNpqZ2~)GLY2;Oo;(9V@ z)(s`(VS~=C2jYT$wK+qPdH|A~vnZfTOj#H=nT@;dO;V zgbGGk!O)z21iuUX|sM z|A}~qtA@6p$&~2w;b4YtPG-l{@&CsHm>LWCn`_+fi9~QxBo}ZDq-S#MK0StR7m;KR z*bb;<7Z(8O*~M?p?*mSicG*iit|k3OJWfVYjei|>x2m9apKPkTduF}GD#3`O-&+mV z8p)Xw%llqw8PqOy^|7k8Uri->Y&V;1KQinO-pl}hM+AQNlCJB6{?uxg%+8=4w5g+< z?F?p4{|)U6z3qPcc+r#F$x-RpLY)J z!xI@6f(?XfHPhKrEP?lWb?$>h9N!nu;N7j!-3$~?Ehto4qbq{d{C$}N(dL03uJ1ob zNI;j%?ncI8&ege%`Srr5DuUNp+RZF%D?3kSI@3hn?R0i@c>l#}1+UlppiNAGV!g4)66PGC7=4e{T&e#{trO#+=EwX>FTO!d@ zSU%IDR92DO`Y|9)m2dD`)Ea?|VH;rhR8Qq4Ci&-i2G_4o_OENcEu8CEJ?G2FXgEf^ z{-$^Sf<-(7CjoiV4JFbz-05=}O^=rPOnYUidpf7H#ry{7!ceQ9XgUsK3$*m{lnrD= za(~B}rsu@|51x=@6`OUh?~Dy9<}o_I@VoenSsn!o@)D58lhsb)!lit`L0AN}zKvB) z^A|VGwf8?`7W$qL?2rGr8s!=pCxC$YwSH1Dvq6mrL4Z+RvE{9B+Y`$$8C9rdApX!5 z^8yenEBtI8hjZ?T*^?dz8`>82LjfK})5Ym`>!kSP%(n}9XGl}Ip%zScd3*NKB)7t$ z{ZMrvUiBAJtw1ncjByNp8&(21&CvGxK`O`rXc zJtLP#!+3$cqYUF3bN3Vs_uLo1`wb?tmu9H^`MaANi=_}5Nl5aZ!qhb+2WNPbI8}ap zN;p1E*?XKGVi_cA;0pz@;)c=OG^``*P;=>WKTs-`A=4XnJfpK97qq9)Xu;@SI*Se03`6tgS2$98bTTHEXvy zpHM{xTux5B9{uquKrLc0+5Tt}y5ObH^r`(wt0FlS>V>RN^ZN;F>S*y^F8Yk;u&b(F3$aEj$es&QEnfGkHojCWgb^0qJ+_$T`x&``zUVa zt4J?&y3Vik!%);mG!W^6$l3Wy3a7Fa@xj+WkDEVh|EdmYaO;z4GCiGtC?0G}=v*aq zoL})v4e+T!bZsO%6CoZ3=_)84xm={Mnfreryv!{B`oV1Zsw!G%+O)zOs|aQi25mpC zrfXXqx{uZ$$m-r5yTFhjBZb{8XY*3n$DZ|6ya#C2#&3QhZf*w6%S7}9_pAe?^_RZGHzzTsyuekRn=*m_ys;5N zXEA^51-wyS4qUW$-m$m0_cz&JQ5@?(q!c7vcauE{xjt$?dc5+Bzd0*fF+N$X4zycu z9r90;SscymK)az^XCNK!a)j7yP{8ZX95j_T_IMu}nIX1pQC;S^&M0f_WIh-yh};BX za45;2{At4yf<)(6)T^jDQT%2CUDI9mU9a)83YT9X2n+pz7HTb#R{^)InC^&@o&PTL zga{FyjwmIG_FV8-fxM*x&ZErt3bG%Gk`UcXG{uw)*Kn%jphvc5wnjas`+1z6u>Ek) zI+>x202E6Tl7pw&I7{JUUQ}bkf7=r6bkbA?>3iCb=^)1hVxcStqRfgrWs#=@JtXg}iR&(y#5wUPWXgUKjs+{a&3v0^zHd(E|itJ7TD z@4ETNO&e5LyxBV-MnRY_;1e=CBJ&8kq#+Vh#?CY1j`8+j(KO6iFXppvJ%s3b3hX^T zNv9HU*JA`Gm@Zij(0O%x&HV;k^(lj*t7z?yYRz#%UHU=>L(Y#K&Dq~P+m*W}r%OAf zrsX5|hp81aUq#IK+Cqi7;xBz1qNaa{Ei!!iRJSaQ_V#MpD<@#ZTy(edB6MHPk5n&j zusQws($FA2eW~BZtu@UrvGiU?Yq9Blie`n8{dMX4ALUU@?3Bq9WzUx5L^M#fMN&`7 z2cfrcscRYnAyTWM=|8opv=M4?E_)sn_O!(|ZMMCz)U+2>j&73hxiG6)xH^|P{hfid5nttAtU&-=>p-lcrEgz*^Zn7ukXHn! 
z3i|iSN{+vEAs?SH%b%%m9(jxFFaE7<{F)rFXDT`K4^3KAqE#&T87pT_NWA7B5HIS_ zw6gjj3waZc^mZ-#raG^(;35Fh3&)4U;YX{Txp#Md?-~wyy!`xL_4V}u;u09h6F^ML z-rhdHu&{9OF$8z_^Ue^9h=|BX8NgluOkRN82U^0wW9gJl9YC1Wl`ddaKpG^hQCLtA zGBKe;OGh^@^r(fQqz%v*SscyP7L}AlI2JZEeC09jWh=1k2^X;J#sG}gT$?u^K!0R< z=RG36dub%=)wT@q7N-GhnQI<*O!EN5DV&IDq}gq=o7T{Au`zCEXQ#BE#sYpYElqL< zA=BKu@&%*<=tLY(D}!GrbHME%0ieMuG!UTOU3a}9=mg|s0De~WbbeHz_OU4-;ju>y z5IO>IBF_%KhW7wtiX}j%-#ZX7BT2+0tqzC-p%khlaEOYgwYI)mJzF!C0Lc2Xd^VyU z?yb{ekoVW00ZvKYd#j~tfJh49ID7WuMGEIP7h*Q>&qgvI8lRT^#N-K+a`;PvO9s{;DK(ymnU=~84j6u*{BKAcJ{rxKgY(gw?DoKwpCf7Md4^JZdc z-)dsX+8@FhKB8Y!hN$?6oMLp>ftmRa$ZXjzQ*|H)i1$d}aWu#nQ^J@v2%|kR^2^c8 zAv9vlTp7I+g=5d@0e{_<<^@nDa2b&3ti(Z&^#(w)XLp7O`h0G)$gw{sgtdHyBn#iJ zKio#GG!ft^#I@jT#^FY$46p140%e;ObGf}YtK+LpGAkTIVC=iJhIT(b84$|aCk*6;_aVjL(!orj7OOv18L6@u3M2Uf%2K{l0?Qr4<`7jDR3=kNTIqA8h`Pv`YZdqXhb-8JJwq zJd&Ay+f7E5V0=RG-E4ZJW*Kp3u=+l*^h`%~UVGl_j-dKP`exQe>!xTCH?Ng#J)z<+ zXp7?VLf1<4VgLLLoRgfpVJtEF<2l{MB89cgg6=YO%u$E+yWe`afp_i!#U&3%A7};= zn+CeC6c~(S*)U%=5!^^)qHk`gS{5%Zo76j92E&>ykBN>)<)b$btY-((_w8YiEmi%FOdnE;D{3no|xxs%G$H-B02ly+G%8x7OZmL>xl$T2N#q1w`#SWNV^Gi#jb+fTzu=SXuGVa;*0{eW0!i?p|dA%hrQby zt_RHIWCZ$6kcGPExlkt%luv%*9H~JA{A#zKO(UMBJbZgKv)oy{Osw*$PIQcE6;22 zdp&7?u#-xyq@Zf}lMafjK+*InuZ@Zs67vSldd`j;0G29>k4d5CAU;Frrk2lT1C#+> z2doEwCF0WvX;=YDN%XHgo?W`Y!m(&+X@3t7TldG(EGxnr8%1){9TDdIvGP95!^6Wr zjGK5%We+@F%{RF)0fJ`qV8oeaSYX;<-yQtW-p&rEWfai)0R+qh{4VVPAq%Q^Ukvo7 zo6OzZ+<-T0Eqkz*P98?5_FTnoHe&cf;xh-g3yO*$<>k+JMzZO2@|;~vjAdc!g9&^+XWq>gtkBHRp*Q_O%WF`%zo_7a*Gh2(&yA zzgj_hyvqLPO8`b(3{%nmTC+AUprf%Tv=4-=01qVaDk^;>;D7^oURG8NnZ@Rha-U>G zkjA}&NH55&Z(g2Js4E%*B|mYSM!G_U8s+KcER`3V;{0ioh0=U*S~6LVF&|sahkecquTyjJW{{tgF*nB! zcH&q|2ydsdWFDhp6%VmtX65!7m=T{El>v~7j|?RGsB5etF?WKOVS$X@ty>BZYQy|o zjB}Dyx{LN}i^!UbU+yW;h?;0r%VqZp(3%fZfK0GVnpos124x>_2W&A zz+O}yUHQrV9j-G;_n4(%caklj9+(kJjO@7e&V=9MUk&!5ONOtGZIVLRr!Vf_)0QP{ zbKMzMV^S*bt?J?=24;cIO^Z7LTnQo?3hHNN<<2U-zRxm?R}q4fBe3vR8hoUG^_}>` zH%IUh)zB&;xTPjw9DSY>V^Ibg@rB(e6hU5kq-%b30bgE>4MeKad?sNrFq`BnAc%ac z*hAdU!Ff1fS_i^c-uQrYDFOX><0tow=wD9jxPESjOLI1Z4pfCN(!~D=m^z17WhMHH zjobKMh8uVRplJRy5w8*HsWBRZ6vo+Y#D7U$n)UGNd7z8_$Yf`y>k+M0cGed4ZssZw z@L2OXeg=ExoxKVA-u9`w%I^7{B%g5OAX$*a>b!q&56^j{Eyn`%?s7)F6OX*aMO++g zuC0s+T?Jdu7Mk%Lzjc&eN^F~atJ~8jlGedpldXy;3)!WA{`fcpL8hP!N#}wO0Q`#eQ-hMe>^>$8wc$3zK z@X0)!*f-inVS?GGZy+BF#_e5rr3=OE$n))6{#1gYlD^^-b80WAMkWKvuj2j#37%_o zZ+&7fz9o)U!I92Mf0y+q%#LF}n~&!G(R^J=OkQqgrka@gw2c%)8zA|ueuPh)x%@lD0n3$7OQ+Ez?7mtep9qiGI8s7^WfRa=7&6|Se=Jb>l z@^^aQ^Yeqzu?g2)#cCFhff-S%Ecd@$hA&dBY~t)Q`TSu3!(SpdUtpb^fgQfrN}H>mb!~_WDg#( zd}Sg~*go7Ye0JAKmA`EoIwtyL4`AUb^$y~9Zq8WKZZ%ig%ePT%UBQCzGaiXs2zURw zWkm+^bAP|RX_%&6djXmJ#6QDaUynx|PPNPI!N=D5@@P;Hrzh58ip8UTfOTkEy+h*? 
z-i$-C%tEa~%!QRRv6`#kP2lncW^R8s*U$D7A0Lz z&xu(Vyn9}hW|DmH;(v&&`FI}Q!!u!SmWnYYQ^c1yOB~Wdus$AOlpa{l+Pr%b3wmWQ zRdM}?j(3Y3N#WDoO51D|L}jLhVT#!~t{}zQ`$nvw_U{C-9+U){=r; z^M&slCNIq7gSl9t4;zfE&Q zK63KcTvQEX8>3~{`ImKh#vkU1I?rPVa!WWf;9@=eqsD$M4+RY;_v*Q`o%>YNPuK46 zV)#Y56V>xMX=jj~xY^yZq~U*6D9JPQ`Z)I-x~UJ*|8)&N8qNIc8m7cEIA_aj+{((W z4&EletZ+}V^3oHMC@!T&cz+18927yZFIt zI7GjVb9R566bv8FZvjICLbMbcB!nkt&h@KHcZH1a=XNybyK{7% zcf?)bvJ{DW`n~WkU(iB(OFs1|B@WipYMLI~zY2?GO0}q9cys8`7tFCL?DZ=hfH-0X zt}gq>{`tK6tuUu;Tc(W(J7uDOM7%;RV{(V07$T`PU_9KwXDR`Q*AQjWKh)ABG8%9v z+ysB$2-f8=e9_CujRe50$EW1pKqEAXWpy?@YBmvi;j3O153;YWpy~|aW=V)Yn)k37 z>H7LUHvBcO9WXEFSm0OF!1Qavh=8^<8q+{WI;Yvv-SMfyKMh(!#!HFc8kr(-iEJuf zWyxQ^Dgox{aJE`SPp`bZ98g%Zy*XWnzFMh403npLwhSj{M1B2U+!b){w8_H&Y1pK+ zwAt;tr6m)9ig&(ILu`aW(&vkIp)w}CYPIEaiPqRCkm3U4am(^Ew$|1Hz_FjHGL2b0 zwi(Tl0y1e}UX=JJSVKep`kSP$yJ+fOII7XWbp z0wC96kGW!Q8v=DBfB*gs&?fuv;X}HFw{u#prl#h<%#=aG>n*^=UMr=iH{a}5s5QnU z{E2*XAOZH15wJJE2M0F+iJ+sU7BY>=f5h+?yQ4&z9Qgp`q+Ep8Nr+&b65>JkQj|&lhCvig_#-f6O7UxwlX{-G=-3r`&m@m`*AG``%8K}w%4rUd1{5$^p6wgbA;4lTN01G*~?mShG=&)+y(1fh_yJ&Lau%EVY4KGnZ%M1 zMeq+YmwT9EX=eOt$R{mwCj+{L72;wu0{O^J+~)YIocV^x8dko8i;bun4I);PKSc0Z zYUCqtU>s77$+!ecgiR9SW123ab_oxp=ze}A24N=-Z&V{*S4w4jF&FX;Awtv7_Kr8l z5E8jb;jWJr0HW`X-1ZDKhjF-{-26YL-U2G>uKOMzI;25Bx&%R5x*H!ll#)ieLAs@- zM39gY5b5q38l*vD=0|K)w&_xJy<#jFLhhMCXI+;i_f`|NY}?QyOnJ8jcM)a@*p zbW0Q6xnc9G8^@FHz6zlgS*JCZe$*m|T;t(unAgZO@EwC;TS#&6L%}Sbev?#12MN;U z(WP(VADIuuy`oX;OMMY__$~Z2{!`&jv{xhG;uq+lK;zh$`LGd;4W^3N7w1{HD!{$6 z+}gX~`sAl~`_bNGIq7VDxz_w-fGoYKeYd!!xx%O7SobKy_C0Nr{taXK#tEqsnJ>Gp z7jH&tE$6qqmcoX?&2Fc8#|}*?lDOH;P5bMf2K3qjBSz~gWzfaF;4A|&MPAKneqo)J zShL%+bJIb_ZUT6HfI%Dp;Gu_Gd)Z?n=<&`01SZ zWrhsZ-C%9Lf=IWWR*roUeWv;N1X3>j9F_`acsAV?JI-!4cEV55)g|=%={M+#&`i&k zU(yb(H0t}*6&{RxDY49?;w%e(TQI57lF7>N?UD17&7J+bI?oLklHchgulS-oa0CU6k=fxwGxYF<^!>k)AdHerI|xl!qT6DIJ|YK!>rXg!(ekf_7BmXyEYsGVeGj>y<*fQ1Z>q&DVU= zCNh4f&dvMuxVVlWv`jx9qUX;?M@Ga+l@c=ldX9s!-5{b#^`l)0%ug_J1V;9F1eTw_SCp?CF*BoN+NWfe_~xyW zDs$1&C#l|jApZorjYd!!>)%Byo+suI@amB!-c`Kkvkx>i?&=hf0-6t;smy(DIQM;g=e)}`% zltgN$pFpcK5g*%fUQfm%ElI-J&k_WqyW?LXgxfbwXEKHkw13%!;OKTrNg6i4wERY~ zZ8&qe$L>HAk~R=6zTu%g)*^7(!|it0!`_4hamdvj3M~ZIOzxEC`Jzx|DF&@#(u7v} zY4}OYBheg=u;IvHSfciK@<_;Xp-MA9q;kAQvrnZ#a}o2L+pbHR?jn8MbkdD}ueQn@ zTO`vPR(m70!IPUi8{>cDns}JshZI){{W89g`Z>NigB+(8ox6C5Rrw>7+4iD&_@D5o zD}?o`@(wE#=HK+C)M8ZcS+G7x=5A&DqpEw%)7AV*Q?HY*fWu7PI`@O3wR2sir(n&C zV;A0bwgO zliWg!1^nGdJ|FkLxzA*BZ0j8t{*-=;`4)D+Sj-*#h)Z0qzSej27_W%mDR0j|g1^qAT- zySgr9YNcfLy%87Hr=8QWOzkCeE;_p`)G?oTcNRFfnxkISw4TMqV-lhKTEk7C%NYFW z<6yUl4av|N{2lU>`k(Vml?!`vvO`NCI5xqoV&yH9)c1U8I!WU9@AFG8Lg7oyL~qqK~?kpM>bwPElwPp9mkKR-)Kuwr^70cd!O5SA$=FCU?d7!wo2 zASfufM+O3}+~@Y9%r@cs_v=1jxg0(f3ViyQUI)8s0uDuET>0(WBk6Qhm@?Y}&K+Ja zm3kD8ZS!bwsjLNl1{K-n0-ZqH%xkrRFw~jltAtD;l-L>`yzW585pgzl7W=aCY2km) zy^$rtMDSykc=67qTQ(ZxLx2;fO4`^<$-8)JFYubB|xAD z37WAR8-@9^a0?oBFjPfE+^4NtlF`Y2|0cRxX0hW?7Jq*FL(gC1$i4gP!GSa7^(l7c+XTOC|F3s^c%o{G>xEd_|sRlbSv$W0we2;OLsT7JNOu zQ3$iU406f;qf{oP!0y`}WK^9S(|@%Y>a|mf5)W!J))N8UuQkXmnv8o?00cFqCvF#97O5oxE47;4ycuxpWnNglaZ7}%yT9bCIpOsx50jX z16|C>@&)TsOO=$0XOi;O#QILj{`kbT{WYzJ2?8*k`1b9ag^kTG;6zwEOa2#!ij_6` z>gozEC1zk?AaBS0vb?l82f*cHD93V)`U84GI*yE?BCK)YT zJq`6&QFJw9^NeS7k{+g2i4>C47Kq1AV`nca61Pa2~6 zz<{S8F4Mj<=K}%63n%2cWO~~J_lf-yVNl*9PP>z2h(^L&x*n$TD_Ya0Fw{4+vEHj3 z=f^YpqbnC4Yd;jei~*v%CNb8~<%$9|PbYD17AtYbnyuT|NoEWMs5ygfiVEs0OO<^H z8hdQzS|?wtK6cl4VBcMg{p}cZh5h-hq}g`5%k%~6rkj2!b9Fr2r=;eR9q6cyX3e=` zTe9GKwtU)Cl<_srVVF`>)X7mh+vhhRXCOiaW`$NRN+mPuqSx8N=dg|39$R8npw#RCTR~8kC_!i zfZ74J^>y=YepW~!8<(Evg#{!hf-|K6^6&s)r`+vO|6+eu*2;>h 
z-N`B2VJul`G-%9PrN?PL%^h{PKbc}EIXs-q10b*);eA&7J5P=si7fY>Ffu6qlrYk` zZhPCsc0RK)PW>8+DVA%yW11}O{YZ0yi~d89C?E|zQcS;szj8d8e|0~dW^wdxOfZE({9&8TSf9|VyzSO|>-;OiXHYoov4^f7m1QOB1_825RyJ-bc@s(#Ash0OTM#Fku1kwONghxg=Bd7E3DH!hG9V`Z;F=XO2wS>A1i#BQI(O90F61*n zyW+NgTBacxcD@{SeqwsqcXYS9_}BE}_am#}FrYq3PJ=O<6+Ant@A-s*NcW|0V8SpIe@)U z=Y=BGElwr3Na^Jjnfi6N`(glX^f8hSX#2;@YlEji+dV230+W2o-)T>*Xs;{Kkju*c zy~#i1T!#BzOJQUKHyx9YI`ZOx&FHTn0rzcs${|O;0j160|L-lm76=42_!?q&)4E)R zA9U#VT5a(8x4#{$mjR}>UPr>ls{exWdEKKC0@nKD8bB8LTdwGxCjMXDvfldp*k;@v zfO!fWF&z%_=aQ9^V+Byah2`T%ZPQ9?>*{PWbr@8vQG7z*IvD zpUYAwfs?W@k4^)!*%S$+F#c2K1db%)ZrYqC{7-H)HM^Qoz<1;87q_c>&aLS0)~2umj%^+5B1u4=LJlUM%( DnFbz4 literal 0 HcmV?d00001 diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index 1eee97f0..dcebecf5 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -1,5 +1,5 @@ -Abstract Model -============== +Abstract Model Representation +============================= The Abstract Model representatin allows a separation of concern between translation and the building. The translation will be called anything that happens between the source code and the Abstract Model representation. While the diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index d53e26db..043805bb 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -1,11 +1,15 @@ Structure of the PySD module ============================ -PySD provides a set of translators that interpret a Vensim or XMILE format model into a Python native class. The model components object represents the state of the system, and contains methods that compute auxiliary and flow variables based upon the current state. +PySD provides a set of translators that allow to build an original model into an abstract model representation (AMR), also called Abstract Model. This representation is based on a series of Python classes that allow to have a version of the model independent of the source language which classifies its elements depending on their type and expresses the mathematical formulations in an abstract syntax tree. This representation can be used by a builder, which allows to write the final functional language in another programming language. See the example of the complete process in the figure below. -The components object is wrapped within a Python class that provides methods for modifying and executing the model. These three pieces constitute the core functionality of the PySD module, and allow it to interact with the Python data analytics stack. +.. image:: ../images/abstract_model.png + :width: 700 px + :align: center +Currently, PYSD can translate Vensim models (mdl format) or models in Xmile format (exported from Vensim, Stella or other software). The only builder available at the moment builds the models in Python. +For models translated into Python, all the necessary functions and classes are incorporated in this library so that they can be executed. The Model class is the main class that allows loading and running a model, as well as modifying the values of its parameters, among many other possibilities. Translation ----------- @@ -20,7 +24,8 @@ The internal functions of the model translation components and relevant objects abstract_model -The PySD module is capable of importing models from a Vensim model file (\*.mdl) or an XMILE format xml file. 
@@ -20,7 +24,8 @@ The internal functions of the model translation components and relevant objects
 
    abstract_model
 
-The PySD module is capable of importing models from a Vensim model file (\*.mdl) or an XMILE format xml file. Translation makes use of a Parsing Expression Grammar parser, using the third party Python library Parsimonious13 to construct an abstract syntax tree based upon the full model file (in the case of Vensim) or individual expressions (in the case of XMILE).
+
+The PySD module is capable of importing models from a Vensim model file (\*.mdl) or an XMILE format xml file. Translation makes use of a Parsing Expression Grammar parser, using the third party Python library Parsimonious to construct an abstract syntax tree based upon the full model file (in the case of Vensim) or individual expressions (in the case of XMILE). The translators then crawl the tree, using a set of classes to define a pseudo model representation called :doc:`Abstract Model `.
 Its structure is defined in the following document:
@@ -32,7 +37,7 @@ In addition to translating individual commands between Vensim/XMILE and Python,
 During translation some dictionaries are created that allow the correct operation of the model:
 
 * **_namespace**: used to connect real name (from the original model) with the Python name.
-* **_subscript_dict**: Used to define the subscript range and subranges.
+* **_subscript_dict**: Used to define the subscript ranges and subranges.
 * **_dependencies**: Used to define the dependencies of each variable and assign cache type and initialize the model.
 
diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst
index add6b36e..399893b3 100644
--- a/docs/structure/vensim_translation.rst
+++ b/docs/structure/vensim_translation.rst
@@ -74,6 +74,11 @@ Not all the Vensim functions all included yet, the list of supported functions a
    :header-rows: 1
 
 
+Stocks
+^^^^^^
+Stocks defined in Vensim as `INTEG(flow, initial_value)` are supported and are translated to the AST as `IntegStructure(flow, initial_value)`.
+
+
 Subscripts
 ^^^^^^^^^^
 Several subscript related features all supported. This include:
diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst
index 9b0ea94b..314dcbe5 100644
--- a/docs/structure/xmile_translation.rst
+++ b/docs/structure/xmile_translation.rst
@@ -75,6 +75,10 @@ Not all the Xmile functions all included yet, the list of supported functions ar
    :header-rows: 1
 
 
+Stocks
+^^^^^^
+Stocks are supported with any number of inflows and outflows. The stocks are translated to the AST as `IntegStructure(flows, initial_value)`.
+
 Subscripts
 ^^^^^^^^^^
 Several subscript related features all supported.
This include: diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index f035d2bb..e620add9 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -333,7 +333,6 @@ def build_function_call(self, arguments): elif self.function == "active_initial": # we need to ensure that active initial outputs are always the # same and update dependencies as stateful object - # TODO: update calls as statefull object name = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_active_initial") final_subscripts = self.reorder( @@ -1094,8 +1093,6 @@ def build(self, arguments): reference = self.section.namespace.cleanspace[self.reference] - # TODO lookups are passed as a reference first, in that case we will - # need to replace () in the lookup call expression = reference + "()" if not self.subscripts: diff --git a/pysd/building/python/python_functions.py b/pysd/building/python/python_functions.py index 75f986b1..519a9470 100644 --- a/pysd/building/python/python_functions.py +++ b/pysd/building/python/python_functions.py @@ -35,8 +35,8 @@ "vmin": ("vmin(%(0)s, dim=%(axis)s)", ("functions", "vmin")), # functions defined in pysd.py_bakcend.functions - "active_initial": ( # TODO replace time by stage when doing a non compatible version - "active_initial(__data['time'], lambda: %(0)s, %(1)s)", + "active_initial": ( + "active_initial(__data[\"time\"].stage, lambda: %(0)s, %(1)s)", ("functions", "active_initial")), "if_then_else": ( "if_then_else(%(0)s, lambda: %(1)s, lambda: %(2)s)", diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 286109f2..e18fe849 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -57,11 +57,6 @@ def build_section(self): element.identifier = identifier self.subscripts.elements[identifier] = element.subscripts - # TODO - # 1. split control variables, main element, elements from other modules - # 2. build elements (only build 1 time!) - # 3. write model - for element in self.elements: element.build_element() self.dependencies[element.identifier] = element.dependencies @@ -561,7 +556,6 @@ def build_element_out(self): def %(identifier)s(%(arguments)s): """ Real Name: %(name)s - Original Eqn: Units: %(units)s Limits: %(range)s Type: %(type)s diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py index d004f0a9..8fc56c8f 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/building/python/subscripts.py @@ -17,7 +17,6 @@ def __init__(self, abstract_subscripts: List[AbstractSubscriptRange], self.elements = {} self.subranges = self._get_main_subscripts() self.subscript2num = self._get_subscript2num() - # TODO: manage subscript mapping @property def subscripts(self): diff --git a/pysd/py_backend/decorators.py b/pysd/py_backend/decorators.py index 195a5c77..72de509a 100644 --- a/pysd/py_backend/decorators.py +++ b/pysd/py_backend/decorators.py @@ -38,6 +38,22 @@ def wrapper(*args): return decorator +def metadata(name, units=None, range=(None, None), + dims=None, type=None, subtype=None): + """ + This decorators allows assigning metadate to a function. 
+ """ + def decorator(function): + function.original_name = name + function.units = units + function.range = range + function.dims = dims + function.type = type + function.subtype = subtype + function.args = inspect.getfullargspec(function)[0] + return decorator + + class Cache(object): """ This is the class for the chache. Several cache types can be saved diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index fbeeee50..a4568662 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -85,13 +85,7 @@ class External(object): """ missing = "warning" - def __init__(self, py_name, final_coords=None): - if py_name is None: - # backwards compatibility - # TODO remove in future - self.final_coords, py_name = py_name, final_coords - else: - self.final_coords = final_coords + def __init__(self, py_name): self.py_name = py_name self.file = None self.sheet = None @@ -335,11 +329,6 @@ def _resolve_file(self, root): self.py_name + "\n" + f"Indirect reference to file: {self.file}") - if isinstance(root, str): # pragma: no cover - # backwards compatibility - # TODO: remove with PySD 3.0.0 - root = Path(root) - self.file = root.joinpath(self.file) if not self.file.is_file(): @@ -704,16 +693,16 @@ class ExtData(External, Data): """ def __init__(self, file_name, sheet, time_row_or_col, cell, - interp, coords, root, final_coords=None, py_name=None): - super().__init__(py_name, final_coords) + interp, coords, root, final_coords, py_name): + super().__init__(py_name) self.files = [file_name] self.sheets = [sheet] self.time_row_or_cols = [time_row_or_col] self.cells = [cell] self.coordss = [coords] self.root = root - # TODO remove in 3.0.0 (self.interp = interp) - self.interp = interp.replace(" ", "_") if interp else "interpolate" + self.final_coords = final_coords + self.interp = interp or "interpolate" self.is_float = not bool(coords) # check if the interpolation method is valid @@ -735,8 +724,8 @@ def add(self, file_name, sheet, time_row_or_col, cell, self.cells.append(cell) self.coordss.append(coords) - if not interp: - interp = "interpolate" + interp = interp or "interpolate" + if interp.replace(" ", "_") != self.interp: raise ValueError(self.py_name + "\n" + "Error matching interpolation method with " @@ -750,16 +739,7 @@ def initialize(self): """ Initialize all elements and create the self.data xarray.DataArray """ - if self.final_coords is None: - # backward compatibility - # TODO remove in the future - self.data = utils.xrmerge(*[ - self._initialize_data("data") - for self.file, self.sheet, self.x_row_or_col, - self.cell, self.coords - in zip(self.files, self.sheets, self.time_row_or_cols, - self.cells, self.coordss)]) - elif len(self.coordss) == 1: + if len(self.coordss) == 1: # Just loag one value (no add) for self.file, self.sheet, self.x_row_or_col,\ self.cell, self.coords\ @@ -791,14 +771,15 @@ class ExtLookup(External, Lookups): """ def __init__(self, file_name, sheet, x_row_or_col, cell, coords, - root, final_coords=None, py_name=None): - super().__init__(py_name, final_coords) + root, final_coords, py_name): + super().__init__(py_name) self.files = [file_name] self.sheets = [sheet] self.x_row_or_cols = [x_row_or_col] self.cells = [cell] - self.root = root self.coordss = [coords] + self.root = root + self.final_coords = final_coords self.interp = "interpolate" self.is_float = not bool(coords) @@ -820,16 +801,7 @@ def initialize(self): """ Initialize all elements and create the self.data xarray.DataArray """ - if self.final_coords is None: - # backward 
compatibility - # TODO remove in the future - self.data = utils.xrmerge(*[ - self._initialize_data("lookup") - for self.file, self.sheet, self.x_row_or_col, - self.cell, self.coords - in zip(self.files, self.sheets, self.x_row_or_cols, - self.cells, self.coordss)]) - elif len(self.coordss) == 1: + if len(self.coordss) == 1: # Just loag one value (no add) for self.file, self.sheet, self.x_row_or_col,\ self.cell, self.coords\ @@ -847,7 +819,10 @@ def initialize(self): self.cells, self.coordss): values = self._initialize_data("lookup") - coords = {"lookup_dim": values.coords["lookup_dim"].values, **self.coords} + coords = { + "lookup_dim": values.coords["lookup_dim"].values, + **self.coords + } if "lookup_dim" not in self.data.dims: self.data = self.data.expand_dims( {"lookup_dim": coords["lookup_dim"]}, axis=0).copy() @@ -861,15 +836,16 @@ class ExtConstant(External): """ def __init__(self, file_name, sheet, cell, coords, - root, final_coords=None, py_name=None): - super().__init__(py_name, final_coords) + root, final_coords, py_name): + super().__init__(py_name) self.files = [file_name] self.sheets = [sheet] self.transposes = [ cell[-1] == '*' and np.prod(utils.compute_shape(coords)) > 1] self.cells = [cell.strip('*')] - self.root = root self.coordss = [coords] + self.root = root + self.final_coords = final_coords def add(self, file_name, sheet, cell, coords): """ @@ -890,16 +866,7 @@ def initialize(self): """ Initialize all elements and create the self.data xarray.DataArray """ - if self.final_coords is None: - # backward compatibility - # TODO remove in the future - self.data = utils.xrmerge(*[ - self._initialize() - for self.file, self.sheet, self.transpose, self.cell, - self.coords - in zip(self.files, self.sheets, self.transposes, - self.cells, self.coordss)]) - elif len(self.coordss) == 1: + if len(self.coordss) == 1: # Just loag one value (no add) for self.file, self.sheet, self.transpose, self.cell, self.coords\ in zip(self.files, self.sheets, self.transposes, diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index ea751e69..28aa9242 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -179,49 +179,6 @@ def pulse_magnitude(time, magnitude, start, repeat_time=0): else: return 0 - -def lookup(x, xs, ys): - """ - Intermediate values are calculated with linear interpolation between - the intermediate points. Out-of-range values are the same as the - closest endpoint (i.e, no extrapolation is performed). - """ - return np.interp(x, xs, ys) - - -def lookup_extrapolation(x, xs, ys): - """ - Intermediate values are calculated with linear interpolation between - the intermediate points. Out-of-range values are calculated with linear - extrapolation from the last two values at either end. - """ - if x < xs[0]: - dx = xs[1] - xs[0] - dy = ys[1] - ys[0] - k = dy / dx - return ys[0] + (x - xs[0]) * k - if x > xs[-1]: - dx = xs[-1] - xs[-2] - dy = ys[-1] - ys[-2] - k = dy / dx - return ys[-1] + (x - xs[-1]) * k - return np.interp(x, xs, ys) - - -def lookup_discrete(x, xs, ys): - """ - Intermediate values take on the value associated with the next lower - x-coordinate (also called a step-wise function). The last two points - of a discrete graphical function must have the same y value. - Out-of-range values are the same as the closest endpoint - (i.e, no extrapolation is performed). 
- """ - for index in range(0, len(xs)): - if x < xs[index]: - return ys[index - 1] if index > 0 else ys[index] - return ys[-1] - - def if_then_else(condition, val_if_true, val_if_false): """ Implements Vensim's IF THEN ELSE function. @@ -263,48 +220,6 @@ def if_then_else(condition, val_if_true, val_if_false): return val_if_true() if condition else val_if_false() -def logical_and(*args): - """ - Implements Vensim's :AND: method for two or several arguments. - - Parameters - ---------- - *args: arguments - The values to compare with and operator - - Returns - ------- - result: bool or xarray.DataArray - The result of the comparison. - - """ - current = args[0] - for arg in args[1:]: - current = np.logical_and(arg, current) - return current - - -def logical_or(*args): - """ - Implements Vensim's :OR: method for two or several arguments. - - Parameters - ---------- - *args: arguments - The values to compare with and operator - - Returns - ------- - result: bool or xarray.DataArray - The result of the comparison. - - """ - current = args[0] - for arg in args[1:]: - current = np.logical_or(arg, current) - return current - - def xidz(numerator, denominator, x): """ Implements Vensim's XIDZ function. @@ -382,7 +297,7 @@ def zidz(numerator, denominator): return numerator/denominator -def active_initial(time, expr, init_val): +def active_initial(stage, expr, init_val): """ Implements vensim's ACTIVE INITIAL function Parameters @@ -398,23 +313,13 @@ def active_initial(time, expr, init_val): ------- """ - # TODO replace time by stage when doing a non compatible version # NUMPY: both must have same dimensions in inputs, remove time.stage - if time.stage == 'Initialization': + if stage == 'Initialization': return init_val else: return expr() -def bounded_normal(minimum, maximum, mean, std, seed): - """ - Implements vensim's BOUNDED NORMAL function - """ - # np.random.seed(seed) - # we could bring this back later, but for now, ignore - return stats.truncnorm.rvs(minimum, maximum, loc=mean, scale=std) - - def incomplete(*args): warnings.warn( 'Call to undefined function, calling dependencies and returning NaN', @@ -428,26 +333,6 @@ def not_implemented_function(*args): 'Not implemented function {}'.format(args[0])) -def log(x, base): - """ - Implements Vensim's LOG function with change of base. - - Parameters - ---------- - x: float or xarray.DataArray - Input value. - base: float or xarray.DataArray - Base of the logarithm. - - Returns - ------- - float - The log of 'x' in base 'base'. - """ - # TODO remove with PySD 3.0.0, log could be directly created in the file - return np.log(x) / np.log(base) - - def integer(x): """ Implements Vensim's INTEGER function. diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index fb684af7..9209d516 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1423,24 +1423,8 @@ def set_stateful(self, stateful_dict): for attr, value in attrs.items(): setattr(getattr(self.components, element), attr, value) + @property def doc(self): - """ - Formats a table of documentation strings to help users remember - variable names, and understand how they are translated into - python safe names. 
- - Returns - ------- - docs_df: pandas dataframe - Dataframe with columns for the model components: - - Real names - - Python safe identifiers (as used in model.components) - - Units string - - Documentation strings from the original model file - """ - warnings.warn( - "doc method will become an attribute in version 3.0.0...", - FutureWarning) return self._doc def _build_doc(self): @@ -1461,44 +1445,25 @@ def _build_doc(self): collector = [] for name, varname in self.components._namespace.items(): try: - # TODO correct this when Original Eqn is in several lines docstring = getattr(self.components, varname).__doc__ lines = docstring.split('\n') for unit_line in range(3, 9): - # this loop detects where Units: starts as - # sometimes eqn could be split in several lines + # this loop detects where Units: starts if re.findall('Units:', lines[unit_line]): break - if unit_line == 3: - eqn = lines[2].replace("Original Eqn:", "").strip() - else: - eqn = '; '.join( - [line.strip() for line in lines[3:unit_line]]) vardoc = { 'Real Name': name, 'Py Name': varname, - 'Eqn': eqn, 'Unit': lines[unit_line].replace("Units:", "").strip(), 'Lims': lines[unit_line+1].replace("Limits:", "").strip(), - 'Type': lines[unit_line+2].replace("Type:", "").strip() + 'Type': lines[unit_line+2].replace("Type:", "").strip(), + 'Subtype': lines[unit_line+3].replace("Subtype:", "").strip(), + 'Subs': lines[unit_line+4].replace("Subs:", "").strip(), + 'Comment': '\n'.join(lines[(unit_line+5):]).strip() } - if "Subtype:" in lines[unit_line+3]: - vardoc["Subtype"] =\ - lines[unit_line+3].replace("Subtype:", "").strip() - vardoc["Subs"] =\ - lines[unit_line+4].replace("Subs:", "").strip() - vardoc["Comment"] =\ - '\n'.join(lines[(unit_line+5):]).strip() - else: - vardoc["Subtype"] = None - vardoc["Subs"] =\ - lines[unit_line+3].replace("Subs:", "").strip() - vardoc["Comment"] =\ - '\n'.join(lines[(unit_line+4):]).strip() - collector.append(vardoc) except Exception: pass @@ -1507,7 +1472,7 @@ def _build_doc(self): docs_df = pd.DataFrame(collector) docs_df.fillna("None", inplace=True) order = ["Real Name", "Py Name", "Unit", "Lims", - "Type", "Subtype", "Subs", "Eqn", "Comment"] + "Type", "Subtype", "Subs", "Comment"] return docs_df[order].sort_values( by="Real Name").reset_index(drop=True) else: diff --git a/pysd/translation/structures/abstract_model.py b/pysd/translation/structures/abstract_model.py index b5383ccc..2c1f5949 100644 --- a/pysd/translation/structures/abstract_model.py +++ b/pysd/translation/structures/abstract_model.py @@ -246,16 +246,17 @@ class AbstractSubscriptRange: ---------- name: str The name of the element. - subscripts: tuple or str or dict - The subscripts as a tuple for a regular definition, str for a - copy definition and as a dict for a GET XLS/DIRECT definition. - mapping: tuple - The set of subscript range that can be mapped to. + subscripts: list or str or dict + The subscripts as a list of strings for a regular definition, + str for a copy definition and as a dict for a GET XLS/DIRECT + definition. + mapping: list + The list of subscript range that can be mapped to. 
""" name: str - subscripts: Union[Tuple[str], str, dict] - mapping: Tuple[str] + subscripts: Union[list, str, dict] + mapping: list def __str__(self) -> str: # pragma: no cover return "AbstractSubscriptRange:\t%s\n\t%s\n" % ( diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index ffa344eb..59a8a681 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -229,10 +229,6 @@ def visit_expression(self, n, vc): def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text - def visit__(self, n, vc): - # TODO check if necessary when finished - return " " - class SubscriptRange(): """ From b595cfa1ea3e11ecdb29a35e634b2103b72e4026 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 6 Apr 2022 16:20:00 +0200 Subject: [PATCH 27/96] Make non-back compatible changes --- .idea/.name | 1 - .idea/codeStyleSettings.xml | 13 -- .idea/dictionaries/houghton.xml | 8 - .idea/encodings.xml | 6 - .idea/inspectionProfiles/Project_Default.xml | 30 --- .../inspectionProfiles/profiles_settings.xml | 7 - .idea/modules.xml | 8 - .idea/vcs.xml | 7 - pysd/__init__.py | 2 +- pysd/_version.py | 2 +- pysd/building/python/imports.py | 5 +- pysd/building/python/namespace.py | 7 +- .../python/python_expressions_builder.py | 16 +- pysd/building/python/python_model_builder.py | 87 +++++--- pysd/building/python/subscripts.py | 1 - pysd/py_backend/cache.py | 50 +++++ pysd/py_backend/components.py | 22 +- pysd/py_backend/data.py | 4 +- pysd/py_backend/decorators.py | 99 --------- pysd/py_backend/external.py | 8 +- pysd/py_backend/lookups.py | 23 ++- pysd/py_backend/statefuls.py | 69 +++---- pysd/py_backend/utils.py | 30 +-- .../structures/abstract_expressions.py | 10 +- pysd/translation/structures/abstract_model.py | 8 +- .../parsing_grammars/common_grammar.peg | 2 +- .../vensim/parsing_grammars/components.peg | 2 +- .../vensim/parsing_grammars/lookups.peg | 2 +- pysd/translation/vensim/vensim_element.py | 33 ++- pysd/translation/vensim/vensim_section.py | 6 +- pysd/translation/xmile/xmile_element.py | 24 +-- .../test_circular_reference.py | 7 +- .../test_initialization_order.py | 41 +--- .../more-tests/type_error/test_type_error.py | 2 +- .../version/test_current_version.py | 2 +- tests/unit_test_external.py | 193 +++++++++++++++--- tests/unit_test_pysd.py | 41 ++-- 37 files changed, 443 insertions(+), 435 deletions(-) delete mode 100644 .idea/.name delete mode 100644 .idea/codeStyleSettings.xml delete mode 100644 .idea/dictionaries/houghton.xml delete mode 100644 .idea/encodings.xml delete mode 100644 .idea/inspectionProfiles/Project_Default.xml delete mode 100644 .idea/inspectionProfiles/profiles_settings.xml delete mode 100644 .idea/modules.xml delete mode 100644 .idea/vcs.xml create mode 100644 pysd/py_backend/cache.py delete mode 100644 pysd/py_backend/decorators.py diff --git a/.idea/.name b/.idea/.name deleted file mode 100644 index f06fb359..00000000 --- a/.idea/.name +++ /dev/null @@ -1 +0,0 @@ -pysd \ No newline at end of file diff --git a/.idea/codeStyleSettings.xml b/.idea/codeStyleSettings.xml deleted file mode 100644 index 7fb8ed00..00000000 --- a/.idea/codeStyleSettings.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/dictionaries/houghton.xml b/.idea/dictionaries/houghton.xml deleted file mode 100644 index fdb52ce8..00000000 --- a/.idea/dictionaries/houghton.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - houghton - vensim - - - \ No newline at end of 
file diff --git a/.idea/encodings.xml b/.idea/encodings.xml deleted file mode 100644 index 97626ba4..00000000 --- a/.idea/encodings.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml deleted file mode 100644 index 3bd5b081..00000000 --- a/.idea/inspectionProfiles/Project_Default.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml deleted file mode 100644 index 3b312839..00000000 --- a/.idea/inspectionProfiles/profiles_settings.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index 845831c5..00000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 06b38168..00000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/pysd/__init__.py b/pysd/__init__.py index 05ca59dd..5c967112 100644 --- a/pysd/__init__.py +++ b/pysd/__init__.py @@ -1,4 +1,4 @@ from .pysd import read_vensim, read_xmile, load from .py_backend import functions, statefuls, utils, external -from .py_backend.decorators import subs +from .py_backend.components import component from ._version import __version__ diff --git a/pysd/_version.py b/pysd/_version.py index 62fa04d7..528787cf 100644 --- a/pysd/_version.py +++ b/pysd/_version.py @@ -1 +1 @@ -__version__ = "2.2.4" +__version__ = "3.0.0" diff --git a/pysd/building/python/imports.py b/pysd/building/python/imports.py index 521ba44a..ed0549d1 100644 --- a/pysd/building/python/imports.py +++ b/pysd/building/python/imports.py @@ -10,7 +10,7 @@ class ImportsManager(): ] def __init__(self): - self._numpy, self._xarray, self._subs = False, False, False + self._numpy, self._xarray = False, False self._functions, self._statefuls, self._external, self._data,\ self._lookups, self._utils, self._scipy =\ set(), set(), set(), set(), set(), set(), set() @@ -72,7 +72,6 @@ def get_header(self, outfile): "module": module, "methods": ", ".join(getattr(self, f"_{module}"))} - if self._subs: - text += "from pysd import subs\n" + text += "from pysd import component\n" return text diff --git a/pysd/building/python/namespace.py b/pysd/building/python/namespace.py index 7781b19f..59f0a5aa 100644 --- a/pysd/building/python/namespace.py +++ b/pysd/building/python/namespace.py @@ -7,7 +7,7 @@ from builtins import __dir__ as bidir from pysd.py_backend.components import __dir__ as cdir from pysd.py_backend.data import __dir__ as ddir -from pysd.py_backend.decorators import __dir__ as dedir +from pysd.py_backend.cache import __dir__ as cadir from pysd.py_backend.external import __dir__ as edir from pysd.py_backend.functions import __dir__ as fdir from pysd.py_backend.statefuls import __dir__ as sdir @@ -16,7 +16,7 @@ class NamespaceManager: reserved_words = set( - dir() + bidir() + cdir() + ddir() + dedir() + edir() + fdir() + dir() + bidir() + cdir() + ddir() + cadir() + edir() + fdir() + sdir() + udir()).union(kwlist) def __init__(self, parameters=[]): @@ -129,6 +129,9 @@ def make_python_identifier(self, string, prefix=None, add_to_namespace=False): elif re.findall(r"^_", s): s = "nvs" + s + # replace multiple _ after cleaning + s = re.sub(r"[_]+", "_", s) + # Check that the string is 
not a python identifier identifier = s i = 1 diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index e620add9..c7d64eab 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -298,8 +298,16 @@ def build_incomplete_call(self, arguments): order=0) def build_lookups_call(self, arguments): - expression = arguments["function"].expression.replace("()", "(%(0)s)") - final_subscripts = self.get_final_subscripts(arguments, self.def_subs) + if arguments["0"].subscripts: + final_subscripts =\ + self.get_final_subscripts(arguments, self.def_subs) + expression = arguments["function"].expression.replace( + "()", f"(%(0)s, {final_subscripts})") + else: + final_subscripts = arguments["function"].subscripts + expression = arguments["function"].expression.replace( + "()", "(%(0)s)") + # NUMPY: we need to manage inside lookup with subscript and later # return the values in a correct ndarray return BuildAST( @@ -449,7 +457,7 @@ def build(self, arguments): } return BuildAST( - expression=arguments["name"] + "(x)", + expression=arguments["name"] + "(x, final_subs)", calls={"__external__": None, "__lookup__": None}, subscripts=final_subs, order=0) @@ -993,7 +1001,7 @@ def build(self, arguments): } return BuildAST( - expression=arguments["name"] + "(x)", + expression=arguments["name"] + "(x, final_subs)", calls={"__lookup__": None}, subscripts=self.def_subs, order=0) diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index e18fe849..0de8e641 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -367,7 +367,11 @@ def _init_outer_references(data): __data[key] = data[key] + @component(name="Time") def time(): + ''' + Current time of the model. + ''' return __data['time']() """ % {"control_vars_dict": control_vars[0]}) @@ -402,6 +406,27 @@ def __init__(self, abstract_element: AbstractElement, self.dependencies = {} self.objects = {} + def _format_limits(self, limits): + if limits == (None, None): + return None + + new_limits = [] + for value in limits: + value = repr(value) + if value == "nan" or value is None: + self.section.imports.add("numpy") + new_limits.append("np.nan") + elif value.endswith("inf"): + self.section.imports.add("numpy") + new_limits.append(value.strip("inf") + "np.inf") + else: + new_limits.append(value) + + if new_limits[0] == "np.nan" and new_limits[1] == "np.nan": + return None + + return "(" + ", ".join(new_limits) + ")" + def build_element(self): # TODO think better how to build the components at once to build # in one declaration the external objects @@ -468,7 +493,13 @@ def build_element(self): self.pre_expression = "" # NUMPY: reshape to the final shape if meeded # expressions[0]["expr"].reshape(self.section.subscripts, {}) - self.expression = expressions[0]["expr"] + if not expressions[0]["expr"].subscripts and self.subscripts: + self.expression = "xr.DataArray(%s, %s, %s)\n" % ( + expressions[0]["expr"], + self.subs_dict, list(self.subs_dict) + ) + else: + self.expression = expressions[0]["expr"] self.type = ", ".join( set(component.type for component in self.components) @@ -520,22 +551,8 @@ def build_element_out(self): The function to write in the model file. 
""" - # TODO: merge with the previous build to do all at once contents = self.pre_expression + "return %s" % self.expression - self.subs_dec = "" - self.subs_doc = "None" - - if self.subscripts: - # We add the list of the subs to the __doc__ of the function - # this will give more information to the user and make possible - # to rewrite subscripted values with model.run(params=X) or - # model.run(initial_condition=(n,x)) - self.subs_doc = "%s" % self.subscripts - self.subs_dec =\ - "@subs(%s, _subscript_dict)" % self.subscripts - self.section.imports.add("subs") - objects = "\n\n".join([ value["expression"] % { "final_subs": value.get("final_subs", "")} @@ -543,27 +560,41 @@ def build_element_out(self): if value["expression"] is not None ]) + self.limits = self._format_limits(self.limits) + + if self.arguments == 'x': + self.arguments = 'x, final_subs=None' + + # define variable metadata for the @component decorator + meta_data = [f"name={repr(self.name)}"] + + if self.units: + meta_data.append(f"units={repr(self.units)}") + if self.limits: + meta_data.append("limits=%(limits)s") + if self.subscripts: + self.section.imports.add("subs") + meta_data.append("subscripts=%(subscripts)s") + if self.documentation: + doc = self.documentation.replace("\\", "\n") + contents = f'"""{doc}"""\n'\ + + contents + + meta_data.append("comp_type='%(type)s'") + meta_data.append("comp_subtype='%(subtype)s'") + + self.meta_data = f"@component({', '.join(meta_data)})"\ + % self.__dict__ + indent = 12 # convert newline indicator and add expected level of indentation self.contents = contents.replace("\n", "\n" + " " * (indent+4)) self.objects = objects.replace("\n", "\n" + " " * indent) - self.documentation = self.documentation.replace( - "\\", "\n").replace("\n", "\n" + " " * indent) return textwrap.dedent(''' - %(subs_dec)s + %(meta_data)s def %(identifier)s(%(arguments)s): - """ - Real Name: %(name)s - Units: %(units)s - Limits: %(range)s - Type: %(type)s - Subtype: %(subtype)s - Subs: %(subscripts)s - - %(documentation)s - """ %(contents)s diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py index 8fc56c8f..043b52c4 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/building/python/subscripts.py @@ -1,4 +1,3 @@ -from multiprocessing.sharedctypes import Value import warnings from pathlib import Path import numpy as np diff --git a/pysd/py_backend/cache.py b/pysd/py_backend/cache.py new file mode 100644 index 00000000..25d76481 --- /dev/null +++ b/pysd/py_backend/cache.py @@ -0,0 +1,50 @@ +""" +These are the decorators used by the functions in the model file. +functions.py +""" +from functools import wraps +import inspect + + +class Cache(object): + """ + This is the class for the chache. Several cache types can be saved + in dictionaries and acces using cache.data[cache_type]. + """ + def __init__(self): + self.cached_funcs = set() + self.data = {} + + def __call__(self, func, *args): + """ Decorator for caching """ + + @wraps(func) + def cached_func(*args): + """ Cache function """ + try: + return self.data[func.__name__] + except KeyError: + value = func(*args) + self.data[func.__name__] = value + return value + return cached_func + + def clean(self): + """ Cleans the cache """ + self.data = {} + + +def constant_cache(function, *args): + """ + Constant cache decorator for all the run + The original function is saved in 'function' attribuite so we can + recover it later. 
+ """ + function.function = function + function.value = function(*args) + + @wraps(function) + def wrapper(*args): + return function.value + + return wrapper diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py index 6d4c41f0..21efe300 100644 --- a/pysd/py_backend/components.py +++ b/pysd/py_backend/components.py @@ -4,12 +4,32 @@ import os import random -import numpy as np +import inspect from importlib.machinery import SourceFileLoader +import numpy as np + from pysd._version import __version__ +def component(name, units=None, limits=(np.nan, np.nan), + subscripts=None, comp_type=None, comp_subtype=None): + """ + This decorators allows assigning metadata to a function. + """ + def decorator(function): + function.name = name + function.units = units + function.limits = limits + function.subscripts = subscripts + function.type = comp_type + function.subtype = comp_subtype + function.args = inspect.getfullargspec(function)[0] + return function + + return decorator + + class Components(object): """ Workaround class to let the user do: diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index 2ead4bf5..375dd32a 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -186,7 +186,7 @@ def __call__(self, time): if time in self.data['time'].values: outdata = self.data.sel(time=time) elif self.interp == "raw": - return np.nan + return self.nan elif time > self.data['time'].values[-1]: warnings.warn( self.py_name + "\n" @@ -295,6 +295,7 @@ def _load_data(self, file_name): if not self.coords: # 0 dimensional data + self.nan = np.nan values = load_outputs(file_name, transpose, columns=columns) return xr.DataArray( values.iloc[:, 0].values, @@ -306,6 +307,7 @@ def _load_data(self, file_name): values = load_outputs(file_name, transpose, columns=columns) + self.nan = xr.DataArray(np.nan, self.coords, dims) out = xr.DataArray( np.nan, {'time': values.index.values, **self.coords}, diff --git a/pysd/py_backend/decorators.py b/pysd/py_backend/decorators.py deleted file mode 100644 index 72de509a..00000000 --- a/pysd/py_backend/decorators.py +++ /dev/null @@ -1,99 +0,0 @@ -""" -These are the decorators used by the functions in the model file. -functions.py -""" -from functools import wraps -import inspect -import xarray as xr - - -def subs(dims, subcoords): - """ - This decorators returns the python object with the correct dimensions - xarray.DataArray. The algorithm is a simple version of utils.rearrange - """ - def decorator(function): - function.dims = dims - function.args = inspect.getfullargspec(function)[0] - - @wraps(function) - def wrapper(*args): - data = function(*args) - coords = {dim: subcoords[dim] for dim in dims} - - if isinstance(data, xr.DataArray): - dacoords = {coord: list(data.coords[coord].values) - for coord in data.coords} - if data.dims == tuple(dims) and dacoords == coords: - # If the input data already has the output format - # return it. - return data - - # The coordinates are expanded or transposed - return xr.DataArray(0, coords, dims) + data - - return xr.DataArray(data, coords, dims) - - return wrapper - return decorator - - -def metadata(name, units=None, range=(None, None), - dims=None, type=None, subtype=None): - """ - This decorators allows assigning metadate to a function. 
- """ - def decorator(function): - function.original_name = name - function.units = units - function.range = range - function.dims = dims - function.type = type - function.subtype = subtype - function.args = inspect.getfullargspec(function)[0] - return decorator - - -class Cache(object): - """ - This is the class for the chache. Several cache types can be saved - in dictionaries and acces using cache.data[cache_type]. - """ - def __init__(self): - self.cached_funcs = set() - self.data = {} - - def __call__(self, func, *args): - """ Decorator for caching """ - func.args = inspect.getfullargspec(func)[0] - - @wraps(func) - def cached_func(*args): - """ Cache function """ - try: - return self.data[func.__name__] - except KeyError: - value = func(*args) - self.data[func.__name__] = value - return value - return cached_func - - def clean(self): - """ Cleans the cache """ - self.data = {} - - -def constant_cache(function, *args): - """ - Constant cache decorator for all the run - The original function is saved in 'function' attribuite so we can - recover it later. - """ - function.function = function - function.value = function(*args) - - @wraps(function) - def wrapper(*args): - return function.value - - return wrapper diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index a4568662..82559d70 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -6,7 +6,6 @@ import re import warnings -from pathlib import Path import pandas as pd # TODO move to openpyxl import numpy as np import xarray as xr @@ -764,6 +763,13 @@ def initialize(self): self.data.loc[coords] = values.values + # set what to return when raw + if self.final_coords: + self.nan = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + else: + self.nan = np.nan + class ExtLookup(External, Lookups): """ diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index 5f4c4c7a..61389695 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -11,35 +11,40 @@ class Lookups(object): # as Lookups # def __init__(self, data, coords, interp="interpolate"): - def __call__(self, x): - return self._call(self.data, x) + def __call__(self, x, final_subs=None): + return self._call(self.data, x, final_subs) - def _call(self, data, x): + def _call(self, data, x, final_subs=None): if isinstance(x, xr.DataArray): if not x.dims: # shape 0 xarrays return self._call(data, float(x)) + + outdata = xr.DataArray(np.nan, final_subs, list(final_subs)) + if self.interp != "extrapolate" and\ np.all(x > data['lookup_dim'].values[-1]): - outdata, _ = xr.broadcast(data[-1], x) + outdata_ext = data[-1] warnings.warn( self.py_name + "\n" + "extrapolating data above the maximum value of the series") elif self.interp != "extrapolate" and\ np.all(x < data['lookup_dim'].values[0]): - outdata, _ = xr.broadcast(data[0], x) + outdata_ext = data[0] warnings.warn( self.py_name + "\n" + "extrapolating data below the minimum value of the series") else: - data, _ = xr.broadcast(data, x) - outdata = data[0].copy() + data = xr.broadcast(data, x)[0] for a in utils.xrsplit(x): outdata.loc[a.coords] = self._call( data.loc[a.coords], float(a)) - # the output will be always an xarray - return outdata.reset_coords('lookup_dim', drop=True) + return outdata + + # return the final array in the specified dimensions order + return xr.broadcast( + outdata, outdata_ext.reset_coords('lookup_dim', drop=True))[1] else: if x in data['lookup_dim'].values: diff --git a/pysd/py_backend/statefuls.py 
b/pysd/py_backend/statefuls.py index 9209d516..2709969e 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -5,7 +5,6 @@ class objects. """ import inspect -import re import pickle import warnings @@ -16,7 +15,7 @@ class objects. from . import utils from .functions import zidz, if_then_else from .external import External, Excels -from .decorators import Cache, constant_cache +from .cache import Cache, constant_cache from .data import TabData from .components import Components, Time @@ -1071,16 +1070,15 @@ def get_coords(self, param): else: func = param - # TODO simplify this, make all model elements have a dims attribute - if hasattr(func, "dims"): - dims = func.dims + if hasattr(func, "subscripts"): + dims = func.subscripts + if not dims: + return None coords = {dim: self.components._subscript_dict[dim] for dim in dims} return coords, dims elif hasattr(func, "state") and isinstance(func.state, xr.DataArray): value = func() - elif self.get_args(func) and isinstance(func(0), xr.DataArray): - value = func(0) else: return None @@ -1149,7 +1147,6 @@ def get_series_data(self, param): func_name = utils.get_key_and_value_by_insensitive_key_or_value( param, self.components._namespace)[1] or param - print(func_name, self.get_args(getattr(self.components, func_name))) try: if func_name.startswith("_ext_"): @@ -1257,7 +1254,7 @@ def _timeseries_component(self, series, dims, args=[]): if isinstance(series.values[0], xr.DataArray) and args: # the argument is already given in the model when the model # is called - return lambda x: utils.rearrange(xr.concat( + return lambda x, final_subs: utils.rearrange(xr.concat( series.values, series.index).interp(concat_dim=x).reset_coords( 'concat_dim', drop=True), @@ -1274,14 +1271,14 @@ def _timeseries_component(self, series, dims, args=[]): elif args and dims: # the argument is already given in the model when the model # is called - return lambda x: utils.rearrange( + return lambda x, final_subs: utils.rearrange( np.interp(x, series.index, series.values), dims, self.components._subscript_dict), {'__lookup__': None} elif args: # the argument is already given in the model when the model # is called - return lambda x:\ + return lambda x, final_subs:\ np.interp(x, series.index, series.values), {'__lookup__': None} elif dims: @@ -1425,7 +1422,13 @@ def set_stateful(self, stateful_dict): @property def doc(self): - return self._doc + return self._doc.copy() + + def namespace(self): + return self.components._namespace.copy() + + def subscript_dict(self): + return self.components._subscript_dict.copy() def _build_doc(self): """ @@ -1443,38 +1446,24 @@ def _build_doc(self): - Documentation strings from the original model file """ collector = [] - for name, varname in self.components._namespace.items(): - try: - docstring = getattr(self.components, varname).__doc__ - lines = docstring.split('\n') - - for unit_line in range(3, 9): - # this loop detects where Units: starts - if re.findall('Units:', lines[unit_line]): - break - - vardoc = { - 'Real Name': name, - 'Py Name': varname, - 'Unit': lines[unit_line].replace("Units:", "").strip(), - 'Lims': lines[unit_line+1].replace("Limits:", "").strip(), - 'Type': lines[unit_line+2].replace("Type:", "").strip(), - 'Subtype': lines[unit_line+3].replace("Subtype:", "").strip(), - 'Subs': lines[unit_line+4].replace("Subs:", "").strip(), - 'Comment': '\n'.join(lines[(unit_line+5):]).strip() - } - - collector.append(vardoc) - except Exception: - pass + for name, pyname in self.components._namespace.items(): + 
element = getattr(self.components, pyname) + print(pyname) + collector.append({ + 'Real Name': name, + 'Py Name': pyname, + 'Subscripts': element.subscripts, + 'Units': element.units, + 'Limits': element.limits, + 'Type': element.type, + 'Subtype': element.subtype, + 'Comment': element.__doc__.strip() if element.__doc__ else None + }) if collector: docs_df = pd.DataFrame(collector) docs_df.fillna("None", inplace=True) - order = ["Real Name", "Py Name", "Unit", "Lims", - "Type", "Subtype", "Subs", "Comment"] - return docs_df[order].sort_values( - by="Real Name").reset_index(drop=True) + return docs_df.sort_values(by="Real Name").reset_index(drop=True) else: # manage models with no documentation (mainly test models) return None diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index 0dfce6c5..ed46732e 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -15,35 +15,6 @@ import pandas as pd -def xrmerge(*das): - """ - Merges xarrays with different dimension sets. - - Parameters - ---------- - *das: xarray.DataArrays - The data arrays to merge. - - - Returns - ------- - da: xarray.DataArray - Merged data array. - - References - ---------- - Thanks to @jcmgray - https://github.com/pydata/xarray/issues/742#issue-130753818 - - In the future, we may not need this as xarray may provide the merge for us. - """ - da = das[0] - for new_da in das[1:]: - da = da.combine_first(new_da) - - return da - - def xrsplit(array): """ Split an array to a list of all the components. @@ -145,6 +116,7 @@ def make_flat_df(df, return_addresses, flatten=False): new_df = {} for real_name, (pyname, address) in return_addresses.items(): if address: + print(df[pyname].values[0], "\n", address, "_________\n") # subset the specific address values = [x.loc[address] for x in df[pyname].values] else: diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translation/structures/abstract_expressions.py index 28ff2bbc..0e36a26d 100644 --- a/pysd/translation/structures/abstract_expressions.py +++ b/pysd/translation/structures/abstract_expressions.py @@ -420,9 +420,9 @@ class LookupsStructure: The list of the x values of the lookup. y: tuple The list of the y values of the lookup. - x_range: tuple + x_limits: tuple The minimum and maximum value of x. - y_range: tuple + y_limits: tuple The minimum and maximum value of y. type: str The interpolation method. @@ -430,13 +430,13 @@ class LookupsStructure: """ x: tuple y: tuple - x_range: tuple - y_range: tuple + x_limits: tuple + y_limits: tuple type: str def __str__(self) -> str: # pragma: no cover return "LookupStructure (%s):\n\tx %s = %s\n\ty %s = %s\n" % ( - self.type, self.x_range, self.x, self.y_range, self.y + self.type, self.x_limits, self.x, self.y_limits, self.y ) diff --git a/pysd/translation/structures/abstract_model.py b/pysd/translation/structures/abstract_model.py index 2c1f5949..da0acb84 100644 --- a/pysd/translation/structures/abstract_model.py +++ b/pysd/translation/structures/abstract_model.py @@ -194,8 +194,8 @@ class AbstractElement: The list of AbstractComponents that define this element. units: str (optional) The units of the element. '' by default. - range: tuple (optional) - The range of the element. (None, None) by default. + limits: tuple (optional) + The limits of the element. (None, None) by default. units: str (optional) The documentation of the element. '' by default. 
@@ -203,12 +203,12 @@ class AbstractElement: name: str components: List[AbstractComponent] units: str = "" - range: tuple = (None, None) + limits: tuple = (None, None) documentation: str = "" def __str__(self) -> str: # pragma: no cover return "AbstractElement:\t%s (%s, %s)\n%s\n" % ( - self.name, self.units, self.range, self.documentation) + self.name, self.units, self.limits, self.documentation) def dump(self, depth=None, indent="") -> str: # pragma: no cover """ diff --git a/pysd/translation/vensim/parsing_grammars/common_grammar.peg b/pysd/translation/vensim/parsing_grammars/common_grammar.peg index c36e4c74..a71fbb73 100644 --- a/pysd/translation/vensim/parsing_grammars/common_grammar.peg +++ b/pysd/translation/vensim/parsing_grammars/common_grammar.peg @@ -14,6 +14,6 @@ escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" number = raw_number raw_number = ("+"/"-")? _ ~r"\d+\.?\d*([eE][+-]?\d+)?" string = "\'" (~r"[^\']"IU)* "\'" -range = _ "[" ~r"[^\]]*" "]" _ "," +limits = _ "[" ~r"[^\]]*" "]" _ "," _ = ~r"[\s\\]*" diff --git a/pysd/translation/vensim/parsing_grammars/components.peg b/pysd/translation/vensim/parsing_grammars/components.peg index 18bce6be..e1032c02 100644 --- a/pysd/translation/vensim/parsing_grammars/components.peg +++ b/pysd/translation/vensim/parsing_grammars/components.peg @@ -11,7 +11,7 @@ exp_expr = neg_expr (_ exp_oper _ neg_expr)* # exponential neg_expr = pre_oper? _ expr # pre operators (-, +) expr = lookup_with_def / call / parens / number / reference / nan -lookup_with_def = ~r"(WITH\ LOOKUP)"I _ "(" _ final_expr _ "," _ "(" _ range? ( _ "(" _ raw_number _ "," _ raw_number _ ")" _ ","? _ )+ _ ")" _ ")" +lookup_with_def = ~r"(WITH\ LOOKUP)"I _ "(" _ final_expr _ "," _ "(" _ limits? ( _ "(" _ raw_number _ "," _ raw_number _ ")" _ ","? _ )+ _ ")" _ ")" nan = ":NA:" diff --git a/pysd/translation/vensim/parsing_grammars/lookups.peg b/pysd/translation/vensim/parsing_grammars/lookups.peg index ef088c9e..c4a731b8 100644 --- a/pysd/translation/vensim/parsing_grammars/lookups.peg +++ b/pysd/translation/vensim/parsing_grammars/lookups.peg @@ -1,7 +1,7 @@ # Parsing Expression Grammar: lookups lookup = _ "(" _ (regularLookup / excelLookup) _ ")" -regularLookup = range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ +regularLookup = limits? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ excelLookup = ~"GET( |_)(XLS|DIRECT)( |_)LOOKUPS"I _ "(" (args _ ","? _)+ ")" args = ~r"[^,()]*" diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 59a8a681..e5ab0265 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -38,7 +38,7 @@ class Element(): Original equation in the Vensim file. units: str - The units of the element with the range, i.e., the content after + The units of the element with the limits, i.e., the content after the first '~' symbol. 
documentation: str @@ -49,7 +49,7 @@ class Element(): def __init__(self, equation: str, units: str, documentation: str): self.equation = equation - self.units, self.range = self._parse_units(units) + self.units, self.limits = self._parse_units(units) self.documentation = documentation def __str__(self): # pragma: no cover @@ -67,17 +67,16 @@ def verbose(self): # pragma: no cover print(self._verbose) def _parse_units(self, units_str: str) -> Tuple[str, tuple]: - """Split the range from the units""" + """Split the limits from the units""" # TODO improve units parsing: parse them when parsing the section # elements if not units_str: - return "", (None, None) + return "", None if units_str.endswith("]"): units, lims = units_str.rsplit("[") # types: str, str else: - units = units_str - lims = "?, ?]" + return units_str, None lims = tuple( [ @@ -107,7 +106,7 @@ def parse(self) -> object: tree = vu.Grammar.get("element_object").parse(self.equation) self.component = ElementsComponentVisitor(tree).component self.component.units = self.units - self.component.range = self.range + self.component.limits = self.limits self.component.documentation = self.documentation return self.component @@ -534,14 +533,14 @@ def __init__(self, ast): self.translation = None self.visit(ast) - def visit_range(self, n, vc): + def visit_limits(self, n, vc): return n.text.strip()[:-1].replace(")-(", "),(") def visit_regularLookup(self, n, vc): if vc[0]: - xy_range = np.array(eval(vc[0])) + xy_limits = np.array(eval(vc[0])) else: - xy_range = np.full((2, 2), np.nan) + xy_limits = np.full((2, 2), np.nan) values = np.array((eval(vc[2]))) values = values[np.argsort(values[:, 0])] @@ -549,8 +548,8 @@ def visit_regularLookup(self, n, vc): self.translation = structures["lookup"]( x=tuple(values[:, 0]), y=tuple(values[:, 1]), - x_range=tuple(xy_range[:, 0]), - y_range=tuple(xy_range[:, 1]), + x_limits=tuple(xy_limits[:, 0]), + y_limits=tuple(xy_limits[:, 1]), type="interpolate" ) @@ -643,14 +642,14 @@ def visit_reference(self, n, vc): self.subs = None return id - def visit_range(self, n, vc): + def visit_limits(self, n, vc): return self.add_element(n.text.strip()[:-1].replace(")-(", "),(")) def visit_lookup_with_def(self, n, vc): if vc[10]: - xy_range = np.array(eval(self.elements[vc[10]])) + xy_limits = np.array(eval(self.elements[vc[10]])) else: - xy_range = np.full((2, 2), np.nan) + xy_limits = np.full((2, 2), np.nan) values = np.array((eval(vc[11]))) values = values[np.argsort(values[:, 0])] @@ -658,8 +657,8 @@ def visit_lookup_with_def(self, n, vc): lookup = structures["lookup"]( x=tuple(values[:, 0]), y=tuple(values[:, 1]), - x_range=tuple(xy_range[:, 0]), - y_range=tuple(xy_range[:, 1]), + x_limits=tuple(xy_limits[:, 0]), + y_limits=tuple(xy_limits[:, 1]), type="interpolate" ) diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translation/vensim/vensim_section.py index 444a728d..dc33b42a 100644 --- a/pysd/translation/vensim/vensim_section.py +++ b/pysd/translation/vensim/vensim_section.py @@ -171,9 +171,9 @@ def _merge_components(self) -> List[AbstractElement]: if component.units: # add units to element data merged[name].units = component.units - if component.range != (None, None): - # add range to element data - merged[name].range = component.range + if component.limits: + # add limits to element data + merged[name].limits = component.limits if component.documentation: # add documentation to element data merged[name].documentation = component.documentation diff --git a/pysd/translation/xmile/xmile_element.py 
b/pysd/translation/xmile/xmile_element.py index 3c440087..16041364 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -58,7 +58,7 @@ def __init__(self, node: etree._Element, ns: dict, subscripts): self.name = node.attrib["name"] self.units = self._get_xpath_text(node, "ns:units") or "" self.documentation = self._get_xpath_text(node, "ns:doc") or "" - self.range = (None, None) + self.limits = (None, None) self.components = [] self.subscripts = subscripts @@ -104,8 +104,8 @@ def _get_xpath_attrib(self, node: etree._Element, except IndexError: return None - def _get_range(self) -> Tuple[Union[None, str], Union[None, str]]: - """Get the range of the element""" + def _get_limits(self) -> Tuple[Union[None, str], Union[None, str]]: + """Get the limits of the element""" lims = ( self._get_xpath_attrib(self.node, 'ns:range', 'min'), self._get_xpath_attrib(self.node, 'ns:range', 'max') @@ -145,8 +145,8 @@ def _parse_lookup_xml_node(self, node: etree._Element) -> object: return structures["lookup"]( x=tuple(xs[np.argsort(xs)]), y=tuple(ys[np.argsort(xs)]), - x_range=(np.min(xs), np.max(xs)), - y_range=(np.min(ys), np.max(ys)), + x_limits=(np.min(xs), np.max(xs)), + y_limits=(np.min(ys), np.max(ys)), type=self._interp_methods[interp] ) @@ -203,7 +203,7 @@ def _get_empty_abstract_element(self) -> AbstractElement: return AbstractElement( name=self.name, units=self.units, - range=self.range, + limits=self.limits, documentation=self.documentation, components=[]) @@ -229,7 +229,7 @@ class Flaux(Element): def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) - self.range = self._get_range() + self.limits = self._get_limits() def _parse_component(self, node: etree._Element) -> List[object]: """ @@ -299,10 +299,10 @@ class Gf(Element): def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) - self.range = self.get_range() + self.limits = self.get_limits() - def get_range(self) -> Tuple[Union[None, str], Union[None, str]]: - """Get the range of the Gf element""" + def get_limits(self) -> Tuple[Union[None, str], Union[None, str]]: + """Get the limits of the Gf element""" lims = ( self._get_xpath_attrib(self.node, 'ns:yscale', 'min'), self._get_xpath_attrib(self.node, 'ns:yscale', 'max') @@ -364,7 +364,7 @@ class Stock(Element): def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) - self.range = self._get_range() + self.limits = self._get_limits() def _parse_component(self, node) -> object: """ @@ -437,7 +437,7 @@ def __init__(self, name, units, documentation, eqn): self.name = name self.units = units self.documentation = documentation - self.range = (None, None) + self.limits = (None, None) self.eqn = eqn def parse(self) -> None: diff --git a/tests/more-tests/circular_reference/test_circular_reference.py b/tests/more-tests/circular_reference/test_circular_reference.py index 9fd64ac3..2dd40476 100644 --- a/tests/more-tests/circular_reference/test_circular_reference.py +++ b/tests/more-tests/circular_reference/test_circular_reference.py @@ -1,4 +1,5 @@ from pysd.py_backend.statefuls import Integ, Delay +from pysd import component _subscript_dict = {} _namespace = {'integ': 'integ', 'delay': 'delay'} @@ -8,7 +9,7 @@ '_integ_integ': {'initial': {'delay': 1}, 'step': {}}, '_delay_delay': {'initial': {'integ': 1}, 'step': {}} } -__pysd_version__ = "2.0.0" +__pysd_version__ = "3.0.0" __data = {'scope': None, 'time': lambda: 0} @@ -54,10 +55,12 @@ def saveper(): return __data["time"].save() 
+@component(name="Integ") def integ(): return _integ_integ() +@component(name="Delay") def delay(): return _delay_delay() @@ -65,4 +68,4 @@ def delay(): _integ_integ = Integ(lambda: 2, lambda: delay(), '_integ_integ') _delay_delay = Delay(lambda: 2, lambda: 1, - lambda: integ(), 1, time_step, '_delay_delay') \ No newline at end of file + lambda: integ(), 1, time_step, '_delay_delay') diff --git a/tests/more-tests/initialization_order/test_initialization_order.py b/tests/more-tests/initialization_order/test_initialization_order.py index 8106907a..48e409ab 100644 --- a/tests/more-tests/initialization_order/test_initialization_order.py +++ b/tests/more-tests/initialization_order/test_initialization_order.py @@ -5,8 +5,9 @@ from pysd.py_backend.statefuls import Integ +from pysd import component -__pysd_version__ = "2.0.0" +__pysd_version__ = "3.0.0" _subscript_dict = {} @@ -49,65 +50,43 @@ def _init_outer_references(data): __data[key] = data[key] +@component(name="Time") def time(): return __data["time"]() +@component(name="Initial time") def initial_time(): return __data["time"].initial_time() +@component(name="Final time") def final_time(): return __data["time"].final_time() +@component(name="Time step") def time_step(): return __data["time"].time_step() +@component(name="Saveper") def saveper(): return __data["time"].saveper() +@component(name="Stock B") def stock_b(): - """ - Real Name: Stock B - Original Eqn: INTEG(1, Stock A) - Units: - Limits: (None, None) - Type: component - Subs: None - - - """ return _integ_stock_b() +@component(name="Stock A") def stock_a(): - """ - Real Name: Stock A - Original Eqn: INTEG (1, Initial Parameter) - Units: - Limits: (None, None) - Type: component - Subs: None - - - """ return _integ_stock_a() +@component(name="Initial parameter") def initial_parameter(): - """ - Real Name: Initial Parameter - Original Eqn: 42 - Units: - Limits: (None, None) - Type: constant - Subs: None - - - """ return 42 diff --git a/tests/more-tests/type_error/test_type_error.py b/tests/more-tests/type_error/test_type_error.py index 7e0a7ece..41b7c4a4 100644 --- a/tests/more-tests/type_error/test_type_error.py +++ b/tests/more-tests/type_error/test_type_error.py @@ -1,6 +1,6 @@ from pysd import external -__pysd_version__ = "2.2.2" +__pysd_version__ = "3.0.0" _root = './' external.ExtData('input.xlsx', 'Sheet1', '5', 'B6') diff --git a/tests/more-tests/version/test_current_version.py b/tests/more-tests/version/test_current_version.py index 0c5e2a15..e42610f0 100644 --- a/tests/more-tests/version/test_current_version.py +++ b/tests/more-tests/version/test_current_version.py @@ -1,4 +1,4 @@ -__pysd_version__ = "2.99.3" +__pysd_version__ = "3.0.0" _namespace = {} _dependencies = {} diff --git a/tests/unit_test_external.py b/tests/unit_test_external.py index 779b79f8..d463dbf1 100644 --- a/tests/unit_test_external.py +++ b/tests/unit_test_external.py @@ -146,7 +146,6 @@ def test_reshape(self): External._reshape test """ from pysd.py_backend.external import External - import pandas as pd reshape = External._reshape @@ -303,6 +302,7 @@ def test_data_interp_h1d_1(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -335,6 +335,7 @@ def test_data_interp_hn1d_1(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -364,6 +365,7 @@ def test_data_interp_h1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -394,6 +396,7 @@ def 
test_data_interp_v1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -424,6 +427,7 @@ def test_data_interp_hn1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -454,6 +458,7 @@ def test_data_interp_vn1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -484,6 +489,7 @@ def test_data_forward_h1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -514,6 +520,7 @@ def test_data_forward_v1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -544,6 +551,7 @@ def test_data_forward_hn1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -574,6 +582,7 @@ def test_data_forward_vn1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -604,6 +613,7 @@ def test_data_backward_h1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -634,6 +644,7 @@ def test_data_backward_v1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -664,6 +675,7 @@ def test_data_backward_hn1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -694,6 +706,7 @@ def test_data_backward_vn1d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -724,6 +737,7 @@ def test_data_interp_vn2d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -754,6 +768,7 @@ def test_data_forward_hn2d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -784,6 +799,7 @@ def test_data_backward_v2d(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -807,6 +823,7 @@ def test_data_interp_h3d(self): cell_2 = "C8" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = None py_name = "test_data_interp_h3d" @@ -817,6 +834,7 @@ def test_data_interp_h3d(self): cell=cell_1, coords=coords_1, interp=interp, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -847,6 +865,7 @@ def test_data_forward_v3d(self): cell_2 = "F5" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = "look_forward" py_name = "test_data_forward_v3d" @@ -857,6 +876,7 @@ def test_data_forward_v3d(self): cell=cell_1, coords=coords_1, interp=interp, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -887,6 +907,7 @@ def test_data_backward_hn3d(self): cell_2 = "data_2db" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = "hold_backward" py_name = "test_data_backward_hn3d" @@ -897,6 +918,7 @@ def test_data_backward_hn3d(self): cell=cell_1, coords=coords_1, interp=interp, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -935,6 +957,7 @@ def test_data_raw_h1d(self): cell=cell, 
coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -976,6 +999,7 @@ def test_lookup_h1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1004,6 +1028,7 @@ def test_lookup_v1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1032,6 +1057,7 @@ def test_lookup_hn1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1060,6 +1086,7 @@ def test_lookup_vn1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1088,6 +1115,7 @@ def test_lookup_h2d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1111,6 +1139,7 @@ def test_lookup_vn3d(self): cell_2 = "data_2db" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_vn3d" data = pysd.external.ExtLookup(file_name=file_name, @@ -1119,6 +1148,7 @@ def test_lookup_vn3d(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1149,6 +1179,7 @@ def test_lookup_vn3d_shape0(self): cell_2 = "data_2db" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_vn3d_shape0" data = pysd.external.ExtLookup(file_name=file_name, @@ -1157,6 +1188,7 @@ def test_lookup_vn3d_shape0(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1193,17 +1225,19 @@ def test_lookup_vn2d_xarray(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=coords_1, py_name=py_name) data.initialize() - all_smaller = xr.DataArray([-1, -10], {'XY': ['X', 'Y']}, ['XY']) - all_bigger = xr.DataArray([9, 20, 30], {'ABC': ['A', 'B', 'C']}, - ['ABC']) - all_inside = xr.DataArray([3.5, 5.5], {'XY': ['X', 'Y']}, ['XY']) - mixed = xr.DataArray([1.5, 20, -30], {'ABC': ['A', 'B', 'C']}, ['ABC']) + coords_2 = {'XY': ['X', 'Y']} + + all_smaller = xr.DataArray([-1, -10], coords_2, ['XY']) + all_bigger = xr.DataArray([9, 20, 30], coords_1, ['ABC']) + all_inside = xr.DataArray([3.5, 5.5], coords_2, ['XY']) + mixed = xr.DataArray([1.5, 20, -30], coords_1, ['ABC']) full = xr.DataArray([[1.5, -30], [-10, 2.5], [4., 5.]], - {'ABC': ['A', 'B', 'C'], 'XY': ['X', 'Y']}, + {**coords_1, **coords_2}, ['ABC', 'XY']) all_smaller_out = data.data[0].reset_coords('lookup_dim', drop=True)\ @@ -1212,25 +1246,28 @@ def test_lookup_vn2d_xarray(self): all_inside_out = xr.DataArray([[0.5, -1], [-1, -0.5], [-0.75, 0]], - {'ABC': ['A', 'B', 'C'], - 'XY': ['X', 'Y']}, + {**coords_1, **coords_2}, ['ABC', 'XY']) - mixed_out = xr.DataArray([0.5, 0, 1], - {'ABC': ['A', 'B', 'C']}, - ['ABC']) - full_out = xr.DataArray([[0.5, 0], - [0, 0], - [-0.5, 0]], - {'ABC': ['A', 'B', 'C'], 'XY': ['X', 'Y']}, - ['ABC', 'XY']) + mixed_out = xr.DataArray([0.5, 0, 1], coords_1, ['ABC']) + full_out = xr.DataArray([[0.5, 0, -0.5], + [0, 0, 0]], + {**coords_2, **coords_1}, + ['XY', 'ABC']) with warnings.catch_warnings(): warnings.simplefilter("ignore") - self.assertTrue(data(all_smaller).equals(all_smaller_out)) - self.assertTrue(data(all_bigger).equals(all_bigger_out)) - self.assertTrue(data(all_inside).equals(all_inside_out)) - 
self.assertTrue(data(mixed).equals(mixed_out)) - self.assertTrue(data(full).equals(full_out)) + self.assertTrue( + data(all_smaller, {**coords_1, **coords_2} + ).equals(all_smaller_out)) + self.assertTrue( + data(all_bigger, coords_1).equals(all_bigger_out)) + self.assertTrue( + data(all_inside, {**coords_1, **coords_2} + ).equals(all_inside_out)) + self.assertTrue( + data(mixed, coords_1).equals(mixed_out)) + self.assertTrue( + data(full, {**coords_2, **coords_1}).equals(full_out)) def test_lookup_vn3d_xarray(self): """ @@ -1246,6 +1283,7 @@ def test_lookup_vn3d_xarray(self): cell_2 = "data_2db" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_vn3d_xarray" data = pysd.external.ExtLookup(file_name=file_name, @@ -1254,6 +1292,7 @@ def test_lookup_vn3d_xarray(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1292,11 +1331,16 @@ def test_lookup_vn3d_xarray(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") - self.assertTrue(data(all_smaller).equals(all_smaller_out)) - self.assertTrue(data(all_bigger).equals(all_bigger_out)) - self.assertTrue(data(all_inside).equals(all_inside_out)) - self.assertTrue(data(mixed).equals(mixed_out)) - self.assertTrue(data(full).equals(full_out)) + self.assertTrue( + data(all_smaller, final_coords).equals(all_smaller_out)) + self.assertTrue( + data(all_bigger, final_coords).equals(all_bigger_out)) + self.assertTrue( + data(all_inside, final_coords).equals(all_inside_out)) + self.assertTrue( + data(mixed, final_coords).equals(mixed_out)) + self.assertTrue( + data(full, final_coords).equals(full_out)) class TestConstant(unittest.TestCase): @@ -1324,6 +1368,7 @@ def test_constant_0d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data2 = pysd.external.ExtConstant(file_name=file_name, @@ -1331,6 +1376,7 @@ def test_constant_0d(self): root=_root, cell=cell2, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() data2.initialize() @@ -1356,6 +1402,7 @@ def test_constant_n0d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data2 = pysd.external.ExtConstant(file_name=file_name, @@ -1363,6 +1410,7 @@ def test_constant_n0d(self): root=_root, cell=cell2, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() data2.initialize() @@ -1387,6 +1435,7 @@ def test_constant_h1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1409,6 +1458,7 @@ def test_constant_v1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1431,6 +1481,7 @@ def test_constant_hn1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1453,6 +1504,7 @@ def test_constant_vn1d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1475,6 +1527,7 @@ def test_constant_h2d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1497,6 +1550,7 @@ def test_constant_v2d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1519,6 +1573,7 @@ def test_constant_hn2d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() 
@@ -1541,6 +1596,7 @@ def test_constant_vn2d(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -1562,6 +1618,9 @@ def test_constant_h3d(self): coords2 = {'ABC': ['A', 'B', 'C'], 'XY': ['Y'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'ABC': ['A', 'B', 'C'], + 'XY': ['X', 'Y'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_h3d" data = pysd.external.ExtConstant(file_name=file_name, @@ -1569,6 +1628,7 @@ def test_constant_h3d(self): root=_root, cell=cell, coords=coords, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1596,6 +1656,9 @@ def test_constant_v3d(self): coords2 = {'ABC': ['A', 'B', 'C'], 'XY': ['Y'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'ABC': ['A', 'B', 'C'], + 'XY': ['X', 'Y'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_v3d" data = pysd.external.ExtConstant(file_name=file_name, @@ -1603,6 +1666,7 @@ def test_constant_v3d(self): root=_root, cell=cell, coords=coords, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1630,6 +1694,9 @@ def test_constant_hn3d(self): coords2 = {'ABC': ['A', 'B', 'C'], 'XY': ['Y'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'ABC': ['A', 'B', 'C'], + 'XY': ['X', 'Y'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_hn3d" data = pysd.external.ExtConstant(file_name=file_name, @@ -1637,6 +1704,7 @@ def test_constant_hn3d(self): root=_root, cell=cell, coords=coords, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1664,6 +1732,9 @@ def test_constant_vn3d(self): coords2 = {'ABC': ['A', 'B', 'C'], 'XY': ['Y'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'ABC': ['A', 'B', 'C'], + 'XY': ['X', 'Y'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_vn2d" data = pysd.external.ExtConstant(file_name=file_name, @@ -1671,6 +1742,7 @@ def test_constant_vn3d(self): root=_root, cell=cell, coords=coords, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -1761,6 +1833,7 @@ def test_not_implemented_file(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(NotImplementedError): @@ -1787,6 +1860,7 @@ def test_non_existent_file(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(FileNotFoundError): @@ -1813,6 +1887,7 @@ def test_non_existent_sheet_pyxl(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -1839,6 +1914,7 @@ def test_non_existent_cellrange_name_pyxl(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(AttributeError): @@ -1861,6 +1937,7 @@ def test_non_existent_cellrange_name_in_sheet_pyxl(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) with self.assertRaises(AttributeError): @@ -1892,6 +1969,7 @@ def test_data_interp_h1dm_row(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -1925,6 +2003,7 @@ def test_data_interp_h1dm_row2(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -1960,6 +2039,7 @@ def test_data_interp_h1dm(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with 
warnings.catch_warnings(record=True) as ws: @@ -2004,6 +2084,7 @@ def test_data_interp_h1dm_ignore(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -2047,6 +2128,7 @@ def test_data_interp_h1dm_raise(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2076,6 +2158,7 @@ def test_data_interp_v1dm(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -2120,6 +2203,7 @@ def test_data_interp_v1dm_ignore(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -2163,6 +2247,7 @@ def test_data_interp_v1dm_raise(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2192,6 +2277,7 @@ def test_data_interp_hn1dm(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -2236,6 +2322,7 @@ def test_data_interp_hn1dm_ignore(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with warnings.catch_warnings(record=True) as ws: @@ -2279,6 +2366,7 @@ def test_data_interp_hn1dm_raise(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2298,6 +2386,7 @@ def test_data_interp_hn3dmd(self): cell_2 = "data_2db" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = "interpolate" py_name = "test_data_interp_hn3dmd" @@ -2310,6 +2399,7 @@ def test_data_interp_hn3dmd(self): cell=cell_1, interp=interp, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2357,6 +2447,7 @@ def test_data_interp_hn3dmd_raw(self): cell_2 = "data_2db" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = "raw" py_name = "test_data_interp_hn3dmd_raw" @@ -2369,6 +2460,7 @@ def test_data_interp_hn3dmd_raw(self): cell=cell_1, interp=interp, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2404,6 +2496,7 @@ def test_lookup_hn3dmd_raise(self): cell_2 = "C19" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_hn3dmd_raise" pysd.external.External.missing = "raise" @@ -2413,6 +2506,7 @@ def test_lookup_hn3dmd_raise(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2438,6 +2532,7 @@ def test_lookup_hn3dmd_ignore(self): cell_2 = "C19" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_hn3dmd_ignore" pysd.external.External.missing = "ignore" @@ -2447,6 +2542,7 @@ def test_lookup_hn3dmd_ignore(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2488,6 +2584,8 @@ def test_constant_h3dm(self): 'val': [0, 1, 2, 3, 5, 6, 7, 8]} coords_2 = {'XY': 
['Y'], 'ABC': ['A', 'B', 'C'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_h3dm" pysd.external.External.missing = "warning" @@ -2496,6 +2594,7 @@ def test_constant_h3dm(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2527,6 +2626,8 @@ def test_constant_h3dm_ignore(self): 'val': [0, 1, 2, 3, 5, 6, 7, 8]} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_h3dm_ignore" pysd.external.External.missing = "ignore" @@ -2535,6 +2636,7 @@ def test_constant_h3dm_ignore(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2562,6 +2664,8 @@ def test_constant_h3dm_raise(self): 'val': [0, 1, 2, 3, 5, 6, 7, 8]} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_h3dm_raise" pysd.external.External.missing = "raise" @@ -2570,6 +2674,7 @@ def test_constant_h3dm_raise(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2595,6 +2700,8 @@ def test_constant_hn3dm_raise(self): 'val': [0, 1, 2, 3, 5, 6, 7, 8]} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_hn3dm_raise" pysd.external.External.missing = "raise" @@ -2603,6 +2710,7 @@ def test_constant_hn3dm_raise(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -2634,6 +2742,7 @@ def test_data_interp_h1d0(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2660,6 +2769,7 @@ def test_data_interp_v1d0(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2688,6 +2798,7 @@ def test_data_interp_hn1d0(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2715,6 +2826,7 @@ def test_data_interp_hn1dt(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2742,6 +2854,7 @@ def test_data_interp_hns(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2769,6 +2882,7 @@ def test_data_interp_vnss(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2797,6 +2911,7 @@ def test_data_interp_hnnwd(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError) as err: @@ -2825,6 +2940,7 @@ def test_data_raw_hnnm(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -2848,6 +2964,7 @@ def test_data_raw_hnnm(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -2883,6 +3000,7 @@ def test_data_h3d_interpnv(self): 
cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) def test_data_h3d_interp(self): @@ -2898,6 +3016,7 @@ def test_data_h3d_interp(self): cell_2 = "C8" coords_1 = {'ABC': ['A', 'B', 'C'], 'XY': ['X']} coords_2 = {'ABC': ['A', 'B', 'C'], 'XY': ['Y']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = None interp2 = "look_forward" py_name = "test_data_h3d_interp" @@ -2909,6 +3028,7 @@ def test_data_h3d_interp(self): cell=cell_1, coords=coords_1, interp=interp, + final_coords=final_coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2932,6 +3052,7 @@ def test_data_h3d_add(self): cell_2 = "C8" coords_1 = {'ABC': ['A', 'B', 'C'], 'XY': ['X']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} interp = None py_name = "test_data_h3d_add" @@ -2942,6 +3063,7 @@ def test_data_h3d_add(self): cell=cell_1, coords=coords_1, interp=interp, + final_coords=final_coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2965,6 +3087,7 @@ def test_lookup_h3d_add(self): cell_2 = "C8" coords_1 = {'ABC': ['A', 'B', 'C'], 'XY': ['X']} coords_2 = {'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_h3d_add" data = pysd.external.ExtLookup(file_name=file_name, @@ -2973,6 +3096,7 @@ def test_lookup_h3d_add(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) with self.assertRaises(ValueError): @@ -2998,6 +3122,8 @@ def test_constant_h3d_add(self): coords2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C'], 'val2': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_h3d_add" data = pysd.external.ExtConstant(file_name=file_name, @@ -3005,6 +3131,7 @@ def test_constant_h3d_add(self): root=_root, cell=cell, coords=coords, + final_coords=final_coords, py_name=py_name) with self.assertRaises(ValueError): @@ -3031,6 +3158,7 @@ def test_constant_hns(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) with self.assertRaises(ValueError): @@ -3057,6 +3185,7 @@ def text_openpyxl_str(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) expected = xr.DataArray( @@ -3076,6 +3205,7 @@ def text_openpyxl_str(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) data.initialize() @@ -3104,6 +3234,8 @@ def test_constant_hn3dm_keep(self): 'val': [0, 1, 2, 3, 5, 6, 7, 8]} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C'], 'val': [0, 1, 2, 3, 5, 6, 7, 8]} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C'], + 'val': [0, 1, 2, 3, 5, 6, 7, 8]} py_name = "test_constant_hn3dm_raise" pysd.external.External.missing = "keep" @@ -3123,6 +3255,7 @@ def test_constant_hn3dm_keep(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -3148,6 +3281,7 @@ def test_lookup_hn3dmd_keep(self): cell_2 = "C19" coords_1 = {'XY': ['X'], 'ABC': ['A', 'B', 'C']} coords_2 = {'XY': ['Y'], 'ABC': ['A', 'B', 'C']} + final_coords = {'XY': ['X', 'Y'], 'ABC': ['A', 'B', 'C']} py_name = "test_lookup_hn3dmd_ignore" pysd.external.External.missing = "keep" @@ -3178,6 +3312,7 @@ def test_lookup_hn3dmd_keep(self): root=_root, cell=cell_1, coords=coords_1, + final_coords=final_coords, py_name=py_name) data.add(file_name=file_name, @@ -3219,6 +3354,7 @@ def test_data_interp_v1dm_keep(self): cell=cell, 
coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -3254,6 +3390,7 @@ def test_data_interp_hnnm_keep(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) data.initialize() @@ -3282,6 +3419,7 @@ def test_lookup_data_attr(self): cell=cell, coords=coords, interp=interp, + final_coords=coords, py_name=py_name) datL = pysd.external.ExtLookup(file_name=file_name, @@ -3290,6 +3428,7 @@ def test_lookup_data_attr(self): root=_root, cell=cell, coords=coords, + final_coords=coords, py_name=py_name) datD.initialize() datL.initialize() diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index ebd9839b..7546c46c 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -586,9 +586,9 @@ def test_docs(self): model = pysd.read_vensim(test_model) self.assertIsInstance(str(model), str) # tests string conversion of # model - print(model.doc().columns) + print(model.doc.columns) - doc = model._doc + doc = model.doc self.assertIsInstance(doc, pd.DataFrame) self.assertSetEqual( { @@ -600,12 +600,13 @@ def test_docs(self): "Room Temperature", "SAVEPER", "TIME STEP", + "Time" }, set(doc["Real Name"].values), ) self.assertEqual( - doc[doc["Real Name"] == "Heat Loss to Room"]["Unit"].values[0], + doc[doc["Real Name"] == "Heat Loss to Room"]["Units"].values[0], "Degrees Fahrenheit/Minute", ) self.assertEqual( @@ -625,30 +626,12 @@ def test_docs(self): "Normal" ) self.assertEqual( - doc[doc["Real Name"] == "Teacup Temperature"]["Lims"].values[0], - "(32.0, 212.0)", - ) - - def test_docs_multiline_eqn(self): - """ Test that the model prints some documentation """ - - path2model = _root.joinpath( - "test-models/tests/multiple_lines_def/" + - "test_multiple_lines_def.mdl") - model = pysd.read_vensim(path2model) - - doc = model.doc() - - self.assertEqual(doc[doc["Real Name"] == "price"]["Unit"].values[0], - "euros/kg") - self.assertEqual(doc[doc["Real Name"] == "price"]["Py Name"].values[0], - "price") - self.assertEqual( - doc[doc["Real Name"] == "price"]["Subs"].values[0], "['fruits']" + doc[doc["Real Name"] == "Teacup Temperature"]["Limits"].values[0], + (32.0, 212.0), ) def test_stepwise_cache(self): - from pysd.py_backend.decorators import Cache + from pysd.py_backend.cache import Cache run_history = [] result_history = [] @@ -692,7 +675,7 @@ def downstream(run_hist, res_hist): "up", "down"]) def test_runwise_cache(self): - from pysd.py_backend.decorators import constant_cache + from pysd.py_backend.cache import constant_cache run_history = [] result_history = [] @@ -1053,10 +1036,10 @@ def test_get_args(self): self.assertEqual(model.get_args('teacup_temperature'), []) self.assertEqual(model.get_args('_integ_teacup_temperature'), []) - self.assertEqual(model2.get_args('lookup 1d'), ['x']) - self.assertEqual(model2.get_args('lookup_1d'), ['x']) - self.assertEqual(model2.get_args('lookup 2d'), ['x']) - self.assertEqual(model2.get_args('lookup_2d'), ['x']) + self.assertEqual(model2.get_args('lookup 1d'), ['x', 'final_subs']) + self.assertEqual(model2.get_args('lookup_1d'), ['x', 'final_subs']) + self.assertEqual(model2.get_args('lookup 2d'), ['x', 'final_subs']) + self.assertEqual(model2.get_args('lookup_2d'), ['x', 'final_subs']) with self.assertRaises(NameError): model.get_args('not_a_var') From 3676fd96abd916dcd6e91ef7e5e3d4f1fbd1677a Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 6 Apr 2022 19:29:13 +0200 Subject: [PATCH 28/96] Track external objects with dependencies dicts to prevent loading them when 
selecting a submodel --- .../python/python_expressions_builder.py | 6 +- pysd/building/python/python_model_builder.py | 2 +- pysd/py_backend/statefuls.py | 57 +++++---- tests/unit_test_functions.py | 113 ------------------ 4 files changed, 38 insertions(+), 140 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index c7d64eab..82b87227 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -458,7 +458,7 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "(x, final_subs)", - calls={"__external__": None, "__lookup__": None}, + calls={arguments["name"]: 1, "__lookup__": None}, subscripts=final_subs, order=0) @@ -513,7 +513,7 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "(time())", - calls={"__external__": None, "time": 1}, + calls={arguments["name"]: 1, "time": 1}, subscripts=final_subs, order=0) @@ -565,7 +565,7 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "()", - calls={"__external__": None}, + calls={arguments["name"]: 1}, subscripts=final_subs, order=0) diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 0de8e641..b5bff998 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -577,7 +577,7 @@ def build_element_out(self): meta_data.append("subscripts=%(subscripts)s") if self.documentation: doc = self.documentation.replace("\\", "\n") - contents = f'"""{doc}"""\n'\ + contents = f'"""\n{doc}\n"""\n'\ + contents meta_data.append("comp_type='%(type)s'") diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 2709969e..d3796539 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -771,7 +771,7 @@ def _get_full_dependencies(self, element, dep_set, stateful_deps): deps = deps[stateful_deps] for dep in deps: if dep not in dep_set and not dep.startswith("__")\ - and dep != "time": + and not dep.startswith("_ext") and dep != "time": dep_set.add(dep) self._get_full_dependencies(dep, dep_set, stateful_deps) @@ -864,7 +864,8 @@ def _assign_cache(self, element): self.cache_type[element] = "run" for subelement in self.components._dependencies[element]: if subelement.startswith("_initial_")\ - or subelement.startswith("__"): + or subelement.startswith("__")\ + or subelement.startswith("_ext_"): continue if subelement not in self.cache_type: self._assign_cache(subelement) @@ -890,7 +891,7 @@ def _isdynamic(self, dependencies): return True for dep in dependencies: if dep.startswith("_") and not dep.startswith("_initial_")\ - and not dep.startswith("__"): + and not dep.startswith("__") and not dep.startswith("_ext_"): return True return False @@ -1448,7 +1449,6 @@ def _build_doc(self): collector = [] for name, pyname in self.components._namespace.items(): element = getattr(self.components, pyname) - print(pyname) collector.append({ 'Real Name': name, 'Py Name': pyname, @@ -1457,7 +1457,8 @@ def _build_doc(self): 'Limits': element.limits, 'Type': element.type, 'Subtype': element.subtype, - 'Comment': element.__doc__.strip() if element.__doc__ else None + 'Comment': element.__doc__.strip().strip("\n").strip() + if element.__doc__ else None }) if collector: @@ -1711,23 +1712,6 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): "Selecting submodel, " "to run the full model again use 
model.reload()") - # reassing the dictionary and lists of needed stateful objects - self._stateful_elements = { - name: getattr(self.components, name) - for name in s_deps - if isinstance(getattr(self.components, name), Stateful) - } - self._dynamicstateful_elements = [ - getattr(self.components, name) for name in s_deps - if isinstance(getattr(self.components, name), DynamicStateful) - ] - self._macro_elements = [ - getattr(self.components, name) for name in s_deps - if isinstance(getattr(self.components, name), Macro) - ] - # TODO: include subselection of external objects (update in the deps - # dictionary is needed -> NO BACK COMPATIBILITY) - # get set of all dependencies and all variables to select all_deps = d_vars["initial"].copy() all_deps.update(d_vars["step"]) @@ -1746,6 +1730,32 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): if py_name.startswith("_") and py_name not in s_deps: del self.components._dependencies[py_name] + # reassing the dictionary and lists of needed stateful objects + self._stateful_elements = { + name: getattr(self.components, name) + for name in s_deps + if isinstance(getattr(self.components, name), Stateful) + } + self._dynamicstateful_elements = [ + getattr(self.components, name) for name in s_deps + if isinstance(getattr(self.components, name), DynamicStateful) + ] + self._macro_elements = [ + getattr(self.components, name) for name in s_deps + if isinstance(getattr(self.components, name), Macro) + ] + + # keeping only needed external objects + ext_deps = set() + for values in self.components._dependencies.values(): + for value in values: + if value.startswith("_ext_"): + ext_deps.add(value) + self._external_elements = [ + getattr(self.components, name) for name in ext_deps + if isinstance(getattr(self.components, name), External) + ] + # set all exogenous values to np.nan by default new_components = {element: np.nan for element in all_deps} # update exogenous values with the user input @@ -1884,7 +1894,8 @@ def _get_dependencies(self, vars=[], modules=[]): """ def check_dep(dependencies, initial=False): for dep in dependencies: - if dep in c_vars or dep.startswith("__"): + if dep in c_vars or dep.startswith("__")\ + or dep.startswith("_ext_"): pass elif dep.startswith("_"): s_deps.add(dep) diff --git a/tests/unit_test_functions.py b/tests/unit_test_functions.py index 39ec8d0d..8f1c8c0b 100644 --- a/tests/unit_test_functions.py +++ b/tests/unit_test_functions.py @@ -121,26 +121,6 @@ def test_zidz(self): self.assertEqual(zidz(1, 0), 0) self.assertEqual(zidz(1, 8), 0.125) - -class TestStatsFunctions(unittest.TestCase): - def test_bounded_normal(self): - from pysd.py_backend.functions import bounded_normal - min_val = -4 - max_val = .2 - mean = -1 - std = .05 - seed = 1 - results = np.array( - [bounded_normal(min_val, max_val, mean, std, seed) - for _ in range(1000)]) - - self.assertGreaterEqual(results.min(), min_val) - self.assertLessEqual(results.max(), max_val) - self.assertAlmostEqual(results.mean(), mean, delta=std) - self.assertAlmostEqual(results.std(), std, delta=std) - self.assertGreater(len(np.unique(results)), 100) - - class TestLogicFunctions(unittest.TestCase): def test_if_then_else_basic(self): from pysd.py_backend.functions import if_then_else @@ -190,99 +170,6 @@ def test_if_then_else_with_subscripted(self): if_then_else(xr_mixed, lambda: 1, lambda: 1/0) -class TestLookup(unittest.TestCase): - def test_lookup(self): - from pysd.py_backend.functions import lookup - - xpts = [0, 1, 2, 3, 5, 6, 7, 8] - ypts = [0, 0, 
1, 1, -1, -1, 0, 0] - - for x, y in zip(xpts, ypts): - self.assertEqual( - y, - lookup(x, xpts, ypts), - "Wrong result at X=" + str(x)) - - def test_lookup_extrapolation_inbounds(self): - from pysd.py_backend.functions import lookup_extrapolation - - xpts = [0, 1, 2, 3, 5, 6, 7, 8] - ypts = [0, 0, 1, 1, -1, -1, 0, 0] - - expected_xpts = np.arange(-0.5, 8.6, 0.5) - expected_ypts = [ - 0, - 0, 0, - 0, 0.5, - 1, 1, - 1, 0.5, 0, -0.5, -1, - -1, -1, -0.5, - 0, 0, 0, 0 - ] - - for x, y in zip(expected_xpts, expected_ypts): - self.assertEqual( - y, - lookup_extrapolation(x, xpts, ypts), - "Wrong result at X=" + str(x)) - - def test_lookup_extrapolation_two_points(self): - from pysd.py_backend.functions import lookup_extrapolation - - xpts = [0, 1] - ypts = [0, 1] - - expected_xpts = np.arange(-0.5, 1.6, 0.5) - expected_ypts = [-0.5, 0.0, 0.5, 1.0, 1.5] - - for x, y in zip(expected_xpts, expected_ypts): - self.assertEqual( - y, - lookup_extrapolation(x, xpts, ypts), - "Wrong result at X=" + str(x)) - - def test_lookup_extrapolation_outbounds(self): - from pysd.py_backend.functions import lookup_extrapolation - - xpts = [0, 1, 2, 3] - ypts = [0, 1, 1, 0] - - expected_xpts = np.arange(-0.5, 3.6, 0.5) - expected_ypts = [ - -0.5, - 0.0, 0.5, 1.0, - 1.0, 1.0, - 0.5, 0, - -0.5 - ] - - for x, y in zip(expected_xpts, expected_ypts): - self.assertEqual( - y, - lookup_extrapolation(x, xpts, ypts), - "Wrong result at X=" + str(x)) - - def test_lookup_discrete(self): - from pysd.py_backend.functions import lookup_discrete - - xpts = [0, 1, 2, 3, 5, 6, 7, 8] - ypts = [0, 0, 1, 1, -1, -1, 0, 0] - - expected_xpts = np.arange(-0.5, 8.6, 0.5) - expected_ypts = [ - 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, - -1, -1, -1, -1, - 0, 0, 0, 0 - ] - - for x, y in zip(expected_xpts, expected_ypts): - self.assertEqual( - y, - lookup_discrete(x, xpts, ypts), - "Wrong result at X=" + str(x)) - - class TestFunctions(unittest.TestCase): def test_sum(self): From 5085fe576a47fedd54632da91b2537c244c1ee0c Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 8 Apr 2022 13:02:37 +0200 Subject: [PATCH 29/96] Remove namespace from model file --- pysd/__init__.py | 2 +- pysd/building/python/imports.py | 2 +- pysd/building/python/python_model_builder.py | 35 ++++++++---- pysd/py_backend/components.py | 38 +++++++------ pysd/py_backend/statefuls.py | 53 +++++++++++-------- pysd/py_backend/utils.py | 13 ++--- .../test_circular_reference.py | 37 ++++++------- .../test_initialization_order.py | 40 ++++++-------- .../more-tests/type_error/test_type_error.py | 4 +- .../version/test_current_version.py | 10 +++- tests/more-tests/version/test_old_version.py | 1 - .../pytest_select_submodel.py | 8 +-- .../vensim_parser/pytest_split_views.py | 4 +- tests/unit_test_functions.py | 1 + tests/unit_test_pysd.py | 2 +- 15 files changed, 135 insertions(+), 115 deletions(-) diff --git a/pysd/__init__.py b/pysd/__init__.py index 5c967112..e0fbe6d2 100644 --- a/pysd/__init__.py +++ b/pysd/__init__.py @@ -1,4 +1,4 @@ from .pysd import read_vensim, read_xmile, load from .py_backend import functions, statefuls, utils, external -from .py_backend.components import component +from .py_backend.components import Component from ._version import __version__ diff --git a/pysd/building/python/imports.py b/pysd/building/python/imports.py index ed0549d1..53fade01 100644 --- a/pysd/building/python/imports.py +++ b/pysd/building/python/imports.py @@ -72,6 +72,6 @@ def get_header(self, outfile): "module": module, "methods": ", ".join(getattr(self, f"_{module}"))} - text 
+= "from pysd import component\n" + text += "from pysd import Component\n" return text diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index b5bff998..475dff1c 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -49,6 +49,12 @@ def __init__(self, abstract_section: AbstractSection): self.macrospace = {} self.dependencies = {} + # create parameters dict necessary in macros + self.params = { + key: self.namespace.namespace[key] + for key in self.params + } + def build_section(self): # Create namespace for element in self.elements: @@ -198,7 +204,7 @@ def _build_main_module(self, elements): # import of needed functions and packages text = self.imports.get_header(self.path.name) - # import namespace from json file + # import subscript dict and dependencies from json file text += textwrap.dedent(""" __pysd_version__ = '%(version)s' @@ -208,10 +214,14 @@ def _build_main_module(self, elements): } _root = Path(__file__).parent - - _namespace, _subscript_dict, _dependencies, _modules = load_model_data( + %(params)s + _subscript_dict, _dependencies, _modules = load_model_data( _root, "%(model_name)s") + + component = Component() """ % { + "params": f"\n _params = {self.params}\n" + if self.params else "", "model_name": self.model_name, "version": __version__ }) @@ -246,16 +256,17 @@ def _build(self): } _root = Path(__file__).parent - + %(params)s _subscript_dict = %(subscript_dict)s - _namespace = %(namespace)s - _dependencies = %(dependencies)s + + component = Component() """ % { "subscript_dict": repr(self.subscripts.subscripts), - "namespace": repr(self.namespace.namespace), "dependencies": repr(self.dependencies), + "params": f"\n _params = {self.params}\n" + if self.params else "", "version": __version__, }) @@ -367,7 +378,7 @@ def _init_outer_references(data): __data[key] = data[key] - @component(name="Time") + @component.add(name="Time") def time(): ''' Current time of the model. @@ -566,10 +577,12 @@ def build_element_out(self): self.arguments = 'x, final_subs=None' # define variable metadata for the @component decorator - meta_data = [f"name={repr(self.name)}"] + self.name = repr(self.name) + meta_data = ["name=%(name)s"] if self.units: - meta_data.append(f"units={repr(self.units)}") + meta_data.append("units=%(units)s") + self.units = repr(self.units) if self.limits: meta_data.append("limits=%(limits)s") if self.subscripts: @@ -583,7 +596,7 @@ def build_element_out(self): meta_data.append("comp_type='%(type)s'") meta_data.append("comp_subtype='%(subtype)s'") - self.meta_data = f"@component({', '.join(meta_data)})"\ + self.meta_data = f"@component.add({', '.join(meta_data)})"\ % self.__dict__ indent = 12 diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py index 21efe300..fb28bb08 100644 --- a/pysd/py_backend/components.py +++ b/pysd/py_backend/components.py @@ -12,22 +12,28 @@ from pysd._version import __version__ -def component(name, units=None, limits=(np.nan, np.nan), - subscripts=None, comp_type=None, comp_subtype=None): - """ - This decorators allows assigning metadata to a function. 
- """ - def decorator(function): - function.name = name - function.units = units - function.limits = limits - function.subscripts = subscripts - function.type = comp_type - function.subtype = comp_subtype - function.args = inspect.getfullargspec(function)[0] - return function - - return decorator +class Component(object): + + def __init__(self): + self.namespace = {} + + def add(self, name, units=None, limits=(np.nan, np.nan), + subscripts=None, comp_type=None, comp_subtype=None): + """ + This decorators allows assigning metadata to a function. + """ + def decorator(function): + function.name = name + function.units = units + function.limits = limits + function.subscripts = subscripts + function.type = comp_type + function.subtype = comp_subtype + function.args = inspect.getfullargspec(function)[0] + self.namespace[name] = function.__name__ + return function + + return decorator class Components(object): diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index d3796539..9c6d7352 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -624,11 +624,18 @@ def __init__(self, py_model_file, params=None, return_func=None, + "Please translate again the model with the function" + " read_vensim or read_xmile.") + self._namespace = self.components._components.component.namespace + self._doc = self._build_doc() + if params is not None: + # add params to namespace + self._namespace.update(self.components._components._params) + # create new components with the params self.set_components(params, new=True) + # update dependencies for param in params: self.components._dependencies[ - self.components._namespace[param]] = {"time"} + self._namespace[param]] = {"time"} # Get the collections of stateful elements and external elements self._stateful_elements = { @@ -670,6 +677,14 @@ def __init__(self, py_model_file, params=None, return_func=None, def __call__(self): return self.return_func() + @property + def doc(self): + return self._doc.copy() + + @property + def namespace(self): + return self._namespace.copy() + def clean_caches(self): self.cache.clean() # if nested macros @@ -804,7 +819,7 @@ def _assign_cache_type(self): """ self.cache_type = {"time": None} - for element in self.components._namespace.values(): + for element in self._namespace.values(): if element not in self.cache_type\ and element in self.components._dependencies: self._assign_cache(element) @@ -1024,7 +1039,7 @@ def get_args(self, param): if isinstance(param, str): func_name = utils.get_key_and_value_by_insensitive_key_or_value( param, - self.components._namespace)[1] or param + self._namespace)[1] or param func = getattr(self.components, func_name) else: @@ -1064,7 +1079,7 @@ def get_coords(self, param): if isinstance(param, str): func_name = utils.get_key_and_value_by_insensitive_key_or_value( param, - self.components._namespace)[1] or param + self._namespace)[1] or param func = getattr(self.components, func_name) @@ -1114,7 +1129,7 @@ def __getitem__(self, param): """ func_name = utils.get_key_and_value_by_insensitive_key_or_value( param, - self.components._namespace)[1] or param + self._namespace)[1] or param if self.get_args(getattr(self.components, func_name)): raise ValueError( @@ -1147,7 +1162,7 @@ def get_series_data(self, param): """ func_name = utils.get_key_and_value_by_insensitive_key_or_value( param, - self.components._namespace)[1] or param + self._namespace)[1] or param try: if func_name.startswith("_ext_"): @@ -1188,7 +1203,7 @@ def set_components(self, params, new=False): for 
key, value in params.items(): func_name = utils.get_key_and_value_by_insensitive_key_or_value( key, - self.components._namespace)[1] + self._namespace)[1] if isinstance(value, np.ndarray) or isinstance(value, list): raise TypeError( @@ -1335,7 +1350,7 @@ def set_initial_value(self, t, initial_value): for key, value in initial_value.items(): component_name =\ utils.get_key_and_value_by_insensitive_key_or_value( - key, self.components._namespace)[1] + key, self._namespace)[1] if component_name is not None: if self.components._dependencies[component_name]: deps = list(self.components._dependencies[component_name]) @@ -1421,13 +1436,6 @@ def set_stateful(self, stateful_dict): for attr, value in attrs.items(): setattr(getattr(self.components, element), attr, value) - @property - def doc(self): - return self._doc.copy() - - def namespace(self): - return self.components._namespace.copy() - def subscript_dict(self): return self.components._subscript_dict.copy() @@ -1447,7 +1455,7 @@ def _build_doc(self): - Documentation strings from the original model file """ collector = [] - for name, pyname in self.components._namespace.items(): + for name, pyname in self._namespace.items(): element = getattr(self.components, pyname) collector.append({ 'Real Name': name, @@ -1491,7 +1499,6 @@ def __init__(self, py_model_file, data_files, initialize, missing_values): self.time.set_control_vars(**self.components._control_vars) self.data_files = data_files self.missing_values = missing_values - self._doc = self._build_doc() if initialize: self.initialize() @@ -1613,7 +1620,7 @@ def run(self, params=None, return_columns=None, return_timestamps=None, return_columns = self._default_return_columns(return_columns) capture_elements, return_addresses = utils.get_return_elements( - return_columns, self.components._namespace) + return_columns, self._namespace) # create a dictionary splitting run cached and others capture_elements = self._split_capture_elements(capture_elements) @@ -1721,9 +1728,9 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): all_vars.update(c_vars) # clean dependendies and namespace dictionaries - for real_name, py_name in self.components._namespace.copy().items(): + for real_name, py_name in self._namespace.copy().items(): if py_name not in all_vars: - del self.components._namespace[real_name] + del self._namespace[real_name] del self.components._dependencies[py_name] for py_name in self.components._dependencies.copy().keys(): @@ -1763,7 +1770,7 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): { utils.get_key_and_value_by_insensitive_key_or_value( key, - self.components._namespace)[1]: value + self._namespace)[1]: value }) for key, value in exogenous_components.items()] self.set_components(new_components) @@ -1920,7 +1927,7 @@ def check_dep(dependencies, initial=False): for var in vars: py_name = utils.get_key_and_value_by_insensitive_key_or_value( var, - self.components._namespace)[1] + self._namespace)[1] c_vars.add(py_name) for module in modules: c_vars.update(self.get_vars_in_module(module)) @@ -2011,7 +2018,7 @@ def _default_return_columns(self, which): return_columns = [] - for key, pykey in self.components._namespace.items(): + for key, pykey in self._namespace.items(): if pykey in self.cache_type and self.cache_type[pykey] in types\ and not self.get_args(pykey): diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index ed46732e..c8934000 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -302,7 +302,7 @@ def 
load_model_data(root, model_name): """ Used for models split in several files. - Loads subscripts_dic, namespace and modules dictionaries + Loads subscripts, depenencies and modules dictionaries Parameters ---------- @@ -314,14 +314,13 @@ def load_model_data(root, model_name): Returns ------- - namespace: dict - Translation from original model element names (keys) to python safe - function identifiers (values). - subscripts: dict Dictionary describing the possible dimensions of the stock's subscripts. + dependencies: dict + DIctionary containing the dependencies of each model component. + modules: dict Dictionary containing view (module) names as keys and a list of the corresponding variables as values. @@ -335,8 +334,6 @@ def load_model_data(root, model_name): with open(root.joinpath("_subscripts_" + model_name + ".json")) as subs: subscripts = json.load(subs) - with open(root.joinpath("_namespace_" + model_name + ".json")) as names: - namespace = json.load(names) with open(root.joinpath("_dependencies_" + model_name + ".json")) as deps: dependencies = json.load(deps) @@ -345,7 +342,7 @@ def load_model_data(root, model_name): with open(root.joinpath("modules_" + model_name, "_modules.json")) as mods: modules = json.load(mods) - return namespace, subscripts, dependencies, modules + return subscripts, dependencies, modules def load_modules(module_name, module_content, work_dir, submodules): diff --git a/tests/more-tests/circular_reference/test_circular_reference.py b/tests/more-tests/circular_reference/test_circular_reference.py index 2dd40476..fb4451ad 100644 --- a/tests/more-tests/circular_reference/test_circular_reference.py +++ b/tests/more-tests/circular_reference/test_circular_reference.py @@ -1,8 +1,7 @@ from pysd.py_backend.statefuls import Integ, Delay -from pysd import component +from pysd import Component _subscript_dict = {} -_namespace = {'integ': 'integ', 'delay': 'delay'} _dependencies = { 'integ': {'_integ_integ': 1}, 'delay': {'_delay_delay': 1}, @@ -13,54 +12,52 @@ __data = {'scope': None, 'time': lambda: 0} +component = Component() + +_control_vars = { + "initial_time": lambda: 0, + "final_time": lambda: 0.5, + "time_step": lambda: 0.5, + "saveper": lambda: time_step(), +} + def _init_outer_references(data): for key in data: __data[key] = data[key] +@component.add(name="Time") def time(): return __data["time"]() -def _time_step(): - return 0.5 - - -def _initial_time(): - return 0 - - -def _final_time(): - return 0.5 - - -def _saveper(): - return 0.5 - - +@component.add(name="Time step") def time_step(): return __data["time"].step() +@component.add(name="Initial time") def initial_time(): return __data["time"].initial() +@component.add(name="Final time") def final_time(): return __data["time"].final() +@component.add(name="Saveper") def saveper(): return __data["time"].save() -@component(name="Integ") +@component.add(name="Integ") def integ(): return _integ_integ() -@component(name="Delay") +@component.add(name="Delay") def delay(): return _delay_delay() diff --git a/tests/more-tests/initialization_order/test_initialization_order.py b/tests/more-tests/initialization_order/test_initialization_order.py index 48e409ab..e50387d9 100644 --- a/tests/more-tests/initialization_order/test_initialization_order.py +++ b/tests/more-tests/initialization_order/test_initialization_order.py @@ -5,38 +5,28 @@ from pysd.py_backend.statefuls import Integ -from pysd import component +from pysd import Component __pysd_version__ = "3.0.0" _subscript_dict = {} -_namespace = { - "TIME": "time", - 
"Time": "time", - "Stock B": "stock_b", - "Stock A": "stock_a", - "Initial Parameter": "initial_parameter", - "FINAL TIME": "final_time", - "INITIAL TIME": "initial_time", - "SAVEPER": "saveper", - "TIME STEP": "time_step", -} - _dependencies = { 'initial_time': {}, 'final_time': {}, 'time_step': {}, 'saveper': {'time_step': 1}, - 'initial_parameter': {}, + 'initial_par': {}, 'stock_a': {'_integ_stock_a': 1}, 'stock_b': {'_integ_stock_b': 1}, - '_integ_stock_a': {'initial': {'initial_parameter': 1}, 'step': {}}, + '_integ_stock_a': {'initial': {'initial_par': 1}, 'step': {}}, '_integ_stock_b': {'initial': {'stock_a': 1}, 'step': {}} } __data = {"scope": None, "time": lambda: 0} +component = Component() + _control_vars = { "initial_time": lambda: 0, "final_time": lambda: 20, @@ -50,47 +40,47 @@ def _init_outer_references(data): __data[key] = data[key] -@component(name="Time") +@component.add(name="Time") def time(): return __data["time"]() -@component(name="Initial time") +@component.add(name="Initial time") def initial_time(): return __data["time"].initial_time() -@component(name="Final time") +@component.add(name="Final time") def final_time(): return __data["time"].final_time() -@component(name="Time step") +@component.add(name="Time step") def time_step(): return __data["time"].time_step() -@component(name="Saveper") +@component.add(name="Saveper") def saveper(): return __data["time"].saveper() -@component(name="Stock B") +@component.add(name="Stock B") def stock_b(): return _integ_stock_b() -@component(name="Stock A") +@component.add(name="Stock A") def stock_a(): return _integ_stock_a() -@component(name="Initial parameter") -def initial_parameter(): +@component.add(name="Initial par") +def initial_par(): return 42 _integ_stock_b = Integ(lambda: 1, lambda: stock_a(), "_integ_stock_b") -_integ_stock_a = Integ(lambda: 1, lambda: initial_parameter(), "_integ_stock_a") +_integ_stock_a = Integ(lambda: 1, lambda: initial_par(), "_integ_stock_a") diff --git a/tests/more-tests/type_error/test_type_error.py b/tests/more-tests/type_error/test_type_error.py index 41b7c4a4..c44f18cf 100644 --- a/tests/more-tests/type_error/test_type_error.py +++ b/tests/more-tests/type_error/test_type_error.py @@ -1,6 +1,8 @@ -from pysd import external +from pysd import external, Component __pysd_version__ = "3.0.0" _root = './' +component = Component() + external.ExtData('input.xlsx', 'Sheet1', '5', 'B6') diff --git a/tests/more-tests/version/test_current_version.py b/tests/more-tests/version/test_current_version.py index e42610f0..dcc7e5b9 100644 --- a/tests/more-tests/version/test_current_version.py +++ b/tests/more-tests/version/test_current_version.py @@ -1,6 +1,7 @@ +from pysd import Component + __pysd_version__ = "3.0.0" -_namespace = {} _dependencies = {} __data = {'scope': None, 'time': lambda: 0} @@ -12,27 +13,34 @@ "saveper": lambda: time_step() } +component = Component() + def _init_outer_references(data): for key in data: __data[key] = data[key] +@component.add(name="Time") def time(): return __data["time"]() +@component.add(name="Initial time") def initial_time(): return __data["time"].initial_time() +@component.add(name="Final time") def final_time(): return __data["time"].final_time() +@component.add(name="Time step") def time_step(): return __data["time"].time_step() +@component.add(name="Saveper") def saveper(): return __data["time"].saveper() diff --git a/tests/more-tests/version/test_old_version.py b/tests/more-tests/version/test_old_version.py index e833a17f..64b1f21d 100644 --- 
a/tests/more-tests/version/test_old_version.py +++ b/tests/more-tests/version/test_old_version.py @@ -1,6 +1,5 @@ __pysd_version__ = "1.5.0" -_namespace = {} _dependencies = {} __data = {'scope': None, 'time': lambda: 0} diff --git a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py index e868e06d..bddaf916 100644 --- a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py +++ b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py @@ -119,11 +119,11 @@ def test_select_submodel(self, model, variables, modules, assert "_integ_other_stock" in model._stateful_elements assert "_integ_other_stock" in model.components._dependencies assert "other_stock" in model.components._dependencies - assert "other stock" in model.components._namespace + assert "other stock" in model._namespace assert "_integ_stock" in model._stateful_elements assert "_integ_stock" in model.components._dependencies assert "stock" in model.components._dependencies - assert "Stock" in model.components._namespace + assert "Stock" in model._namespace # select submodel with pytest.warns(UserWarning) as record: @@ -137,11 +137,11 @@ def test_select_submodel(self, model, variables, modules, assert "_integ_other_stock" not in model._stateful_elements assert "_integ_other_stock" not in model.components._dependencies assert "other_stock" not in model.components._dependencies - assert "other stock" not in model.components._namespace + assert "other stock" not in model._namespace assert "_integ_stock" in model._stateful_elements assert "_integ_stock" in model.components._dependencies assert "stock" in model.components._dependencies - assert "Stock" in model.components._namespace + assert "Stock" in model._namespace if not dep_vars: # totally independent submodels can run without producing diff --git a/tests/pytest_translation/vensim_parser/pytest_split_views.py b/tests/pytest_translation/vensim_parser/pytest_split_views.py index 1267bfe1..c8223869 100644 --- a/tests/pytest_translation/vensim_parser/pytest_split_views.py +++ b/tests/pytest_translation/vensim_parser/pytest_split_views.py @@ -112,7 +112,7 @@ def test_read_vensim_split_model(self, model_file, subview_sep, assert file.is_file(), f"File {file} has not been created..." 
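The assertions above and below rely on the namespace now living on the Model object instead of in the generated file; a rough usage sketch, assuming the Teacup example model used in the documentation:

    >>> import pysd
    >>> model = pysd.read_vensim('Teacup.mdl')
    >>> model.namespace['Room Temperature']
    'room_temperature'

model.namespace is a property that returns a copy of the internal _namespace dict, rebuilt from the Component object of the translated file, which is why the tests now check model._namespace rather than model.components._namespace.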
# check the dictionaries - assert isinstance(model_split.components._namespace, dict) + assert isinstance(model_split._namespace, dict) assert isinstance(model_split.components._subscript_dict, dict) assert isinstance(model_split.components._dependencies, dict) assert isinstance(model_split.components._modules, dict) @@ -124,7 +124,7 @@ def test_read_vensim_split_model(self, model_file, subview_sep, # assert that original variables are in the namespace for var in original_vars: - assert var in model_split.components._namespace.keys() + assert var in model_split._namespace.keys() # assert that the functions are not defined in the main file model_py_file = model_file.with_suffix(".py") diff --git a/tests/unit_test_functions.py b/tests/unit_test_functions.py index 8f1c8c0b..97a30e73 100644 --- a/tests/unit_test_functions.py +++ b/tests/unit_test_functions.py @@ -121,6 +121,7 @@ def test_zidz(self): self.assertEqual(zidz(1, 0), 0) self.assertEqual(zidz(1, 8), 0.125) + class TestLogicFunctions(unittest.TestCase): def test_if_then_else_basic(self): from pysd.py_backend.functions import if_then_else diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 7546c46c..93eac14f 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -734,7 +734,7 @@ def test_initialize_order(self): ["_integ_stock_a", "_integ_stock_b"]) self.assertEqual(model.components.stock_b(), 42) self.assertEqual(model.components.stock_a(), 42) - model.components.initial_parameter = lambda: 1 + model.components.initial_par = lambda: 1 model.initialize() self.assertEqual(model.components.stock_b(), 1) self.assertEqual(model.components.stock_a(), 1) From 6b44a29d8d29bfac2d1525d7b46bff9384b94ff2 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 8 Apr 2022 16:13:28 +0200 Subject: [PATCH 30/96] Improve user interaction with lookups and data --- .../python/python_expressions_builder.py | 16 +- pysd/py_backend/data.py | 37 +++- pysd/py_backend/lookups.py | 36 +++- pysd/py_backend/statefuls.py | 172 ++++++++---------- tests/pytest_types/data/pytest_data.py | 16 +- tests/unit_test_pysd.py | 6 +- 6 files changed, 161 insertions(+), 122 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 82b87227..3e12afb2 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -458,7 +458,10 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "(x, final_subs)", - calls={arguments["name"]: 1, "__lookup__": None}, + calls={ + "__external__": arguments["name"], + "__lookup__": arguments["name"] + }, subscripts=final_subs, order=0) @@ -513,7 +516,10 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "(time())", - calls={arguments["name"]: 1, "time": 1}, + calls={ + "__external__": arguments["name"], + "__data__": arguments["name"], + "time": 1}, subscripts=final_subs, order=0) @@ -565,7 +571,7 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "()", - calls={arguments["name"]: 1}, + calls={"__external__": arguments["name"]}, subscripts=final_subs, order=0) @@ -600,7 +606,7 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "(time())", - calls={"time": 1, "__data__": None}, + calls={"time": 1, "__data__": arguments["name"]}, subscripts=final_subs, order=0) @@ -1002,7 +1008,7 @@ def build(self, arguments): return BuildAST( expression=arguments["name"] + "(x, 
final_subs)", - calls={"__lookup__": None}, + calls={"__lookup__": arguments["name"]}, subscripts=self.def_subs, order=0) diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index 375dd32a..e4b7a94f 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -181,6 +181,32 @@ class Data(object): # as Data # def __init__(self, data, coords, interp="interpolate"): + def set_values(self, values): + """Set new values from user input""" + self.data = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + + if isinstance(values, pd.Series): + index = list(values.index) + index.sort() + self.data = self.data.expand_dims( + {'time': index}, axis=0).copy() + + for index, value in values.items(): + if isinstance(values.values[0], xr.DataArray): + self.data.loc[index].loc[value.coords] =\ + value + else: + self.data.loc[index] = value + else: + if isinstance(values, xr.DataArray): + self.data.loc[values.coords] = values.values + else: + if self.final_coords: + self.data.loc[:] = values + else: + self.data = values + def __call__(self, time): try: if time in self.data['time'].values: @@ -210,15 +236,18 @@ def __call__(self, time): else: # Remove time coord from the DataArray return outdata.reset_coords('time', drop=True) - except Exception as err: + except (TypeError, KeyError): if self.data is None: raise ValueError( self.py_name + "\n" "Trying to interpolate data variable before loading" " the data...") - else: - # raise any other possible error - raise err + + # this except catch the errors when a data has been + # changed to a constant value by the user + return self.data + except Exception as err: + raise err class TabData(Data): diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index 61389695..3a8ecee1 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -1,5 +1,6 @@ import warnings +import pandas as pd import numpy as np import xarray as xr @@ -11,8 +12,40 @@ class Lookups(object): # as Lookups # def __init__(self, data, coords, interp="interpolate"): + def set_values(self, values): + """Set new values from user input""" + self.data = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + + if isinstance(values, pd.Series): + index = list(values.index) + index.sort() + self.data = self.data.expand_dims( + {'lookup_dim': index}, axis=0).copy() + + for index, value in values.items(): + if isinstance(values.values[0], xr.DataArray): + self.data.loc[index].loc[value.coords] =\ + value + else: + self.data.loc[index] = value + else: + if isinstance(values, xr.DataArray): + self.data.loc[values.coords] = values.values + else: + if self.final_coords: + self.data.loc[:] = values + else: + self.data = values + def __call__(self, x, final_subs=None): - return self._call(self.data, x, final_subs) + try: + return self._call(self.data, x, final_subs) + except (TypeError, KeyError): + # this except catch the errors when a lookups has been + # changed to a constant valuue by the user + # TODO need to expand data to final_subs if they are given + return self.data def _call(self, data, x, final_subs=None): if isinstance(x, xr.DataArray): @@ -92,6 +125,7 @@ class HardcodedLookups(Lookups): def __init__(self, x, y, coords, interp, py_name): # TODO: avoid add and merge all declarations in one definition + # TODO: add final subs self.is_float = not bool(coords) self.py_name = py_name y = np.array(y).reshape((len(x),) + (1,)*len(coords)) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 
9c6d7352..710be700 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -625,6 +625,7 @@ def __init__(self, py_model_file, params=None, return_func=None, + " read_vensim or read_xmile.") self._namespace = self.components._components.component.namespace + self._dependencies = self.components._dependencies self._doc = self._build_doc() if params is not None: @@ -634,7 +635,7 @@ def __init__(self, py_model_file, params=None, return_func=None, self.set_components(params, new=True) # update dependencies for param in params: - self.components._dependencies[ + self._dependencies[ self._namespace[param]] = {"time"} # Get the collections of stateful elements and external elements @@ -685,6 +686,10 @@ def doc(self): def namespace(self): return self._namespace.copy() + @property + def dependencies(self): + return self._dependencies.copy() + def clean_caches(self): self.cache.clean() # if nested macros @@ -717,7 +722,7 @@ def _get_initialize_order(self): # includying all levels self.stateful_initial_dependencies = { ext: set() - for ext in self.components._dependencies + for ext in self._dependencies if (ext.startswith("_") and not ext.startswith("_active_initial_")) } for element in self.stateful_initial_dependencies: @@ -781,12 +786,12 @@ def _get_full_dependencies(self, element, dep_set, stateful_deps): None """ - deps = self.components._dependencies[element] + deps = self._dependencies[element] if element.startswith("_"): deps = deps[stateful_deps] for dep in deps: if dep not in dep_set and not dep.startswith("__")\ - and not dep.startswith("_ext") and dep != "time": + and dep != "time": dep_set.add(dep) self._get_full_dependencies(dep, dep_set, stateful_deps) @@ -821,7 +826,7 @@ def _assign_cache_type(self): for element in self._namespace.values(): if element not in self.cache_type\ - and element in self.components._dependencies: + and element in self._dependencies: self._assign_cache(element) for element, cache_type in self.cache_type.items(): @@ -835,22 +840,20 @@ def _assign_cache_type(self): def _count_calls(self, element): n_calls = 0 - for subelement in self.components._dependencies: + for subelement in self._dependencies: if subelement.startswith("_") and\ - element in self.components._dependencies[subelement]["step"]: + element in self._dependencies[subelement]["step"]: if element in\ - self.components._dependencies[subelement]["initial"]: + self._dependencies[subelement]["initial"]: n_calls +=\ - 2*self.components._dependencies[subelement][ - "step"][element] + 2*self._dependencies[subelement]["step"][element] else: n_calls +=\ - self.components._dependencies[subelement][ - "step"][element] + self._dependencies[subelement]["step"][element] elif (not subelement.startswith("_") and - element in self.components._dependencies[subelement]): + element in self._dependencies[subelement]): n_calls +=\ - self.components._dependencies[subelement][element] + self._dependencies[subelement][element] return n_calls @@ -869,18 +872,17 @@ def _assign_cache(self, element): None """ - if not self.components._dependencies[element]: + if not self._dependencies[element]: self.cache_type[element] = "run" - elif "__lookup__" in self.components._dependencies[element]: + elif "__lookup__" in self._dependencies[element]: self.cache_type[element] = None - elif self._isdynamic(self.components._dependencies[element]): + elif self._isdynamic(self._dependencies[element]): self.cache_type[element] = "step" else: self.cache_type[element] = "run" - for subelement in 
self.components._dependencies[element]: + for subelement in self._dependencies[element]: if subelement.startswith("_initial_")\ - or subelement.startswith("__")\ - or subelement.startswith("_ext_"): + or subelement.startswith("__"): continue if subelement not in self.cache_type: self._assign_cache(subelement) @@ -906,7 +908,7 @@ def _isdynamic(self, dependencies): return True for dep in dependencies: if dep.startswith("_") and not dep.startswith("_initial_")\ - and not dep.startswith("__") and not dep.startswith("_ext_"): + and not dep.startswith("__"): return True return False @@ -1164,20 +1166,22 @@ def get_series_data(self, param): param, self._namespace)[1] or param - try: - if func_name.startswith("_ext_"): - return getattr(self.components, func_name).data - elif self.get_args(getattr(self.components, func_name)): - return getattr(self.components, - "_ext_lookup_" + func_name).data - else: - return getattr(self.components, - "_ext_data_" + func_name).data - except NameError: + if func_name.startswith("_ext_"): + return getattr(self.components, func_name).data + elif "__data__" in self._dependencies[func_name]: + return getattr( + self.components, + self._dependencies[func_name]["__data__"] + ).data + elif "__lookup__" in self._dependencies[func_name]: + return getattr( + self.components, + self._dependencies[func_name]["__lookup__"] + ).data + else: raise ValueError( - "Trying to get the values of a hardcoded lookup/data or " - "other type of variable. 'model.get_series_data' only works " - "with external lookups/data objects.\n\n") + "Trying to get the values of a constant variable. " + "'model.get_series_data' only works lookups/data objects.\n\n") def set_components(self, params, new=False): """ Set the value of exogenous model elements. @@ -1218,34 +1222,48 @@ def set_components(self, params, new=False): % key) if new: - dims, args = None, None + func = None + dims = None else: func = getattr(self.components, func_name) _, dims = self.get_coords(func) or (None, None) - args = self.get_args(func) + + # if the variable is a lookup or a data we perform the change in + # the object they call + if getattr(func, "type", None) == "Lookup": + getattr( + self.components, + self._dependencies[func_name]["__lookup__"] + ).set_values(value) + continue + elif getattr(func, "type", None) == "Data": + getattr( + self.components, + self._dependencies[func_name]["__data__"] + ).set_values(value) + continue if isinstance(value, pd.Series): new_function, deps = self._timeseries_component( - value, dims, args) - self.components._dependencies[func_name] = deps + value, dims) + self._dependencies[func_name] = deps elif callable(value): new_function = value args = self.get_args(value) if args: # user function needs arguments, add it as a lookup # to avoud caching it - self.components._dependencies[func_name] =\ - {"__lookup__": None} + self._dependencies[func_name] = {"__lookup__": None} else: # TODO it would be better if we can parse the content # of the function to get all the dependencies # user function takes no arguments, using step cache # adding time as dependency - self.components._dependencies[func_name] = {"time": 1} + self._dependencies[func_name] = {"time": 1} else: - new_function = self._constant_component(value, dims, args) - self.components._dependencies[func_name] = {} + new_function = self._constant_component(value, dims) + self._dependencies[func_name] = {} # this won't handle other statefuls... 
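For reference, a sketch of how this method is normally reached from user code, assuming the Teacup example model and that Room Temperature is an ordinary constant rather than a Lookup or Data element:

    >>> import pandas as pd
    >>> model.set_components({'Room Temperature': 20})
    >>> temp = pd.Series(index=range(30), data=range(20, 80, 2))
    >>> model.set_components({'Room Temperature': temp})

A constant value gets an empty entry in _dependencies, while a time series is registered with {'time': 1} so it is interpolated again at every step; Lookup and Data components are instead redirected to the set_values method of their underlying objects, as handled earlier in this method.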
if '_integ_' + func_name in dir(self.components): @@ -1260,23 +1278,15 @@ def set_components(self, params, new=False): if func_name in self.cache.cached_funcs: self.cache.cached_funcs.remove(func_name) - def _timeseries_component(self, series, dims, args=[]): + def _timeseries_component(self, series, dims): """ Internal function for creating a timeseries model element """ # this is only called if the set_component function recognizes a # pandas series # TODO: raise a warning if extrapolating from the end of the series. # TODO: data type variables should be creted using a Data object # lookup type variables should be created using a Lookup object - if isinstance(series.values[0], xr.DataArray) and args: - # the argument is already given in the model when the model - # is called - return lambda x, final_subs: utils.rearrange(xr.concat( - series.values, - series.index).interp(concat_dim=x).reset_coords( - 'concat_dim', drop=True), - dims, self.components._subscript_dict), {'__lookup__': None} - elif isinstance(series.values[0], xr.DataArray): + if isinstance(series.values[0], xr.DataArray): # the interpolation will be time dependent return lambda: utils.rearrange(xr.concat( series.values, @@ -1284,19 +1294,6 @@ def _timeseries_component(self, series, dims, args=[]): 'concat_dim', drop=True), dims, self.components._subscript_dict), {'time': 1} - elif args and dims: - # the argument is already given in the model when the model - # is called - return lambda x, final_subs: utils.rearrange( - np.interp(x, series.index, series.values), - dims, self.components._subscript_dict), {'__lookup__': None} - - elif args: - # the argument is already given in the model when the model - # is called - return lambda x, final_subs:\ - np.interp(x, series.index, series.values), {'__lookup__': None} - elif dims: # the interpolation will be time dependent return lambda: utils.rearrange( @@ -1309,20 +1306,9 @@ def _timeseries_component(self, series, dims, args=[]): np.interp(self.time(), series.index, series.values),\ {'time': 1} - def _constant_component(self, value, dims, args=[]): + def _constant_component(self, value, dims): """ Internal function for creating a constant model element """ - if args and dims: - # need to pass an argument to keep consistency with the calls - # to the function - return lambda x: utils.rearrange( - value, dims, self.components._subscript_dict) - - elif args: - # need to pass an argument to keep consistency with the calls - # to the function - return lambda x: value - - elif dims: + if dims: return lambda: utils.rearrange( value, dims, self.components._subscript_dict) @@ -1352,8 +1338,8 @@ def set_initial_value(self, t, initial_value): utils.get_key_and_value_by_insensitive_key_or_value( key, self._namespace)[1] if component_name is not None: - if self.components._dependencies[component_name]: - deps = list(self.components._dependencies[component_name]) + if self._dependencies[component_name]: + deps = list(self._dependencies[component_name]) if len(deps) == 1 and deps[0] in self.initialize_order: stateful_name = deps[0] else: @@ -1626,7 +1612,7 @@ def run(self, params=None, return_columns=None, return_timestamps=None, capture_elements = self._split_capture_elements(capture_elements) # include outputs in cache if needed - self.components._dependencies["OUTPUTS"] = { + self._dependencies["OUTPUTS"] = { element: 1 for element in capture_elements["step"] } if cache_output: @@ -1640,7 +1626,7 @@ def run(self, params=None, return_columns=None, return_timestamps=None, res = 
self._integrate(capture_elements['step']) - del self.components._dependencies["OUTPUTS"] + del self._dependencies["OUTPUTS"] self._add_run_elements(res, capture_elements['run']) self._remove_constant_cache() @@ -1731,11 +1717,11 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): for real_name, py_name in self._namespace.copy().items(): if py_name not in all_vars: del self._namespace[real_name] - del self.components._dependencies[py_name] + del self._dependencies[py_name] - for py_name in self.components._dependencies.copy().keys(): + for py_name in self._dependencies.copy().keys(): if py_name.startswith("_") and py_name not in s_deps: - del self.components._dependencies[py_name] + del self._dependencies[py_name] # reassing the dictionary and lists of needed stateful objects self._stateful_elements = { @@ -1754,10 +1740,9 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): # keeping only needed external objects ext_deps = set() - for values in self.components._dependencies.values(): - for value in values: - if value.startswith("_ext_"): - ext_deps.add(value) + for values in self._dependencies.values(): + if "__external__" in values: + ext_deps.add(values["__external__"]) self._external_elements = [ getattr(self.components, name) for name in ext_deps if isinstance(getattr(self.components, name), External) @@ -1901,12 +1886,11 @@ def _get_dependencies(self, vars=[], modules=[]): """ def check_dep(dependencies, initial=False): for dep in dependencies: - if dep in c_vars or dep.startswith("__")\ - or dep.startswith("_ext_"): + if dep in c_vars or dep.startswith("__"): pass elif dep.startswith("_"): s_deps.add(dep) - dep = self.components._dependencies[dep] + dep = self._dependencies[dep] check_dep(dep["initial"], True) check_dep(dep["step"]) else: @@ -1935,7 +1919,7 @@ def check_dep(dependencies, initial=False): for var in c_vars: if var == "time": continue - check_dep(self.components._dependencies[var]) + check_dep(self._dependencies[var]) return c_vars, d_deps, s_deps diff --git a/tests/pytest_types/data/pytest_data.py b/tests/pytest_types/data/pytest_data.py index 4aebd8d1..1939b968 100644 --- a/tests/pytest_types/data/pytest_data.py +++ b/tests/pytest_types/data/pytest_data.py @@ -16,26 +16,14 @@ ), # test that try/except block on call doesn't catch errors differents # than data = None - ( # try_except_1 - 3, - None, - TypeError, - "'int' object is not subscriptable" - ), - ( # try_except_2 - xr.DataArray([10, 20], {'dim1': [0, 1]}, ['dim1']), - None, - KeyError, - "'time'" - ), - ( # try_except_3 + ( # try_except xr.DataArray([10, 20], {'time': [0, 1]}, ['time']), None, AttributeError, "'Data' object has no attribute 'is_float'" ) ], - ids=["not_loaded_data", "try_except_1", "try_except_2", "try_except_3"] + ids=["not_loaded_data", "try_except"] ) @pytest.mark.filterwarnings("ignore") class TestDataErrors(): diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 93eac14f..55e40369 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -1112,15 +1112,13 @@ def test_get_series_data(self): with self.assertRaises(ValueError) as err: model.get_series_data('Room Temperature') self.assertIn( - "Trying to get the values of a hardcoded lookup/data " - "or other type of variable.", + "Trying to get the values of a constant variable.", err.args[0]) with self.assertRaises(ValueError) as err: model.get_series_data('Teacup Temperature') self.assertIn( - "Trying to get the values of a hardcoded lookup/data " - "or other type of 
variable.", + "Trying to get the values of a constant variable.", err.args[0]) lookup_exp = xr.DataArray( From 617e39a7145482976b6b936df4e4629f3bdf0518 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 19 Apr 2022 10:41:34 +0200 Subject: [PATCH 31/96] Improve hardcoded lookups initialization --- .../python/python_expressions_builder.py | 5 +- pysd/py_backend/data.py | 1 + pysd/py_backend/lookups.py | 58 ++++++++++++++----- pysd/py_backend/statefuls.py | 14 +++++ 4 files changed, 61 insertions(+), 17 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 3e12afb2..6b5cb436 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -999,10 +999,13 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_hardcodedlookup") + arguments["final_subs"] = self.element.subs_dict + self.element.objects["hardcoded_lookups"] = { "name": arguments["name"], "expression": "%(name)s = HardcodedLookups(%(x)s, %(y)s, " - "%(subscripts)s, '%(interp)s', '%(name)s')" + "%(subscripts)s, '%(interp)s', " + "%(final_subs)s, '%(name)s')" % arguments } diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index e4b7a94f..a19ac935 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -258,6 +258,7 @@ def __init__(self, real_name, py_name, coords, interp="interpolate"): self.real_name = real_name self.py_name = py_name self.coords = coords + self.final_coords = coords self.interp = interp.replace(" ", "_") if interp else None self.is_float = not bool(coords) self.data = None diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index 3a8ecee1..bb218334 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -123,28 +123,54 @@ def _call(self, data, x, final_subs=None): class HardcodedLookups(Lookups): """Class for lookups defined in the file""" - def __init__(self, x, y, coords, interp, py_name): + def __init__(self, x, y, coords, interp, final_coords, py_name): # TODO: avoid add and merge all declarations in one definition - # TODO: add final subs self.is_float = not bool(coords) self.py_name = py_name - y = np.array(y).reshape((len(x),) + (1,)*len(coords)) - self.data = xr.DataArray( - np.tile(y, [1] + utils.compute_shape(coords)), - {"lookup_dim": x, **coords}, - ["lookup_dim"] + list(coords) - ) - self.x = set(x) + self.final_coords = final_coords + self.values = [(x, y, coords)] self.interp = interp def add(self, x, y, coords): - y = np.array(y).reshape((len(x),) + (1,)*len(coords)) - self.data = self.data.combine_first( - xr.DataArray( - np.tile(y, [1] + utils.compute_shape(coords)), - {"lookup_dim": x, **coords}, - ["lookup_dim"] + list(coords) - )) + self.values.append((x, y, coords)) + + def initialize(self): + """ + Initialize all elements and create the self.data xarray.DataArray + """ + if len(self.values) == 1: + # Just loag one value (no add) + for x, y, coords in self.values: + y = np.array(y).reshape((len(x),) + (1,)*len(coords)) + self.data = xr.DataArray( + np.tile(y, [1] + utils.compute_shape(coords)), + {"lookup_dim": x, **coords}, + ["lookup_dim"] + list(coords) + ) + else: + # Load in several lines (add) + self.data = xr.DataArray( + np.nan, self.final_coords, list(self.final_coords)) + + for x, y, coords in self.values: + if "lookup_dim" not in self.data.dims: + # include lookup_dim dimension in the final 
array + self.data = self.data.expand_dims( + {"lookup_dim": x}, axis=0).copy() + else: + # add new coordinates (if needed) to lookup_dim + x_old = list(self.data.lookup_dim.values) + x_new = list(set(x).difference(x_old)) + self.data = self.data.reindex(lookup_dim=x_old+x_new) + + # reshape y value and assign it to self.data + y = np.array(y).reshape((len(x),) + (1,)*len(coords)) + self.data.loc[{"lookup_dim": x, **coords}] =\ + np.tile(y, [1] + utils.compute_shape(coords)) + + # sort data + self.data = self.data.sortby("lookup_dim") + if np.any(np.isnan(self.data)): # fill missing values of different input lookup_dim values values = self.data.values diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 710be700..1cfbc335 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -17,6 +17,7 @@ class objects. from .external import External, Excels from .cache import Cache, constant_cache from .data import TabData +from .lookups import HardcodedLookups from .components import Components, Time from pysd._version import __version__ @@ -610,6 +611,7 @@ def __init__(self, py_model_file, params=None, return_func=None, self.cache = Cache() self.py_name = py_name self.external_loaded = False + self.lookups_loaded = False self.components = Components(str(py_model_file), self.set_components) if __version__.split(".")[0]\ @@ -662,6 +664,11 @@ def __init__(self, py_model_file, params=None, return_func=None, if isinstance(getattr(self.components, name), TabData) ] + self._lookup_elements = [ + getattr(self.components, name) for name in dir(self.components) + if isinstance(getattr(self.components, name), HardcodedLookups) + ] + if data_files: self._get_data(data_files) @@ -937,6 +944,13 @@ def initialize(self): 'time': self.time }) + if not self.lookups_loaded: + # Initialize HardcodedLookups elements + for element in self._lookup_elements: + element.initialize() + + self.lookups_loaded = True + if not self.external_loaded: # Initialize external elements for element in self._external_elements: From 401bc7cc997dd16543fca8213e23350eef891d47 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 19 Apr 2022 11:36:32 +0200 Subject: [PATCH 32/96] Remove namespace.json from split models --- pysd/building/python/python_model_builder.py | 1 - .../pytest_translation/vensim_parser/pytest_split_views.py | 1 - tests/unit_test_cli.py | 7 +------ 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 475dff1c..99179ffb 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -123,7 +123,6 @@ def _build_modular(self, elements_per_view): for file, values in { "modules_%s/_modules": elements_per_view, - "_namespace_%s": self.namespace.namespace, "_subscripts_%s": self.subscripts.subscripts, "_dependencies_%s": self.dependencies}.items(): diff --git a/tests/pytest_translation/vensim_parser/pytest_split_views.py b/tests/pytest_translation/vensim_parser/pytest_split_views.py index c8223869..b4ae704b 100644 --- a/tests/pytest_translation/vensim_parser/pytest_split_views.py +++ b/tests/pytest_translation/vensim_parser/pytest_split_views.py @@ -85,7 +85,6 @@ def expected_files(self, shared_tmpdir, _root, model_path, ) modules_dir = shared_tmpdir.joinpath("modules_" + model_name) files = { - shared_tmpdir.joinpath("_namespace_" + model_name + ".json"), shared_tmpdir.joinpath("_subscripts_" + model_name + ".json"), 
shared_tmpdir.joinpath("_dependencies_" + model_name + ".json"), modules_dir.joinpath("_modules.json") diff --git a/tests/unit_test_cli.py b/tests/unit_test_cli.py index 36754f9e..ffd3a110 100644 --- a/tests/unit_test_cli.py +++ b/tests/unit_test_cli.py @@ -198,7 +198,6 @@ def test_read_vensim_split_model(self): root_dir = os.path.join(_root, "more-tests/split_model") + "/" model_name = "test_split_model" - namespace_filename = "_namespace_" + model_name + ".json" dependencies_filename = "_dependencies_" + model_name + ".json" subscript_filename = "_subscripts_" + model_name + ".json" modules_filename = "_modules.json" @@ -209,8 +208,7 @@ def test_read_vensim_split_model(self): out = subprocess.run(split_bash(command), capture_output=True) self.assertEqual(out.returncode, 0) - # check that _namespace and _subscript_dict json files where created - self.assertTrue(os.path.isfile(root_dir + namespace_filename)) + # check that _subscript_dict and dependencies json files where created self.assertTrue(os.path.isfile(root_dir + subscript_filename)) self.assertTrue(os.path.isfile(root_dir + dependencies_filename)) @@ -233,7 +231,6 @@ def test_read_vensim_split_model(self): # remove newly created files os.remove(root_dir + model_name + ".py") - os.remove(root_dir + namespace_filename) os.remove(root_dir + subscript_filename) os.remove(root_dir + dependencies_filename) @@ -254,7 +251,6 @@ def test_read_vensim_split_model_subviews(self): subview_sep=["."] ) - namespace_filename = "_namespace_" + model_name + ".json" subscript_filename = "_subscripts_" + model_name + ".json" dependencies_filename = "_dependencies_" + model_name + ".json" modules_dirname = "modules_" + model_name @@ -293,7 +289,6 @@ def test_read_vensim_split_model_subviews(self): # remove newly created files os.remove(root_dir + model_name + ".py") - os.remove(root_dir + namespace_filename) os.remove(root_dir + subscript_filename) os.remove(root_dir + dependencies_filename) From 2c10ec33acaf9336e57da4143059f9d5d4e7f16e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 19 Apr 2022 15:57:17 +0200 Subject: [PATCH 33/96] Remove None from doc --- pysd/py_backend/statefuls.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 1cfbc335..050d228e 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1471,7 +1471,6 @@ def _build_doc(self): if collector: docs_df = pd.DataFrame(collector) - docs_df.fillna("None", inplace=True) return docs_df.sort_values(by="Real Name").reset_index(drop=True) else: # manage models with no documentation (mainly test models) From a0712026428c4bc581c80c367eac1209ca6b413f Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 20 Apr 2022 11:16:05 +0200 Subject: [PATCH 34/96] Correct bug when selecting a submodel with active initial --- pysd/py_backend/statefuls.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 050d228e..ba8556e0 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1736,6 +1736,12 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): if py_name.startswith("_") and py_name not in s_deps: del self._dependencies[py_name] + # remove active initial from s_deps as they are "fake" objects + # in dependencies + s_deps = { + dep for dep in s_deps if not dep.startswith("_active_initial") + } + # reassing the dictionary and lists of needed stateful objects 
self._stateful_elements = { name: getattr(self.components, name) From c562bc29b925a78d7609f5bef64d94d9e09d671e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 20 Apr 2022 12:04:18 +0200 Subject: [PATCH 35/96] Update basic usage docs --- docs/basic_usage.rst | 67 +++++++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 26 deletions(-) diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index 2a67244c..df4d27c2 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -5,8 +5,9 @@ Importing a model and getting started ------------------------------------- To begin, we must first load the PySD module, and use it to import a supported model file:: - import pysd - model = pysd.read_vensim('Teacup.mdl') + >>> import pysd + >>> model = pysd.read_vensim('Teacup.mdl') + This code creates an instance of the PySD class loaded with an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling to room temperature. @@ -14,21 +15,35 @@ This code creates an instance of the PySD class loaded with an example model tha :width: 350 px :align: center +.. note:: + The teacupe model can be found in the `samples of the test-models repository `_. + To view a synopsis of the model equations and documentation, call the :py:func:`.doc()` method of the model class. This will generate a listing of all the model elements, their documentation, units, equations, and initial values, where appropriate. Here is a sample from the teacup model:: - >>> print model.doc() + >>> print(model.doc()) + + Real Name Py Name Unit Lims Type Subs Eqn Comment + 0 Characteristic Time characteristic_time Minutes (0.0, None) constant None 10 How long will it take the teacup to cool 1/e o... + 1 FINAL TIME final_time Minute (None, None) constant None 30 The final time for the simulation. + 2 Heat Loss to Room heat_loss_to_room Degrees Fahrenheit/Minute (None, None) component None (Teacup Temperature - Room Temperature) / Char... This is the rate at which heat flows from the ... + 3 INITIAL TIME initial_time Minute (None, None) constant None 0 The initial time for the simulation. + 4 Room Temperature room_temperature Degrees Fahrenheit (-459.67, None) constant None 70 Put in a check to ensure the room temperature ... + 5 SAVEPER saveper Minute (0.0, None) component None TIME STEP The frequency with which output is stored. + 6 TIME STEP time_step Minute (0.0, None) constant None 0.125 The time step for the simulation. + 7 Teacup Temperature teacup_temperature Degrees Fahrenheit (32.0, 212.0) component None INTEG ( -Heat Loss to Room, 180) The model is only valid for the liquid phase o... + .. note:: You can also load an already translated model file, what will be faster as you will load a Python file:: - import pysd - model = pysd.load('Teacup.py') + >>> import pysd + >>> model = pysd.load('Teacup.py') .. 
note:: The functions :py:func:`read_vensim()`, :py:func:`read_xmile()` and :py:func:`load()` have optional arguments for advanced usage, you can check the full description in :doc:`User Functions Reference <../functions>` or using :py:func:`help()` e.g.:: - import pysd - help(pysd.load) + >>> import pysd + >>> help(pysd.load) Running the Model @@ -46,9 +61,9 @@ The simplest way to simulate the model is to use the :py:func:`.run()` command w Pandas gives us simple plotting capability, so we can see how the cup of tea behaves:: - stocks.plot() - plt.ylabel('Degrees F') - plt.xlabel('Minutes') + >>> stocks.plot() + >>> plt.ylabel('Degrees F') + >>> plt.xlabel('Minutes') .. image:: images/Teacup_Cooling.png :width: 400 px @@ -116,29 +131,29 @@ In many cases, we want to modify the parameters of the model to investigate its This argument expects a dictionary whose keys correspond to the components of the model. The associated values can either be a constant, or a Pandas series whose indices are timestamps and whose values are the values that the model component should take on at the corresponding time. For instance, in our model we can set the room temperature to a constant value:: - model.run(params={'Room Temperature': 20}) + >>> model.run(params={'Room Temperature': 20}) Alternately, if we believe the room temperature is changing over the course of the simulation, we can give the run function a set of time-series values in the form of a Pandas series, and PySD will linearly interpolate between the given values in the course of its integration:: - import pandas as pd - temp = pd.Series(index=range(30), data=range(20, 80, 2)) - model.run(params={'Room Temperature':temp}) + >>> import pandas as pd + >>> temp = pd.Series(index=range(30), data=range(20, 80, 2)) + >>> model.run(params={'Room Temperature': temp}) If the parameter value to change is a subscripted variable (vector, matrix...), there are three different options to set new value. Suposse we have ‘Subscripted var’ with dims :py:data:`['dim1', 'dim2']` and coordinates :py:data:`{'dim1': [1, 2], 'dim2': [1, 2]}`. A constant value can be used and all the values will be replaced:: - model.run(params={'Subscripted var': 0}) + >>> model.run(params={'Subscripted var': 0}) A partial *xarray.DataArray* can be used, for example a new variable with ‘dim2’ but not ‘dim2’, the result will be repeated in the remaining dimensions:: - import xarray as xr - new_value = xr.DataArray([1,5], {'dim2': [1, 2]}, ['dim2']) - model.run(params={'Subscripted var': new_value}) + >>> import xarray as xr + >>> new_value = xr.DataArray([1, 5], {'dim2': [1, 2]}, ['dim2']) + >>> model.run(params={'Subscripted var': new_value}) Same dimensions *xarray.DataArray* can be used (recommended):: - import xarray as xr - new_value = xr.DataArray([[1,5],[3,4]], {'dim1': [1, 2], 'dim2': [1, 2]}, ['dim1', 'dim2']) - model.run(params={'Subscripted var': new_value}) + >>> import xarray as xr + >>> new_value = xr.DataArray([[1, 5], [3, 4]], {'dim1': [1, 2], 'dim2': [1, 2]}, ['dim1', 'dim2']) + >>> model.run(params={'Subscripted var': new_value}) In the same way, a Pandas series can be used with constan values, partially defined *xarray.DataArrays* or same dimensions *xarray.DataArrays*. @@ -177,14 +192,14 @@ Setting simulation initial conditions ------------------------------------- Finally, we can set the initial conditions of our model in several ways. So far, we’ve been using the default value for the initial_condition keyword argument, which is ‘original’. 
This value runs the model from the initial conditions that were specified originally by the model file. We can alternately specify a tuple containing the start time and a dictionary of values for the system’s stocks. Here we start the model with the tea at just above freezing:: - model.run(initial_condition=(0, {'Teacup Temperature': 33})) + >>> model.run(initial_condition=(0, {'Teacup Temperature': 33})) The new value setted can be a *xarray.DataArray* as it is explained in the previous section. Additionally we can run the model forward from its current position, by passing the initial_condition argument the keyword ‘current’. After having run the model from time zero to thirty, we can ask the model to continue running forward for another chunk of time:: - model.run(initial_condition='current', - return_timestamps=range(31,45)) + >>> model.run(initial_condition='current', + return_timestamps=range(31, 45)) The integration picks up at the last value returned in the previous run condition, and returns values at the requested timestamps. @@ -195,11 +210,11 @@ Querying current values ----------------------- We can easily access the current value of a model component using curly brackets. For instance, to find the temperature of the teacup, we simply call:: - model['Teacup Temperature'] + >>> model['Teacup Temperature'] If you try to get the current values of a lookup variable the previous method will fail as lookup variables take arguments. However, it is possible to get the full series of a lookup or data object with :py:func:`.get_series_data` method:: - model.get_series_data('Growth lookup') + >>> model.get_series_data('Growth lookup') Supported functions ------------------- From 58efc2253b86042cc9fdfda2f4addda9c8d8aa3f Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 20 Apr 2022 17:08:52 +0200 Subject: [PATCH 36/96] Add unit tests --- pysd/building/python/python_model_builder.py | 2 +- pysd/py_backend/statefuls.py | 2 +- pysd/translation/vensim/vensim_element.py | 2 +- .../test_split_model_sub_subviews.mdl | 28 +++++++++---------- .../pytest_select_submodel.py | 2 +- tests/unit_test_utils.py | 16 +++++++++++ 6 files changed, 34 insertions(+), 18 deletions(-) diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 99179ffb..bc4118e3 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -423,7 +423,7 @@ def _format_limits(self, limits): new_limits = [] for value in limits: value = repr(value) - if value == "nan" or value is None: + if value == "nan" or value == "None": self.section.imports.add("numpy") new_limits.append("np.nan") elif value.endswith("inf"): diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index ba8556e0..d34f47b5 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1585,7 +1585,7 @@ def run(self, params=None, return_columns=None, return_timestamps=None, -------- >>> model.run(params={'exogenous_constant': 42}) >>> model.run(params={'exogenous_variable': timeseries_input}) - >>> model.run(return_timestamps=[1, 2, 3.1415, 4, 10]) + >>> model.run(return_timestamps=[1, 2, 3, 4, 10]) >>> model.run(return_timestamps=10) >>> model.run(return_timestamps=np.linspace(1, 10, 20)) diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index e5ab0265..4f695b40 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ 
-84,7 +84,7 @@ def _parse_units(self, units_str: str) -> Tuple[str, tuple]: for x in lims.strip("]").split(",") ] ) - return units, lims + return units.strip(), lims def parse(self) -> object: """ diff --git a/tests/more-tests/split_model/test_split_model_sub_subviews.mdl b/tests/more-tests/split_model/test_split_model_sub_subviews.mdl index 74cf9fb2..9a90a4c1 100644 --- a/tests/more-tests/split_model/test_split_model_sub_subviews.mdl +++ b/tests/more-tests/split_model/test_split_model_sub_subviews.mdl @@ -2,63 +2,63 @@ other stock= INTEG ( var tolo, 0) - ~ + ~ ~ | interesting var 2 looked up= look up definition(interesting var 2) - ~ + ~ ~ | look up definition( (1,0), (10,1), (50,1.5), (100,4), (1000,5), (10000,3), (100000,4)) - ~ + ~ ~ | var tolo= 55+great var - ~ + ~ ~ | great var= 5 - ~ + ~ ~ | interesting var 1= "variable-x"+1 - ~ + ~ ~ | interesting var 2= interesting var 1*5 - ~ + ~ ~ | another var= 3*Stock - ~ + ~ ~ | "rate-1"= "var-n" - ~ + ~ ~ | "var-n"= 5 - ~ + ~ ~ | "variable-x"= - 6*another var - ~ + ACTIVE INITIAL(6*another var, 1) + ~ ~ | Stock= INTEG ( "rate-1", 1) - ~ + ~ ~ | ******************************************************** @@ -77,7 +77,7 @@ INITIAL TIME = 0 ~ The initial time for the simulation. | -SAVEPER = +SAVEPER = TIME STEP ~ Month [0,?] ~ The frequency with which output is stored. diff --git a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py index bddaf916..99c3654e 100644 --- a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py +++ b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py @@ -33,7 +33,7 @@ [".", "-"], ["variablex"], ["view_3/subview_1", "view_1/submodule_1"], - (12, 0, 1, 1, 1), + (12, 0, 1, 1, 2), {"another_var": 5, "look_up_definition": 3} ) ], diff --git a/tests/unit_test_utils.py b/tests/unit_test_utils.py index bb3f1be6..e859ab3a 100644 --- a/tests/unit_test_utils.py +++ b/tests/unit_test_utils.py @@ -133,6 +133,22 @@ def test_make_flat_df(self): self.assertEqual(set(actual.columns), set(expected.columns)) assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8) + def test_make_flat_df_0dxarray(self): + import pysd + + df = pd.DataFrame(index=[1], columns=['elem1']) + df.at[1] = [xr.DataArray(5)] + + expected = pd.DataFrame(index=[1], data={'Elem1': 5.}) + + return_addresses = {'Elem1': ('elem1', {})} + + actual = pysd.utils.make_flat_df(df, return_addresses, flatten=True) + + # check all columns are in the DataFrame + self.assertEqual(set(actual.columns), set(expected.columns)) + assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8) + def test_make_flat_df_nosubs(self): import pysd From b1c938b685123e7b15578e51fbf33a777f1565f8 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 21 Apr 2022 15:27:56 +0200 Subject: [PATCH 37/96] Document and correct some messages --- pysd/building/python/namespace.py | 59 +++++++++++++++----- pysd/building/python/python_model_builder.py | 4 ++ pysd/building/python/subscripts.py | 44 +++++++++++---- pysd/py_backend/external.py | 36 ++++++------ pysd/translation/vensim/vensim_element.py | 2 +- tests/unit_test_external.py | 2 +- 6 files changed, 102 insertions(+), 45 deletions(-) diff --git a/pysd/building/python/namespace.py b/pysd/building/python/namespace.py index 59f0a5aa..733d9cc7 100644 --- a/pysd/building/python/namespace.py +++ b/pysd/building/python/namespace.py @@ -1,6 +1,7 @@ import re from unicodedata import normalize +from typing import List # used to create python safe names with the 
variable reserved_words from keyword import kwlist @@ -15,21 +16,50 @@ class NamespaceManager: + """ + NamespaceManager object allows adding new elements to the namespace + and searching for elements in the namespace. When adding new + elements a Python-safe name is used to be able to write the equations. + + Parameters + ---------- + parameters: list (optional) + List of the parameters that are used as arguments in the Macro. + By default it is an empty list. + + """ reserved_words = set( dir() + bidir() + cdir() + ddir() + cadir() + edir() + fdir() + sdir() + udir()).union(kwlist) - def __init__(self, parameters=[]): + def __init__(self, parameters: List[str] = []): self.used_words = self.reserved_words.copy() + # include time in the namespace self.namespace = {"Time": "time"} + # include time in the cleanspace (case and whitespace/underscore + # insensitive namespace) self.cleanspace = {"time": "time"} for parameter in parameters: self.add_to_namespace(parameter) - def add_to_namespace(self, string): + def add_to_namespace(self, string: str) -> None: + """ + Add a new string to the namespace. + + Parameters + ---------- + string: str + String to add to the namespace. + + Returns + ------- + None + + """ self.make_python_identifier(string, add_to_namespace=True) - def make_python_identifier(self, string, prefix=None, add_to_namespace=False): + def make_python_identifier(self, string: str, prefix: str = None, + add_to_namespace: bool = False) -> str: """ Takes an arbitrary string and creates a valid Python identifier. @@ -48,12 +78,13 @@ def make_python_identifier(self, string, prefix=None, add_to_namespace=False): string: str The text to be converted into a valid python identifier. - namespace: dict - Map of existing translations into python safe identifiers. - This is to ensure that two strings are not translated into - the same python identifier. If string is already in the namespace - its value will be returned. Otherwise, namespace will be mutated - adding string as a new key and its value. + prefix: str or None (optional) + If given, it will be used as a prefix for the output string. + Default is None. + + add_to_namespace: bool (optional) + If True, it will add the passed string to the namespace and + to the cleanspace. Default is False. Returns ------- @@ -88,15 +119,15 @@ def make_python_identifier(self, string, prefix=None, add_to_namespace=False): 'nvs_123abc' already in namespace - >>> make_python_identifier('Var$', namespace={'Var$': 'var'}) + >>> make_python_identifier('Var$') # namespace={'Var$': 'var'} 'var' namespace conflicts - >>> make_python_identifier('Var@', namespace={'Var$': 'var'}) + >>> make_python_identifier('Var@') # namespace={'Var$': 'var'} 'var_1' - >>> make_python_identifier('Var$', namespace={'Var@': 'var', - ... 'Var%':'var_1'}) + >>> make_python_identifier('Var$') # namespace={'Var@': 'var', + ...
'Var%':'var_1'} 'var_2' References @@ -139,9 +170,11 @@ def make_python_identifier(self, string, prefix=None, add_to_namespace=False): identifier = s + '_' + str(i) i += 1 + # include the word in used words to avoid using it againg self.used_words.add(identifier) if add_to_namespace: + # include word to the namespace self.namespace[string] = identifier self.cleanspace[clean_s] = identifier diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index bc4118e3..6fc3cb4b 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -424,15 +424,19 @@ def _format_limits(self, limits): for value in limits: value = repr(value) if value == "nan" or value == "None": + # add numpy.nan to the values self.section.imports.add("numpy") new_limits.append("np.nan") elif value.endswith("inf"): + # add numpy.inf to the values self.section.imports.add("numpy") new_limits.append(value.strip("inf") + "np.inf") else: + # add numeric value new_limits.append(value) if new_limits[0] == "np.nan" and new_limits[1] == "np.nan": + # if both are numpy.nan do not include limits return None return "(" + ", ".join(new_limits) + ")" diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py index 043b52c4..5ee024da 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/building/python/subscripts.py @@ -1,12 +1,27 @@ import warnings from pathlib import Path import numpy as np +from typing import List + from pysd.translation.structures.abstract_model import AbstractSubscriptRange from pysd.py_backend.external import ExtSubscript -from typing import List class SubscriptManager: + """ + SubscriptManager object allows saving the subscripts included in the + Section, searching for elements or keys and simplifying them. + + Parameters + ---------- + abstrac_subscripts: list + List of the AbstractSubscriptRanges comming from the AbstractModel. + + _root: pathlib.Path + Path to the model file. Needed to read subscript ranges from + Excel files. + + """ def __init__(self, abstract_subscripts: List[AbstractSubscriptRange], _root: Path): self._root = _root @@ -18,11 +33,11 @@ def __init__(self, abstract_subscripts: List[AbstractSubscriptRange], self.subscript2num = self._get_subscript2num() @property - def subscripts(self): + def subscripts(self) -> dict: return self._subscripts @subscripts.setter - def subscripts(self, abstract_subscripts): + def subscripts(self, abstract_subscripts: List[AbstractSubscriptRange]): self._subscripts = {} missing = [] for sub in abstract_subscripts: @@ -60,7 +75,7 @@ def subscripts(self, abstract_subscripts): self._subscripts[sub.name] =\ self._subscripts[sub.subscripts] - def _get_main_subscripts(self): + def _get_main_subscripts(self) -> dict: """ Reutrns a dictionary with the main ranges as keys and their subranges as values. @@ -99,7 +114,7 @@ def _get_main_subscripts(self): return subranges - def _get_subscript2num(self): + def _get_subscript2num(self) -> dict: """ Build a dictionary to return the numeric value or values of a subscript or subscript range. @@ -139,10 +154,10 @@ def _get_subscript2num(self): return s2n - def find_subscript_name(self, element, avoid=[]): + def find_subscript_name(self, element: str, avoid: List[str] = []) -> str: """ - Given a subscript dictionary, and a member of a subscript family, - return the first key of which the member is within the value list. 
+ Given a member of a subscript family, return the first key of + which the member is within the value list. If element is already a subscript name, return that. Parameters @@ -154,6 +169,9 @@ def find_subscript_name(self, element, avoid=[]): Returns ------- + name: str + The first key of which the member is within the value list + in the subscripts dictionary. Examples -------- @@ -172,7 +190,7 @@ def find_subscript_name(self, element, avoid=[]): if element in elements and name not in avoid: return name - def make_coord_dict(self, subs): + def make_coord_dict(self, subs: List[str]) -> dict: """ This is for assisting with the lookup of a particular element. @@ -208,7 +226,8 @@ def make_coord_dict(self, subs): coordinates[sub] = self.subscripts[sub] return coordinates - def make_merge_list(self, subs_list, element=""): + def make_merge_list(self, subs_list: List[List[str]], + element: str = "") -> List[str]: """ This is for assisting when building xrmerge. From a list of subscript lists returns the final subscript list after mergin. Necessary when @@ -312,7 +331,8 @@ def make_merge_list(self, subs_list, element=""): return dims - def simplify_subscript_input(self, coords, merge_subs): + def simplify_subscript_input(self, coords: dict, + merge_subs: List[str]) -> tuple: """ Parameters ---------- @@ -345,4 +365,4 @@ def simplify_subscript_input(self, coords, merge_subs): # write whole dict coordsp.append(f"'{ndim}': {coord}") - return final_subs, "{" + ", ".join(coordsp) + "}" \ No newline at end of file + return final_subs, "{" + ", ".join(coordsp) + "}" diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index 82559d70..e4433798 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -180,7 +180,7 @@ def _get_data_from_file_opyxl(self, cellname): # key error if the cellrange doesn't exist in the file or sheet raise AttributeError( self.py_name + "\n" - + "The cell range name:\t {}\n".format(cellname) + + "The cell range name:\t'{}'\n".format(cellname) + "Doesn't exist in:\n" + self._file_sheet ) @@ -265,8 +265,8 @@ def _get_series_data(self, series_across, series_row_or_col, cell, size): self.py_name + "\n" + "Dimension given in:\n" + self._file_sheet - + "\tDimentime_missingsion name:" - + "\t{}\n".format(series_row_or_col) + + "\tDimension name:" + + "\t'{}'\n".format(series_row_or_col) + " is a table and not a vector" ) @@ -285,8 +285,8 @@ def _get_series_data(self, series_across, series_row_or_col, cell, size): self.py_name + "\n" + "Dimension and data given in:\n" + self._file_sheet - + "\tDimension name:\t{}\n".format(series_row_or_col) - + "\tData name:\t{}\n".format(cell) + + "\tDimension name:\t'{}'\n".format(series_row_or_col) + + "\tData name:\t'{}'\n".format(cell) + " don't have the same length in the 1st dimension" ) @@ -297,7 +297,7 @@ def _get_series_data(self, series_across, series_row_or_col, cell, size): self.py_name + "\n" + "Data given in:\n" + self._file_sheet - + "\tData name:\t{}\n".format(cell) + + "\tData name:\t'{}'\n".format(cell) + " has not the same size as the given coordinates" ) @@ -326,7 +326,7 @@ def _resolve_file(self, root): # TODO add an option to include indirect references raise ValueError( self.py_name + "\n" - + f"Indirect reference to file: {self.file}") + + f"Indirect reference to file: '{self.file}'") self.file = root.joinpath(self.file) @@ -375,7 +375,7 @@ def _initialize_data(self, element_type): self.py_name + "\n" + "Dimension given in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(series_across, 
self.x_row_or_col) + + "\t{}:\t'{}'\n".format(series_across, self.x_row_or_col) + " has length 0" ) @@ -393,7 +393,7 @@ def _initialize_data(self, element_type): self.py_name + "\n" + "Dimension given in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(series_across, self.x_row_or_col) + + "\t{}:\t'{}'\n".format(series_across, self.x_row_or_col) + " has length 0" ) if self.missing == "warning": @@ -401,7 +401,7 @@ def _initialize_data(self, element_type): self.py_name + "\n" + "Dimension value missing or non-valid in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(series_across, self.x_row_or_col) + + "\t{}:\t'{}'\n".format(series_across, self.x_row_or_col) + " the corresponding data value(s) to the " + "missing/non-valid value(s) will be ignored\n\n" ) @@ -410,7 +410,7 @@ def _initialize_data(self, element_type): self.py_name + "\n" + "Dimension value missing or non-valid in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(series_across, self.x_row_or_col) + + "\t{}:\t'{}'\n".format(series_across, self.x_row_or_col) ) # reorder data with increasing series @@ -423,7 +423,7 @@ def _initialize_data(self, element_type): raise ValueError(self.py_name + "\n" + "Dimension given in:\n" + self._file_sheet - + "\t{}:\t{}\n".format( + + "\t{}:\t'{}'\n".format( series_across, self.x_row_or_col) + " has repeated values") @@ -448,7 +448,7 @@ def _initialize_data(self, element_type): self.py_name + "\n" + "Data value missing or non-valid in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(cell_type, self.cell) + + "\t{}:\t'{}'\n".format(cell_type, self.cell) + interpolate_message + "\n\n" ) elif self.missing == "raise": @@ -456,7 +456,7 @@ def _initialize_data(self, element_type): self.py_name + "\n" + "Data value missing or non-valid in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(cell_type, self.cell) + + "\t{}:\t'{}'\n".format(cell_type, self.cell) ) # fill values if self.interp != "raw": @@ -559,8 +559,8 @@ def _file_sheet(self): """ Returns file and sheet name in a string """ - return "\tFile name:\t{}\n".format(self.file)\ - + "\tSheet name:\t{}\n".format(self.sheet) + return "\tFile name:\t'{}'\n".format(self.file)\ + + "\tSheet name:\t'{}'\n".format(self.sheet) @staticmethod def _col_to_num(col): @@ -925,14 +925,14 @@ def _initialize(self): self.py_name + "\n" + "Constant value missing or non-valid in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(cell_type, self.cell) + + "\t{}:\t'{}'\n".format(cell_type, self.cell) ) elif self.missing == "raise": raise ValueError( self.py_name + "\n" + "Constant value missing or non-valid in:\n" + self._file_sheet - + "\t{}:\t{}\n".format(cell_type, self.cell) + + "\t{}:\t'{}'\n".format(cell_type, self.cell) ) # Create only an xarray if the data is not 0 dimensional diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 4f695b40..9a6f945e 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -176,7 +176,7 @@ def visit_subscript_copy(self, n, vc): def visit_subscript_mapping(self, n, vc): warnings.warn( - "\n Subscript mapping detected." + "\nSubscript mapping detected. " + "This feature works only in some simple cases." 
) diff --git a/tests/unit_test_external.py b/tests/unit_test_external.py index d463dbf1..061acc2e 100644 --- a/tests/unit_test_external.py +++ b/tests/unit_test_external.py @@ -265,7 +265,7 @@ def test_resolve_file(self): ext._resolve_file(root=root) self.assertIn( - "Indirect reference to file: ?input.xlsx", + "Indirect reference to file: '?input.xlsx'", str(err.exception)) From 7011710e6003a723a10b7cd02eb35bdd5bae8626 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 22 Apr 2022 13:25:53 +0200 Subject: [PATCH 38/96] Improve subscripts simplification --- .../python/python_expressions_builder.py | 6 +- pysd/building/python/python_model_builder.py | 43 +++++++------- pysd/building/python/subscripts.py | 59 ++++++++++++++++--- 3 files changed, 76 insertions(+), 32 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 6b5cb436..98f5473b 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -21,7 +21,7 @@ def __str__(self): def reshape(self, subscripts, final_subscripts): subscripts_out = subscripts.simplify_subscript_input( - final_subscripts, list(final_subscripts))[1] + final_subscripts)[1] if not final_subscripts or ( self.subscripts == final_subscripts and list(self.subscripts) == list(final_subscripts)): @@ -1096,7 +1096,7 @@ def build(self, arguments): expression, subscripts =\ self.section.subscripts.subscript2num[self.reference] subscripts_out = self.section.subscripts.simplify_subscript_input( - subscripts, list(subscripts))[1] + subscripts)[1] if subscripts: self.section.imports.add("numpy") # NUMPY: not need this if @@ -1182,7 +1182,7 @@ def visit_subscripts(self, expression, original_subs): # self.mapping_subscripts if self.mapping_subscripts != final_subs: subscripts_out = self.section.subscripts.simplify_subscript_input( - self.mapping_subscripts, list(self.mapping_subscripts))[1] + self.mapping_subscripts)[1] expression = "xr.DataArray(%s.values, %s, %s)" % ( expression, subscripts_out, list(self.mapping_subscripts) ) diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 6fc3cb4b..6263aa1c 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -453,20 +453,15 @@ def build_element(self): if expr is None: continue if isinstance(subs, list): - subs = [{ - esubs: subsi[csubs] - for csubs, esubs in zip(subsi, self.subscripts) - } for subsi in subs] + subs = [self.section.subscripts.simplify_subscript_input( + subsi, self.subscripts) for subsi in subs] else: - subs = { - esubs: subs[csubs] - for csubs, esubs in zip(subs, self.subscripts) - } + subs = self.section.subscripts.simplify_subscript_input( + subs, self.subscripts) + exc_subs = [ - { - esubs: subs_e[csubs] - for csubs, esubs in zip(subs_e, self.subscripts) - } + self.section.subscripts.simplify_subscript_input( + subs_e, self.subscripts) for subs_e in except_subscripts ] expressions.append( @@ -500,8 +495,10 @@ def build_element(self): elif isinstance(expression["subs"], list): self.pre_expression += self.manage_multi_def(expression) else: - self.pre_expression +=\ - "value.loc[%(subs)s] = %(expr)s\n" % expression + self.pre_expression += "value.loc[%s] = "\ + % expression["subs"][1] + self.pre_expression += "%(expr)s\n" % expression + self.expression = "value" else: self.pre_expression = "" @@ -510,7 +507,9 @@ def build_element(self): if not 
expressions[0]["expr"].subscripts and self.subscripts: self.expression = "xr.DataArray(%s, %s, %s)\n" % ( expressions[0]["expr"], - self.subs_dict, list(self.subs_dict) + self.section.subscripts.simplify_subscript_input( + self.subs_dict)[1], + list(self.subs_dict) ) else: self.expression = expressions[0]["expr"] @@ -525,13 +524,14 @@ def build_element(self): def manage_multi_def(self, expression): final_expr = "def_subs = xr.zeros_like(value, dtype=bool)\n" for subs in expression["subs"]: - final_expr += "def_subs.loc[%s] = True\n" % subs + final_expr += "def_subs.loc[%s] = True\n"\ + % subs[1] return final_expr + "value.values[def_subs.values] = "\ "%(expr)s[def_subs.values]\n" % expression def manage_except(self, expression): - if expression["subs"] == self.subs_dict: + if expression["subs"][0] == self.subs_dict: # Final subscripts are the same as the main subscripts # of the component. Generate a True array like value final_expr = "except_subs = xr.ones_like(value, dtype=bool)\n" @@ -540,11 +540,11 @@ def manage_except(self, expression): # of the component. Generate a False array like value and # set to True the subarray of the component coordinates final_expr = "except_subs = xr.zeros_like(value, dtype=bool)\n"\ - "except_subs.loc[%(subs)s] = True\n" % expression + "except_subs.loc[%s] = True\n" % expression["subs"][1] for except_subs in expression["subs_except"]: # We set to False the dimensions in the EXCEPT - final_expr += "except_subs.loc[%s] = False\n" % except_subs + final_expr += "except_subs.loc[%s] = False\n" % except_subs[1] if expression["expr"].subscripts: # assign the values of an array @@ -569,7 +569,10 @@ def build_element_out(self): objects = "\n\n".join([ value["expression"] % { - "final_subs": value.get("final_subs", "")} + "final_subs": + self.section.subscripts.simplify_subscript_input( + value.get("final_subs", {}))[1] + } for value in self.objects.values() if value["expression"] is not None ]) diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py index 5ee024da..0786d521 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/building/python/subscripts.py @@ -175,11 +175,15 @@ def find_subscript_name(self, element: str, avoid: List[str] = []) -> str: Examples -------- - >>> find_subscript_name('D') + >>> sm = SubscriptManager([], Path('')) + >>> sm._subscripts = { + ... 'Dim1': ['A', 'B', 'C'], + ... 'Dim2': ['A', 'B', 'C', 'D']} + >>> sm.find_subscript_name('D') 'Dim2' - >>> find_subscript_name('B') + >>> sm.find_subscript_name('B') 'Dim1' - >>> find_subscript_name('B', avoid=['Dim1']) + >>> sm.find_subscript_name('B', avoid=['Dim1']) 'Dim2' """ @@ -208,8 +212,18 @@ def make_coord_dict(self, subs: List[str]) -> dict: Examples -------- - >>> make_coord_dict(['Dim1', 'D']) + >>> sm = SubscriptManager([], Path('')) + >>> sm._subscripts = { + ... 'Dim1': ['A', 'B', 'C'], + ... 
'Dim2': ['A', 'B', 'C', 'D']} + >>> sm.make_coord_dict(['Dim1', 'D']) {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D']} + >>> sm.make_coord_dict(['A']) + {'Dim1': ['A']} + >>> sm.make_coord_dict(['A', 'B']) + {'Dim1': ['A'], 'Dim2': ['B']} + >>> sm.make_coord_dict(['A', 'Dim1']) + {'Dim2': ['A'], 'Dim1': ['A', 'B', 'C']} """ sub_elems_list = [y for x in self.subscripts.values() for y in x] @@ -249,10 +263,16 @@ def make_merge_list(self, subs_list: List[List[str]], Examples -------- - >>> sm = SubscriptManager() - >>> sm.subscripts = {"upper": ["A", "B"], "all": ["A", "B", "C"]} + >>> sm = SubscriptManager([], Path('')) + >>> sm._subscripts = {"upper": ["A", "B"], "all": ["A", "B", "C"]} + >>> sm.make_merge_list([['A'], ['B']]) + ['upper'] + >>> sm.make_merge_list([['A'], ['B'], ['C']]) + ['all'] >>> sm.make_merge_list([['upper'], ['C']]) ['all'] + >>> sm.make_merge_list([['A'], ['C']]) + ['all'] """ coords_set = [set() for i in range(len(subs_list[0]))] @@ -332,16 +352,21 @@ def make_merge_list(self, subs_list: List[List[str]], return dims def simplify_subscript_input(self, coords: dict, - merge_subs: List[str]) -> tuple: + merge_subs: List[str] = None) -> tuple: """ + Simplifies the subscripts input to avoid printing the coordinates + list when the _subscript_dict can be used. Makes model code more + simple. + Parameters ---------- coords: dict Coordinates to write in the model file. - merge_subs: list of strings + merge_subs: list of strings or None (optional) List of the final subscript range of the python array after - merging with other objects + merging with other objects. If None the merge_subs will be + taken from coords. Default is None. Returns ------- @@ -349,7 +374,23 @@ def simplify_subscript_input(self, coords: dict, Final subscripts and the equations to generate the coord dicttionary in the model file. + Examples + -------- + >>> sm = SubscriptManager([], Path('')) + >>> sm._subscripts = { + ... "dim": ["A", "B", "C"], + ... 
"dim2": ["A", "B", "C", "D"]} + >>> sm.simplify_subscript_input({"dim": ["A", "B", "C"]}) + ({"dim": ["A", "B", "C"]}, "{'dim': _subscript_dict['dim']}" + >>> sm.simplify_subscript_input({"dim": ["A", "B", "C"]}, ["dim2"]) + ({"dim2": ["A", "B", "C"]}, "{'dim2': _subscript_dict['dim']}" + >>> sm.simplify_subscript_input({"dim": ["A", "B"]}) + ({"dim": ["A", "B"]}, "{'dim': ['A', 'B']}" + """ + if merge_subs is None: + merge_subs = list(coords) + coordsp = [] final_subs = {} for ndim, (dim, coord) in zip(merge_subs, coords.items()): From 521640cd0be546593c102beb15403ea57528ee92 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 22 Apr 2022 16:30:29 +0200 Subject: [PATCH 39/96] Improve more the loc in multiline definitions --- .../python/python_expressions_builder.py | 136 +++++++++++++----- pysd/building/python/python_model_builder.py | 40 +++--- pysd/py_backend/utils.py | 1 - 3 files changed, 120 insertions(+), 57 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 98f5473b..76ff4f45 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1132,43 +1132,13 @@ def build(self, arguments): order=0) def visit_subscripts(self, expression, original_subs): - final_subs, rename, loc, reset_coords, float = {}, {}, [], False, True - for (dim, coord), (orig_dim, orig_coord)\ - in zip(self.subscripts.items(), original_subs.items()): - if len(coord) == 1: - # subset a 1 dimension value - # NUMPY: subset value [:, N, :, :] - loc.append(repr(coord[0])) - reset_coords = True - elif len(coord) < len(orig_coord): - # subset a subrange - # NUMPY: subset value [:, :, np.array([1, 0]), :] - # NUMPY: as order may change we need to check if - # dim != orig_dim - # NUMPY: use also ranges [:, :, 2:5, :] when possible - if dim.endswith("!"): - loc.append("_subscript_dict['%s']" % dim[:-1]) - else: - loc.append("_subscript_dict['%s']" % dim) - final_subs[dim] = coord - float = False - else: - # do nothing - # NUMPY: same, we can remove float = False - loc.append(":") - final_subs[dim] = coord - float = False - - if dim != orig_dim and len(coord) != 1: - # NUMPY: check order of dimensions, make all subranges work - # with the same dimensions? - # NUMPY: this could be solved in the previous if/then/else - rename[orig_dim] = dim - - if any(dim != ":" for dim in loc): + loc, rename, final_subs, reset_coords, to_float =\ + visit_loc(self.subscripts, original_subs) + + if loc is not None: # NUMPY: expression += "[%s]" % ", ".join(loc) - expression += ".loc[%s]" % ", ".join(loc) - if reset_coords and float: + expression += f".loc[{loc}]" + if to_float: # NUMPY: Not neccessary expression = "float(" + expression + ")" elif reset_coords: @@ -1279,6 +1249,100 @@ def _merge_dependencies(current, new): current[dep] = new[dep] +def visit_loc(current_subs: dict, original_subs: dict, + keep_shape: bool = False) -> tuple: + """ + Compares the original subscripts and the current subscripts and + returns subindexing information if needed. + + Parameters + ---------- + current_subs: dict + The dictionary of the subscripts that are used in the variable. + + original_subs: dict + The dictionary of the original subscripts of the variable. + + keep_shape: bool (optional) + If True will keep the number of dimensions of the original element + and return only loc. Default is False. + + Returns + ------- + loc: list of str or None + List of the subscripting in each dimensions. 
If all are full (":"), + None is rerned wich means that array indexing is not needed. + + rename: dict + Dictionary of the dimensions to rename. + + final_subs: dict + Dictionary of the final subscripts of the variable. + + reset_coords: bool + Boolean indicating if the coords need to be reseted. + + to_float: bool + Boolean indicating if the variable should be converted to a float. + + """ + final_subs, rename, loc, reset_coords, to_float = {}, {}, [], False, True + for (dim, coord), (orig_dim, orig_coord)\ + in zip(current_subs.items(), original_subs.items()): + if len(coord) == 1: + # subset a 1 dimension value + # NUMPY: subset value [:, N, :, :] + if keep_shape: + # NUMPY: not necessary + loc.append(f"[{repr(coord[0])}]") + else: + loc.append(repr(coord[0])) + reset_coords = True + elif len(coord) < len(orig_coord): + # subset a subrange + # NUMPY: subset value [:, :, np.array([1, 0]), :] + # NUMPY: as order may change we need to check if + # dim != orig_dim + # NUMPY: use also ranges [:, :, 2:5, :] when possible + if dim.endswith("!"): + loc.append("_subscript_dict['%s']" % dim[:-1]) + else: + if dim != orig_dim: + loc.append("_subscript_dict['%s']" % dim) + else: + # workaround for locs from external objects merge + loc.append(repr(coord)) + final_subs[dim] = coord + to_float = False + else: + # do nothing + # NUMPY: same, we can remove float = False + loc.append(":") + final_subs[dim] = coord + to_float = False + + if dim != orig_dim and len(coord) != 1: + # NUMPY: check order of dimensions, make all subranges work + # with the same dimensions? + # NUMPY: this could be solved in the previous if/then/else + rename[orig_dim] = dim + + if all(dim == ":" for dim in loc): + # if all are ":" then no need to loc + loc = None + else: + loc = ", ".join(loc) + + if keep_shape: + return loc + + # convert to float if also coords are reseted (input is an array) + to_float = to_float and reset_coords + + # NUMPY: save and return only loc, the other are not needed + return loc, rename, final_subs, reset_coords, to_float + + class ASTVisitor: builders = { ae.InitialStructure: InitialBuilder, diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 6263aa1c..084593cf 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -453,19 +453,21 @@ def build_element(self): if expr is None: continue if isinstance(subs, list): - subs = [self.section.subscripts.simplify_subscript_input( - subsi, self.subscripts) for subsi in subs] + loc = [vs.visit_loc(subsi, self.subs_dict, True) + for subsi in subs] else: - subs = self.section.subscripts.simplify_subscript_input( - subs, self.subscripts) + loc = vs.visit_loc(subs, self.subs_dict, True) - exc_subs = [ - self.section.subscripts.simplify_subscript_input( - subs_e, self.subscripts) + exc_loc = [ + vs.visit_loc(subs_e, self.subs_dict, True) for subs_e in except_subscripts ] - expressions.append( - {"expr": expr, "subs": subs, "subs_except": exc_subs}) + expressions.append({ + "expr": expr, + "subs": subs, + "loc": loc, + "loc_except": exc_loc + }) if len(expressions) > 1: # NUMPY: xrmerge would be sustitute by a multiple line definition @@ -489,15 +491,14 @@ def build_element(self): # NUMPY not necessary expression["expr"].lower_order(0, force_0=True) expression["expr"].expression += ".values" - if expression["subs_except"]: + if expression["loc_except"]: # there is an excep in the definition of the component self.pre_expression += 
self.manage_except(expression) elif isinstance(expression["subs"], list): self.pre_expression += self.manage_multi_def(expression) else: - self.pre_expression += "value.loc[%s] = "\ - % expression["subs"][1] - self.pre_expression += "%(expr)s\n" % expression + self.pre_expression +=\ + "value.loc[%(loc)s] = %(expr)s\n" % expression self.expression = "value" else: @@ -523,15 +524,14 @@ def build_element(self): def manage_multi_def(self, expression): final_expr = "def_subs = xr.zeros_like(value, dtype=bool)\n" - for subs in expression["subs"]: - final_expr += "def_subs.loc[%s] = True\n"\ - % subs[1] + for loc in expression["loc"]: + final_expr += f"def_subs.loc[{loc}] = True\n" return final_expr + "value.values[def_subs.values] = "\ "%(expr)s[def_subs.values]\n" % expression def manage_except(self, expression): - if expression["subs"][0] == self.subs_dict: + if expression["subs"] == self.subs_dict: # Final subscripts are the same as the main subscripts # of the component. Generate a True array like value final_expr = "except_subs = xr.ones_like(value, dtype=bool)\n" @@ -540,11 +540,11 @@ def manage_except(self, expression): # of the component. Generate a False array like value and # set to True the subarray of the component coordinates final_expr = "except_subs = xr.zeros_like(value, dtype=bool)\n"\ - "except_subs.loc[%s] = True\n" % expression["subs"][1] + "except_subs.loc[%(loc)s] = True\n" % expression - for except_subs in expression["subs_except"]: + for except_subs in expression["loc_except"]: # We set to False the dimensions in the EXCEPT - final_expr += "except_subs.loc[%s] = False\n" % except_subs[1] + final_expr += "except_subs.loc[%s] = False\n" % except_subs if expression["expr"].subscripts: # assign the values of an array diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index c8934000..9e377bac 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -116,7 +116,6 @@ def make_flat_df(df, return_addresses, flatten=False): new_df = {} for real_name, (pyname, address) in return_addresses.items(): if address: - print(df[pyname].values[0], "\n", address, "_________\n") # subset the specific address values = [x.loc[address] for x in df[pyname].values] else: From 374195e65433acdf0494eee485fe316f25425d6b Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 25 Apr 2022 15:16:25 +0200 Subject: [PATCH 40/96] Improve subscript updimensioning and transposition --- .../python/python_expressions_builder.py | 159 +++++++++++------- 1 file changed, 94 insertions(+), 65 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 76ff4f45..cfc2d3d2 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -19,9 +19,7 @@ def __str__(self): # makes easier building return self.expression - def reshape(self, subscripts, final_subscripts): - subscripts_out = subscripts.simplify_subscript_input( - final_subscripts)[1] + def reshape(self, subscripts, final_subscripts, final_element=False): if not final_subscripts or ( self.subscripts == final_subscripts and list(self.subscripts) == list(final_subscripts)): @@ -30,22 +28,42 @@ def reshape(self, subscripts, final_subscripts): elif not self.subscripts: # original expression is not an array # NUMPY: object.expression = np.full(%s, %(shape)s) + subscripts_out = subscripts.simplify_subscript_input( + final_subscripts)[1] self.expression = "xr.DataArray(%s, %s, %s)" % ( 
self.expression, subscripts_out, list(final_subscripts) ) self.order = 0 + self.subscripts = final_subscripts else: # original expression is an array - # NUMPY: reorder dims if neccessary with np.moveaxis or similar - # NUMPY: add new axis with [:, None, :] or np.tile, - # depending on an input argument - # NUMPY: if order is not 0 need to lower the order to 0 - # using force! - self.expression = "(xr.DataArray(0, %s, %s) + %s)" % ( - subscripts_out, list(final_subscripts), self.expression - ) - self.order = 0 - self.subscripts = final_subscripts + self.lower_order(0, force_0=True) + + # reorder subscrips + final_order = { + sub: self.subscripts[sub] + for sub in final_subscripts + if sub in self.subscripts + } + if list(final_order) != list(self.subscripts): + # NUMPY: reorder dims if neccessary with np.moveaxis or similar + self.expression +=\ + f".transpose({', '.join(map(repr, final_order))})" + self.subscripts = final_order + + # add new dimensions + if final_element and final_subscripts != self.subscripts: + # NUMPY: remove final_element condition from top + # NUMPY: add new axis with [:, None, :] + # NUMPY: move final_element condition here and use np.tile + for i, dim in enumerate(final_subscripts): + if dim not in self.subscripts: + subscripts_out = subscripts.simplify_subscript_input( + {dim: final_subscripts[dim]})[1] + self.expression +=\ + f".expand_dims({subscripts_out}, {i})" + + self.subscripts = final_subscripts def lower_order(self, new_order, force_0=False): if self.order >= new_order and self.order != 0\ @@ -93,7 +111,8 @@ def reorder(self, arguments, def_subs=None, force=None): final_subscripts = self.get_final_subscripts( arguments, def_subs) - [arguments[key].reshape(self.section.subscripts, final_subscripts) + [arguments[key].reshape( + self.section.subscripts, final_subscripts, force == "equal") for key in arguments if arguments[key].subscripts or force == "equal"] @@ -364,31 +383,37 @@ def build_function_call(self, arguments): def_subs=self.def_subs ) if self.function == "xidz" and final_subscripts: + # xidz must always return the same shape object if not arguments["1"].subscripts: - new_args = {"0": arguments["0"], "2": arguments["2"]} - self.reorder( - new_args, - def_subs=self.def_subs, - force="equal" - ) - arguments.update(new_args) - if self.function == "if_then_else" and final_subscripts: + [arguments[i].reshape( + self.section.subscripts, final_subscripts, True) + for i in ["0", "1"]] + elif arguments["0"].subscripts or arguments["2"].subscripts: + # NUMPY: not need this statement + [arguments[i].reshape( + self.section.subscripts, final_subscripts, True) + for i in ["0", "1", "2"] + if arguments[i].subscripts] + elif self.function == "zidz" and final_subscripts: + # zidz must always return the same shape object + arguments["0"].reshape( + self.section.subscripts, final_subscripts, True) + if arguments["1"].subscripts: + # NUMPY: not need this statement + arguments["1"].reshape( + self.section.subscripts, final_subscripts, True) + elif self.function == "if_then_else" and final_subscripts: + # if_then_else must always return the same shape object if not arguments["0"].subscripts: - # NUMPY: we need to ensure that if_then_else always returs - # the same shape object - new_args = {"1": arguments["1"], "2": arguments["2"]} - self.reorder( - new_args, - def_subs=self.def_subs, - force="equal" - ) - arguments.update(new_args) + # condition is a float + [arguments[i].reshape( + self.section.subscripts, final_subscripts, True) + for i in ["1", "2"]] else: - 
self.reorder( - arguments, - def_subs=self.def_subs, - force="equal" - ) + # condition has dimensions + [arguments[i].reshape( + self.section.subscripts, final_subscripts, True) + for i in ["0", "1", "2"]] return BuildAST( expression=expression % arguments, @@ -623,7 +648,8 @@ def build(self, arguments): self.component.subtype = "Initial" self.section.imports.add("statefuls", "Initial") - arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape( + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_initial") @@ -658,8 +684,10 @@ def build(self, arguments): self.component.subtype = "Integ" self.section.imports.add("statefuls", "Integ") - arguments["initial"].reshape(self.section.subscripts, self.def_subs) - arguments["flow"].reshape(self.section.subscripts, self.def_subs) + arguments["initial"].reshape( + self.section.subscripts, self.def_subs, True) + arguments["flow"].reshape( + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_integ") @@ -697,9 +725,12 @@ def build(self, arguments): self.component.subtype = "Delay" self.section.imports.add("statefuls", self.dtype) - arguments["input"].reshape(self.section.subscripts, self.def_subs) - arguments["delay_time"].reshape(self.section.subscripts, self.def_subs) - arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["input"].reshape( + self.section.subscripts, self.def_subs, True) + arguments["delay_time"].reshape( + self.section.subscripts, self.def_subs, True) + arguments["initial"].reshape( + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix=f"_{self.dtype.lower()}") @@ -743,8 +774,10 @@ def build(self, arguments): self.component.subtype = "DelayFixed" self.section.imports.add("statefuls", "DelayFixed") - arguments["input"].reshape(self.section.subscripts, self.def_subs) - arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["input"].reshape( + self.section.subscripts, self.def_subs, True) + arguments["initial"].reshape( + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_delayfixed") @@ -784,11 +817,11 @@ def build(self, arguments): self.section.imports.add("statefuls", "Smooth") arguments["input"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["smooth_time"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["initial"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_smooth") @@ -836,11 +869,11 @@ def build(self, arguments): self.section.imports.add("statefuls", "Trend") arguments["input"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["average_time"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["initial_trend"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( 
self.element.identifier, prefix="_trend") @@ -885,13 +918,13 @@ def build(self, arguments): self.section.imports.add("statefuls", "Forecast") arguments["input"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["average_time"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["horizon"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["initial_trend"].reshape( - self.section.subscripts, self.def_subs) + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_forecast") @@ -933,9 +966,12 @@ def build(self, arguments): self.component.subtype = "SampleIfTrue" self.section.imports.add("statefuls", "SampleIfTrue") - arguments["condition"].reshape(self.section.subscripts, self.def_subs) - arguments["input"].reshape(self.section.subscripts, self.def_subs) - arguments["initial"].reshape(self.section.subscripts, self.def_subs) + arguments["condition"].reshape( + self.section.subscripts, self.def_subs, True) + arguments["input"].reshape( + self.section.subscripts, self.def_subs, True) + arguments["initial"].reshape( + self.section.subscripts, self.def_subs, True) arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_sampleiftrue") @@ -1414,17 +1450,10 @@ def visit(self): ) if reshape: - # We are only comparing the dictionaries (set of dimensions) - # and not the list (order). - # With xarray we don't need to compare the order because the - # decorator @subs will reorder the objects # NUMPY: in this case we need to tile along dims if neccessary # or reorder the dimensions - # NUMPY: if the output is a float or int and they are several - # definitions we can return float or int as we can - # safely do "var[:, 1, :] = 3" visit_out.reshape( - self.component.section.subscripts, self.subscripts) + self.component.section.subscripts, self.subscripts, True) return visit_out From c833754a007db578e93d87def3861dc26ac8c5ec Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 25 Apr 2022 15:54:43 +0200 Subject: [PATCH 41/96] Correct lookup redefinition --- pysd/py_backend/lookups.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index bb218334..43c64eb0 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -43,9 +43,16 @@ def __call__(self, x, final_subs=None): return self._call(self.data, x, final_subs) except (TypeError, KeyError): # this except catch the errors when a lookups has been - # changed to a constant valuue by the user - # TODO need to expand data to final_subs if they are given - return self.data + # changed to a constant value by the user + if final_subs and isinstance(self.data, xr.DataArray): + # self.data is an array, reshape it + outdata = xr.DataArray(np.nan, final_subs, list(final_subs)) + return xr.broadcast(outdata, self.data)[1] + elif final_subs: + # self.data is a float, create an array + return xr.DataArray(self.data, final_subs, list(final_subs)) + else: + return self.data def _call(self, data, x, final_subs=None): if isinstance(x, xr.DataArray): From efd423fc77764a3dd0b0fbb92293e5036e8f479f Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 25 Apr 2022 17:11:00 +0200 Subject: [PATCH 42/96] Continue working on the documentation --- 
docs/advanced_usage.rst | 11 +++++------ docs/basic_usage.rst | 26 +++++++++++++------------- docs/structure/python_builder.rst | 6 +++++- docs/structure/structure_index.rst | 24 +++++++----------------- pysd/pysd.py | 10 +++++----- 5 files changed, 35 insertions(+), 42 deletions(-) diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index 0a15f010..b890164e 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -1,7 +1,7 @@ Advanced Usage ============== -The power of PySD, and its motivation for existence, is its ability to tie in to other models and analysis packages in the Python environment. In this section we’ll discuss how those connections happen. +The power of PySD, and its motivation for existence, is its ability to tie in to other models and analysis packages in the Python environment. In this section we'll discuss how those connections happen. Replacing model components with more complex objects @@ -20,7 +20,7 @@ This drew on the internal state of the system, namely the time t, and the time-s Because PySD assumes that all components in a model are represented as functions taking no arguments, any component that we wish to modify must be replaced with a function taking no arguments. As the state of the system and all auxiliary or flow methods are public, our replacement function can call these methods as part of its internal structure. -In our teacup example, suppose we didn’t know the functional form for calculating the heat lost to the room, but instead had a lot of data of teacup temperatures and heat flow rates. We could use a regression model (here a support vector regression from Scikit-Learn) in place of the analytic function:: +In our teacup example, suppose we didn't know the functional form for calculating the heat lost to the room, but instead had a lot of data of teacup temperatures and heat flow rates. We could use a regression model (here a support vector regression from Scikit-Learn) in place of the analytic function:: from sklearn.svm import SVR regression = SVR() @@ -116,10 +116,9 @@ Selecting and running a submodel -------------------------------- A submodel of a translated model can be selected in order to run only a part of the original model. This can be done through the :py:data:`.select_submodel()` method: -.. autoclass:: pysd.py_backend.statefuls.Model - :members: select_submodel +.. automethod:: pysd.py_backend.statefuls.Model.select_submodel + In order to preview the needed exogenous variables the :py:data:`.get_dependencies()` method can be used: -.. autoclass:: pysd.py_backend.statefuls.Model - :members: get_dependencies +.. automethod:: pysd.py_backend.statefuls.Model.get_dependencies diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index 94f1ccf4..3a522468 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -18,7 +18,7 @@ This code creates an instance of the PySD class loaded with an example model tha .. note:: The teacup model can be found in the `samples of the test-models repository `_. -To view a synopsis of the model equations and documentation, call the :py:func:`.doc` property of the model class. This will generate a listing of all the model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model:: +To view a synopsis of the model equations and documentation, use the :py:function:`.doc` property of the model class. 
This will generate a listing of all the model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model:: >>> model.doc @@ -41,7 +41,7 @@ To view a synopsis of the model equations and documentation, call the :py:func:` >>> model = pysd.load('Teacup.py') .. note:: - The functions :py:func:`read_vensim()`, :py:func:`read_xmile()` and :py:func:`load()` have optional arguments for advanced usage, you can check the full description in :doc:`User Functions Reference <../functions>` or using :py:func:`help()` e.g.:: + The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage, you can check the full description in :doc:`User Functions Reference <../functions>` or using :py:func:`help()` e.g.:: >>> import pysd >>> help(pysd.load) @@ -109,7 +109,7 @@ If a variable is given in different files to choose the specific file a dictiona Outputting various run information ---------------------------------- -The :py:func:`.run()` command has a few options that make it more useful. In many situations we want to access components of the model other than merely the stocks – we can specify which components of the model should be included in the returned dataframe by including them in a list that we pass to the :py:func:`.run()` command, using the return_columns keyword argument:: +The :py:func:`.run()` command has a few options that make it more useful. In many situations we want to access components of the model other than merely the stocks - we can specify which components of the model should be included in the returned dataframe by including them in a list that we pass to the :py:func:`.run()` command, using the return_columns keyword argument:: >>> model.run(return_columns=['Teacup Temperature', 'Room Temperature']) @@ -147,7 +147,7 @@ If the measured data that we are comparing with our model comes in at irregular Retrieving totally flat dataframe --------------------------------- -The subscripted variables, in general, will be returned as *xarray.DataArray*s in the output *pandas.DataFrame*. To get a totally flat dataframe, like Vensim outuput the `flatten=True` when calling the run function:: +The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray`s in the output :py:class:`pandas.DataFrame`. 
To get a totally flat dataframe, like the Vensim output, set `flatten=True` when calling the run function:: >>> model.run(flatten=True) @@ -159,7 +159,7 @@ This argument expects a dictionary whose keys correspond to the components of th >>> model.run(params={'Room Temperature': 20}) -Alternately, if we believe the room temperature is changing over the course of the simulation, we can give the run function a set of time-series values in the form of a Pandas series, and PySD will linearly interpolate between the given values in the course of its integration:: +Alternately, if we believe the room temperature is changing over the course of the simulation, we can give the run function a set of time-series values in the form of a :py:class:`pandas.Series`, and PySD will linearly interpolate between the given values in the course of its integration:: >>> import pandas as pd >>> temp = pd.Series(index=range(30), data=range(20, 80, 2)) @@ -169,22 +169,22 @@ If the parameter value to change is a subscripted variable (vector, matrix...), >>> model.run(params={'Subscripted var': 0}) -A partial *xarray.DataArray* can be used, for example a new variable with ‘dim2’ but not ‘dim2’, the result will be repeated in the remaining dimensions:: +A partial :py:class:`xarray.DataArray` can be used, for example a new variable with ‘dim2’ but not ‘dim1’, the result will be repeated in the remaining dimensions:: >>> import xarray as xr >>> new_value = xr.DataArray([1, 5], {'dim2': [1, 2]}, ['dim2']) >>> model.run(params={'Subscripted var': new_value}) -Same dimensions *xarray.DataArray* can be used (recommended):: +Same dimensions :py:class:`xarray.DataArray` can be used (recommended):: >>> import xarray as xr >>> new_value = xr.DataArray([[1, 5], [3, 4]], {'dim1': [1, 2], 'dim2': [1, 2]}, ['dim1', 'dim2']) >>> model.run(params={'Subscripted var': new_value}) -In the same way, a Pandas series can be used with constan values, partially defined *xarray.DataArrays* or same dimensions *xarray.DataArrays*. +In the same way, a :py:class:`pandas.Series` can be used with constant values, partially defined :py:class:`xarray.DataArray`s or same dimensions :py:class:`xarray.DataArray`s. .. note:: - That once parameters are set by the run command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:data:`set_components(params={})` method, which takes the same params dictionary as the run function. We might choose to do this in situations where we’ll be running the model many times, and only want to spend time setting the parameters once. + That once parameters are set by the run command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:data:`set_components(params={})` method, which takes the same params dictionary as the run function. We might choose to do this in situations where we'll be running the model many times, and only want to spend time setting the parameters once. .. note:: If you need to know the dimensions of a variable, you can check them by using :py:data:`.get_coords(variable__name)` function:: @@ -200,11 +200,11 @@ In the same way, a Pandas series can be used with constan values, partially defi this will return the coords dictionary and the dimensions list if the variable is subscripted or ‘None’ if the variable is an scalar. .. note:: - If you change the value of a lookup function by a constant, the constant value will be used always.
If a *pandas.Series* is given the index and values will be used for interpolation when the function is called in the model, keeping the arguments that are included in the model file. + If you change the value of a lookup function by a constant, the constant value will be used always. If a :py:class:`pandas.Series` is given the index and values will be used for interpolation when the function is called in the model, keeping the arguments that are included in the model file. - If you change the value of any other variable type by a constant, the constant value will be used always. If a *pandas.Series* is given the index and values will be used for interpolation when the function is called in the model, using the time as argument. + If you change the value of any other variable type by a constant, the constant value will be used always. If a :py:class:`pandas.Series` is given the index and values will be used for interpolation when the function is called in the model, using the time as argument. - If you need to know if a variable takes arguments, i.e., if it is a lookup variable, you can check it by using :py:data:`.get_args(variable__name)` function:: + If you need to know if a variable takes arguments, i.e., if it is a lookup variable, you can check it by using :py:func:`.get_args(variable__name)` function:: >>> model.get_args('Room Temperature') @@ -216,7 +216,7 @@ In the same way, a Pandas series can be used with constan values, partially defi Setting simulation initial conditions ------------------------------------- -Finally, we can set the initial conditions of our model in several ways. So far, we’ve been using the default value for the initial_condition keyword argument, which is ‘original’. This value runs the model from the initial conditions that were specified originally by the model file. We can alternately specify a tuple containing the start time and a dictionary of values for the system’s stocks. Here we start the model with the tea at just above freezing:: +Finally, we can set the initial conditions of our model in several ways. So far, we've been using the default value for the initial_condition keyword argument, which is ‘original’. This value runs the model from the initial conditions that were specified originally by the model file. We can alternately specify a tuple containing the start time and a dictionary of values for the system's stocks. Here we start the model with the tea at just above freezing:: >>> model.run(initial_condition=(0, {'Teacup Temperature': 33})) diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst index 371ffa5a..a51fcb5d 100644 --- a/docs/structure/python_builder.rst +++ b/docs/structure/python_builder.rst @@ -1,7 +1,11 @@ Python builder ============== -This section documents the functions that are going on behaind the scenes, for the benefit of developers. +The Python builder allows to build models that can be run with the PySD Model class. + +The use of a one-to-one dictionary in translation means that the breadth of functionality is inherently limited. In the case where no direct Python equivalent is available, PySD provides a library of functions such as `pulse`, `step`, etc. that are specific to dynamic model behavior. + +In addition to translating individual commands between Vensim/XMILE and Python, PySD reworks component identifiers to be Python-safe by replacing spaces with underscores. The translator allows source identifiers to make use of alphanumeric characters, spaces, or the $ symbol. 
Main builders
-------------
diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst
index 043805bb..5b9077c3 100644
--- a/docs/structure/structure_index.rst
+++ b/docs/structure/structure_index.rst
@@ -25,24 +25,14 @@ The internal functions of the model translation components and relevant objects
-The PySD module is capable of importing models from a Vensim model file (\*.mdl) or an XMILE format xml file. Translation makes use of a Parsing Expression Grammar parser, using the third party Python library Parsimonious to construct an abstract syntax tree based upon the full model file (in the case of Vensim) or individual expressions (in the case of XMILE).
-
-The translators then crawl the tree, using a set of classes to define a pseudo model representation called :doc:`Abstract Model `. Its structure is defined in the following document:
-
-
-The use of a one-to-one dictionary in translation means that the breadth of functionality is inherently limited. In the case where no direct Python equivalent is available, PySD provides a library of functions such as pulse, step, etc. that are specific to dynamic model behavior.
-
-In addition to translating individual commands between Vensim/XMILE and Python, PySD reworks component identifiers to be Python-safe by replacing spaces with underscores. The translator allows source identifiers to make use of alphanumeric characters, spaces, or the $ symbol.
-
-During translation some dictionaries are created that allow the correct operation of the model:
-
-* **_namespace**: used to connect real name (from the original model) with the Python name.
-* **_subscript_dict**: Used to define the subscript ranges and subranges.
-* **_dependencies**: Used to define the dependencies of each variable and assign cache type and initialize the model.
+The PySD module is capable of importing models from a Vensim model file (\*.mdl) or an XMILE format xml file. Translation makes use of a Parsing Expression Grammar parser, using the third party Python library Parsimonious to construct an abstract syntax tree based upon the full model file (in the case of Vensim) or individual expressions (in the case of XMILE). The translators then crawl the tree, using a set of classes to define a pseudo model representation called :doc:`Abstract Model `.
Building the model
------------------
+
+The builders allow you to build the final model in the desired language. To do so, they use a series of classes that extract the information from the abstract model and convert it into the desired code. Currently there is only one builder, which builds the models in Python; any contribution to add new builders is welcome.
+
.. toctree::
:maxdepth: 2
@@ -57,11 +47,11 @@ The Python model class
model_loading
-The translator constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a ‘statefull’ model in much the same way that the system itself has a specific state at any point in time.
+The translator constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `stateful` model in much the same way that the system itself has a specific state at any point in time.
The model class also contains a function for each of the model components, representing the essential model equations.
The docstring for each function contains the model documentation and units as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system.
-The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. The downside to this design choice is that several components of Vensim or XMILE functionality – the most significant being the infinite order delay – are intentionally not supported. In many cases similar behavior can be approximated through other constructs.
+The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. The downside to this design choice is that several components of Vensim or XMILE functionality - the most significant being the infinite order delay - are intentionally not supported. In many cases similar behavior can be approximated through other constructs.
Lastly, the model class provides a set of methods that are used to facilitate simulation. PySD uses the standard ordinary differential equations solver provided in the well-established Python library Scipy, which expects the state and its derivative to be represented as an ordered list. The model class provides the function .d_dt() that takes a state vector from the integrator and uses it to update the model state, and then calculates the derivative of each stock, returning them in a corresponding vector. A complementary function .state_vector() creates an ordered vector of states for use in initializing the integrator.
@@ -69,6 +59,6 @@ The PySD class
^^^^^^^^^^^^^^
The PySD class provides the machinery to get the model moving, supply it with data, or modify its parameters. In addition, this class is the primary way that users interact with the PySD module.
-The basic function for executing a model is appropriately named.run(). This function passes the model into scipy’s odeint() ordinary differential equations solver. The scipy integrator is itself utilizing the lsoda integrator from the Fortran library odepack14, and so integration takes advantage of highly optimized low-level routines to improve speed. We use the model’s timestep to set the maximum step size for the integrator’s adaptive solver to ensure that the integrator properly accounts for discontinuities.
+The basic function for executing a model is appropriately named .run(). This function passes the model into scipy's odeint() ordinary differential equations solver. The scipy integrator itself uses the lsoda integrator from the Fortran library odepack, and so integration takes advantage of highly optimized low-level routines to improve speed. We use the model's timestep to set the maximum step size for the integrator's adaptive solver to ensure that the integrator properly accounts for discontinuities.
The .run() function returns to the user a Pandas dataframe representing the output of their simulation run.
A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. \ No newline at end of file diff --git a/pysd/pysd.py b/pysd/pysd.py index 523e4665..6a8c2de3 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -119,13 +119,13 @@ def read_vensim(mdl_file, data_files=None, initialize=True, read from the model, if the encoding is not defined in the model file it will be set to 'UTF-8'. Default is None. + subview_sep: list + Characters used to separate views and subviews (e.g. [",", "."]). + If provided, and split_views=True, each submodule will be placed + inside the directory of the parent view. + **kwargs: (optional) Additional keyword arguments for translation. - subview_sep: list - Characters used to separate views and subviews (e.g. [",", "."]). - If provided, and split_views=True, each submodule will be placed - inside the directory of the parent view. - Returns ------- From a32a120d33a18f3ed57ddd911f3e21d64e9de258 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 26 Apr 2022 14:45:51 +0200 Subject: [PATCH 43/96] Remove dependencies dict from model files Remove dependencies dict from model files and also remove subscript dict when it is not necessary. --- .../python/python_expressions_builder.py | 151 +++++++++--------- pysd/building/python/python_model_builder.py | 46 +++--- pysd/py_backend/components.py | 10 +- pysd/py_backend/statefuls.py | 58 +++++-- pysd/py_backend/utils.py | 10 +- .../test_circular_reference.py | 19 ++- .../test_initialization_order.py | 28 ++-- .../version/test_current_version.py | 2 - .../pytest_select_submodel.py | 16 +- .../vensim_parser/pytest_split_views.py | 7 +- tests/unit_test_cli.py | 7 +- tests/unit_test_pysd.py | 6 +- 12 files changed, 193 insertions(+), 167 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index cfc2d3d2..95a86621 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -292,11 +292,12 @@ def build_macro_call(self, arguments): "%(args)s, '%(macro_name)s', " "time_initialization=lambda: __data['time'], " "py_name='%(name)s')" % arguments, - "calls": { - "initial": calls, - "step": calls - } } + self.element.other_dependencies[arguments["name"]] = { + "initial": calls, + "step": calls + } + return BuildAST( expression="%s()" % arguments["name"], calls={arguments["name"]: 1}, @@ -367,15 +368,11 @@ def build_function_call(self, arguments): def_subs=self.def_subs, force="equal" ) - self.element.objects[name] = { - "name": name, - "expression": None, - "calls": { - "initial": arguments["1"].calls, - "step": arguments["0"].calls - } - + self.element.other_dependencies[name] = { + "initial": arguments["1"].calls, + "step": arguments["0"].calls } + calls = {name: 1} else: final_subscripts = self.reorder( @@ -658,12 +655,12 @@ def build(self, arguments): "name": arguments["name"], "expression": "%(name)s = Initial(lambda: %(initial)s, " "'%(name)s')" % arguments, - "calls": { - "initial": arguments["initial"].calls, - "step": {} - } - } + self.element.other_dependencies[arguments["name"]] = { + "initial": arguments["initial"].calls, + "step": {} + } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -695,13 
+692,13 @@ def build(self, arguments): self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Integ(lambda: %(flow)s, " - "lambda: %(initial)s, '%(name)s')" % arguments, - "calls": { - "initial": arguments["initial"].calls, - "step": arguments["flow"].calls - } - + "lambda: %(initial)s, '%(name)s')" % arguments + } + self.element.other_dependencies[arguments["name"]] = { + "initial": arguments["initial"].calls, + "step": arguments["flow"].calls } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -742,17 +739,18 @@ def build(self, arguments): "lambda: %(delay_time)s, lambda: %(initial)s, " "lambda: %(order)s, " "time_step, '%(name)s')" % arguments, - "calls": { - "initial": merge_dependencies( - arguments["initial"].calls, - arguments["delay_time"].calls, - arguments["order"].calls), - "step": merge_dependencies( - arguments["input"].calls, - arguments["delay_time"].calls) + } + self.element.other_dependencies[arguments["name"]] = { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["delay_time"].calls, + arguments["order"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["delay_time"].calls) - } } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -787,13 +785,14 @@ def build(self, arguments): "expression": "%(name)s = DelayFixed(lambda: %(input)s, " "lambda: %(delay_time)s, lambda: %(initial)s, " "time_step, '%(name)s')" % arguments, - "calls": { - "initial": merge_dependencies( - arguments["initial"].calls, - arguments["delay_time"].calls), - "step": arguments["input"].calls - } } + self.element.other_dependencies[arguments["name"]] = { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["delay_time"].calls), + "step": arguments["input"].calls + } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -836,17 +835,17 @@ def build(self, arguments): "expression": "%(name)s = Smooth(lambda: %(input)s, " "lambda: %(smooth_time)s, lambda: %(initial)s, " "lambda: %(order)s, '%(name)s')" % arguments, - "calls": { - "initial": merge_dependencies( - arguments["initial"].calls, - arguments["smooth_time"].calls, - arguments["order"].calls), - "step": merge_dependencies( - arguments["input"].calls, - arguments["smooth_time"].calls) - } - } + self.element.other_dependencies[arguments["name"]] = { + "initial": merge_dependencies( + arguments["initial"].calls, + arguments["smooth_time"].calls, + arguments["order"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["smooth_time"].calls) + } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -884,17 +883,17 @@ def build(self, arguments): "lambda: %(average_time)s, " "lambda: %(initial_trend)s, " "'%(name)s')" % arguments, - "calls": { - "initial": merge_dependencies( - arguments["initial_trend"].calls, - arguments["input"].calls, - arguments["average_time"].calls), - "step": merge_dependencies( - arguments["input"].calls, - arguments["average_time"].calls) - } - } + self.element.other_dependencies[arguments["name"]] = { + "initial": merge_dependencies( + arguments["initial_trend"].calls, + arguments["input"].calls, + arguments["average_time"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["average_time"].calls) + } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -934,17 +933,17 @@ def 
build(self, arguments): "expression": "%(name)s = Forecast(lambda: %(input)s, " "lambda: %(average_time)s, lambda: %(horizon)s, " "lambda: %(initial_trend)s, '%(name)s')" % arguments, - "calls": { - "initial": merge_dependencies( - arguments["input"].calls, - arguments["initial_trend"].calls), - "step": merge_dependencies( - arguments["input"].calls, - arguments["average_time"].calls, - arguments["horizon"].calls) - } - } + self.element.other_dependencies[arguments["name"]] = { + "initial": merge_dependencies( + arguments["input"].calls, + arguments["initial_trend"].calls), + "step": merge_dependencies( + arguments["input"].calls, + arguments["average_time"].calls, + arguments["horizon"].calls) + } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, @@ -981,15 +980,15 @@ def build(self, arguments): "expression": "%(name)s = SampleIfTrue(lambda: %(condition)s, " "lambda: %(input)s, lambda: %(initial)s, " "'%(name)s')" % arguments, - "calls": { - "initial": - arguments["initial"].calls, - "step": merge_dependencies( - arguments["condition"].calls, - arguments["input"].calls) - } - } + self.element.other_dependencies[arguments["name"]] = { + "initial": + arguments["initial"].calls, + "step": merge_dependencies( + arguments["condition"].calls, + arguments["input"].calls) + } + return BuildAST( expression=arguments["name"] + "()", calls={arguments["name"]: 1}, diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 084593cf..2136c038 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -47,7 +47,6 @@ def __init__(self, abstract_section: AbstractSection): self.namespace = NamespaceManager(self.params) self.imports = ImportsManager() self.macrospace = {} - self.dependencies = {} # create parameters dict necessary in macros self.params = { @@ -65,10 +64,6 @@ def build_section(self): for element in self.elements: element.build_element() - self.dependencies[element.identifier] = element.dependencies - for subelement in element.objects.values(): - if "calls" in subelement: - self.dependencies[subelement["name"]] = subelement["calls"] if self.split: self._build_modular(self.views_dict) @@ -123,8 +118,7 @@ def _build_modular(self, elements_per_view): for file, values in { "modules_%s/_modules": elements_per_view, - "_subscripts_%s": self.subscripts.subscripts, - "_dependencies_%s": self.dependencies}.items(): + "_subscripts_%s": self.subscripts.subscripts}.items(): with self.root.joinpath( file % self.model_name).with_suffix( @@ -203,7 +197,7 @@ def _build_main_module(self, elements): # import of needed functions and packages text = self.imports.get_header(self.path.name) - # import subscript dict and dependencies from json file + # import subscript dict from json file text += textwrap.dedent(""" __pysd_version__ = '%(version)s' @@ -214,7 +208,7 @@ def _build_main_module(self, elements): _root = Path(__file__).parent %(params)s - _subscript_dict, _dependencies, _modules = load_model_data( + _subscript_dict, _modules = load_model_data( _root, "%(model_name)s") component = Component() @@ -246,6 +240,12 @@ def _build(self): control_vars, funcs = self._build_variables(self.elements) text = self.imports.get_header(self.path.name) + indent = "\n " + params = f"{indent}_params = {self.params}\n"\ + if self.params else "" + subs = f"{indent}_subscript_dict = {self.subscripts.subscripts}"\ + if self.subscripts.subscripts else "" + text += textwrap.dedent(""" 
__pysd_version__ = '%(version)s' @@ -256,16 +256,12 @@ def _build(self): _root = Path(__file__).parent %(params)s - _subscript_dict = %(subscript_dict)s - - _dependencies = %(dependencies)s + %(subscript_dict)s component = Component() """ % { - "subscript_dict": repr(self.subscripts.subscripts), - "dependencies": repr(self.dependencies), - "params": f"\n _params = {self.params}\n" - if self.params else "", + "subscript_dict": subs, + "params": params, "version": __version__, }) @@ -414,6 +410,7 @@ def __init__(self, abstract_element: AbstractElement, [component.subscripts[0] for component in self.components]) self.subs_dict = section.subscripts.make_coord_dict(self.subscripts) self.dependencies = {} + self.other_dependencies = {} self.objects = {} def _format_limits(self, limits): @@ -586,6 +583,7 @@ def build_element_out(self): self.name = repr(self.name) meta_data = ["name=%(name)s"] + # include basic metadata (units, limits, dimensions) if self.units: meta_data.append("units=%(units)s") self.units = repr(self.units) @@ -594,17 +592,25 @@ def build_element_out(self): if self.subscripts: self.section.imports.add("subs") meta_data.append("subscripts=%(subscripts)s") - if self.documentation: - doc = self.documentation.replace("\\", "\n") - contents = f'"""\n{doc}\n"""\n'\ - + contents + # include component type and subtype meta_data.append("comp_type='%(type)s'") meta_data.append("comp_subtype='%(subtype)s'") + # include dependencies + if self.dependencies: + meta_data.append("depends_on=%(dependencies)s") + if self.other_dependencies: + meta_data.append("other_deps=%(other_dependencies)s") + self.meta_data = f"@component.add({', '.join(meta_data)})"\ % self.__dict__ + if self.documentation: + doc = self.documentation.replace("\\", "\n") + contents = f'"""\n{doc}\n"""\n'\ + + contents + indent = 12 # convert newline indicator and add expected level of indentation diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py index fb28bb08..9db3ed99 100644 --- a/pysd/py_backend/components.py +++ b/pysd/py_backend/components.py @@ -16,9 +16,11 @@ class Component(object): def __init__(self): self.namespace = {} + self.dependencies = {} def add(self, name, units=None, limits=(np.nan, np.nan), - subscripts=None, comp_type=None, comp_subtype=None): + subscripts=None, comp_type=None, comp_subtype=None, + depends_on={}, other_deps={}): """ This decorators allows assigning metadata to a function. """ @@ -30,7 +32,13 @@ def decorator(function): function.type = comp_type function.subtype = comp_subtype function.args = inspect.getfullargspec(function)[0] + + # include component in namespace and dependencies self.namespace[name] = function.__name__ + if function.__name__ != "time": + self.dependencies[function.__name__] = depends_on + self.dependencies.update(other_deps) + return function return decorator diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index d34f47b5..fac3de5d 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -3,10 +3,10 @@ method. This include from basic Integ class objects until the Model class objects. 
""" - import inspect import pickle import warnings +from typing import Union import numpy as np import pandas as pd @@ -627,7 +627,12 @@ def __init__(self, py_model_file, params=None, return_func=None, + " read_vensim or read_xmile.") self._namespace = self.components._components.component.namespace - self._dependencies = self.components._dependencies + self._dependencies = self.components._components.component.dependencies + self._subscript_dict = getattr( + self.components._components, "_subscript_dict", {}) + self._modules = getattr( + self.components._components, "_modules", {}) + self._doc = self._build_doc() if params is not None: @@ -686,17 +691,41 @@ def __call__(self): return self.return_func() @property - def doc(self): + def doc(self) -> pd.DataFrame: + """ + The documentation of the model. + """ return self._doc.copy() @property - def namespace(self): + def namespace(self) -> dict: + """ + The namespace dictionary of the model. + """ return self._namespace.copy() @property - def dependencies(self): + def dependencies(self) -> dict: + """ + The dependencies dictionary of the model. + """ return self._dependencies.copy() + @property + def subscripts(self) -> dict: + """ + The subscripts dictionary of the model. + """ + return self._subscript_dict.copy() + + @property + def modules(self) -> Union[dict, None]: + """ + The dictionary of modules of the model. If the model is not + split by modules it returns None. + """ + return self._modules.copy() or None + def clean_caches(self): self.cache.clean() # if nested macros @@ -1306,13 +1335,13 @@ def _timeseries_component(self, series, dims): series.values, series.index).interp(concat_dim=self.time()).reset_coords( 'concat_dim', drop=True), - dims, self.components._subscript_dict), {'time': 1} + dims, self._subscript_dict), {'time': 1} elif dims: # the interpolation will be time dependent return lambda: utils.rearrange( np.interp(self.time(), series.index, series.values), - dims, self.components._subscript_dict), {'time': 1} + dims, self._subscript_dict), {'time': 1} else: # the interpolation will be time dependent @@ -1324,7 +1353,7 @@ def _constant_component(self, value, dims): """ Internal function for creating a constant model element """ if dims: return lambda: utils.rearrange( - value, dims, self.components._subscript_dict) + value, dims, self._subscript_dict) else: return lambda: value @@ -1385,7 +1414,7 @@ def set_initial_value(self, t, initial_value): if dims: value = utils.rearrange( value, dims, - self.components._subscript_dict) + self._subscript_dict) element.initialize(value) modified_statefuls.add(stateful_name) except NameError: @@ -1436,9 +1465,6 @@ def set_stateful(self, stateful_dict): for attr, value in attrs.items(): setattr(getattr(self.components, element), attr, value) - def subscript_dict(self): - return self.components._subscript_dict.copy() - def _build_doc(self): """ Formats a table of documentation strings to help users remember @@ -1466,7 +1492,7 @@ def _build_doc(self): 'Type': element.type, 'Subtype': element.subtype, 'Comment': element.__doc__.strip().strip("\n").strip() - if element.__doc__ else None + if element.__doc__ else None }) if collector: @@ -1957,9 +1983,9 @@ def get_vars_in_module(self, module): Set of varible names in the given module. 
""" - try: - module_content = self.components._modules.copy() - except NameError: + if self._modules: + module_content = self._modules.copy() + else: raise ValueError( "Trying to get a module from a non-modularized model") diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index 9e377bac..caf2cffc 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -301,7 +301,7 @@ def load_model_data(root, model_name): """ Used for models split in several files. - Loads subscripts, depenencies and modules dictionaries + Loads subscripts and modules dictionaries Parameters ---------- @@ -317,9 +317,6 @@ def load_model_data(root, model_name): Dictionary describing the possible dimensions of the stock's subscripts. - dependencies: dict - DIctionary containing the dependencies of each model component. - modules: dict Dictionary containing view (module) names as keys and a list of the corresponding variables as values. @@ -333,15 +330,12 @@ def load_model_data(root, model_name): with open(root.joinpath("_subscripts_" + model_name + ".json")) as subs: subscripts = json.load(subs) - with open(root.joinpath("_dependencies_" + model_name + ".json")) as deps: - dependencies = json.load(deps) - # the _modules.json in the sketch_var folder shows to which module each # variable belongs with open(root.joinpath("modules_" + model_name, "_modules.json")) as mods: modules = json.load(mods) - return subscripts, dependencies, modules + return subscripts, modules def load_modules(module_name, module_content, work_dir, submodules): diff --git a/tests/more-tests/circular_reference/test_circular_reference.py b/tests/more-tests/circular_reference/test_circular_reference.py index fb4451ad..17d822e9 100644 --- a/tests/more-tests/circular_reference/test_circular_reference.py +++ b/tests/more-tests/circular_reference/test_circular_reference.py @@ -2,12 +2,7 @@ from pysd import Component _subscript_dict = {} -_dependencies = { - 'integ': {'_integ_integ': 1}, - 'delay': {'_delay_delay': 1}, - '_integ_integ': {'initial': {'delay': 1}, 'step': {}}, - '_delay_delay': {'initial': {'integ': 1}, 'step': {}} -} + __pysd_version__ = "3.0.0" __data = {'scope': None, 'time': lambda: 0} @@ -52,12 +47,20 @@ def saveper(): return __data["time"].save() -@component.add(name="Integ") +@component.add( + name="Integ", + depends_on={'_integ_integ': 1}, + other_deps={'_integ_integ': {'initial': {'delay': 1}, 'step': {}}} +) def integ(): return _integ_integ() -@component.add(name="Delay") +@component.add( + name="Delay", + depends_on={'_delay_delay': 1}, + other_deps={'_delay_delay': {'initial': {'integ': 1}, 'step': {}}} +) def delay(): return _delay_delay() diff --git a/tests/more-tests/initialization_order/test_initialization_order.py b/tests/more-tests/initialization_order/test_initialization_order.py index e50387d9..d132755b 100644 --- a/tests/more-tests/initialization_order/test_initialization_order.py +++ b/tests/more-tests/initialization_order/test_initialization_order.py @@ -11,18 +11,6 @@ _subscript_dict = {} -_dependencies = { - 'initial_time': {}, - 'final_time': {}, - 'time_step': {}, - 'saveper': {'time_step': 1}, - 'initial_par': {}, - 'stock_a': {'_integ_stock_a': 1}, - 'stock_b': {'_integ_stock_b': 1}, - '_integ_stock_a': {'initial': {'initial_par': 1}, 'step': {}}, - '_integ_stock_b': {'initial': {'stock_a': 1}, 'step': {}} -} - __data = {"scope": None, "time": lambda: 0} component = Component() @@ -60,17 +48,27 @@ def time_step(): return __data["time"].time_step() -@component.add(name="Saveper") 
+@component.add(name="Saveper", depends_on={'time_step': 1}) def saveper(): return __data["time"].saveper() -@component.add(name="Stock B") +@component.add(name="Stock B", depends_on={'_integ_stock_b': 1}, + other_deps={ + '_integ_stock_b': { + 'initial': {'stock_a': 1}, + 'step': {} + }}) def stock_b(): return _integ_stock_b() -@component.add(name="Stock A") +@component.add(name="Stock A", depends_on={'_integ_stock_a': 1}, + other_deps={ + '_integ_stock_a': { + 'initial': {'initial_par': 1}, + 'step': {} + }}) def stock_a(): return _integ_stock_a() diff --git a/tests/more-tests/version/test_current_version.py b/tests/more-tests/version/test_current_version.py index dcc7e5b9..8b98cfd1 100644 --- a/tests/more-tests/version/test_current_version.py +++ b/tests/more-tests/version/test_current_version.py @@ -2,8 +2,6 @@ __pysd_version__ = "3.0.0" -_dependencies = {} - __data = {'scope': None, 'time': lambda: 0} _control_vars = { diff --git a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py index 99c3654e..cbd86d4f 100644 --- a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py +++ b/tests/pytest_pysd/user_interaction/pytest_select_submodel.py @@ -117,12 +117,12 @@ def test_select_submodel(self, model, variables, modules, # assert original stateful elements assert len(model._dynamicstateful_elements) == 2 assert "_integ_other_stock" in model._stateful_elements - assert "_integ_other_stock" in model.components._dependencies - assert "other_stock" in model.components._dependencies + assert "_integ_other_stock" in model._dependencies + assert "other_stock" in model._dependencies assert "other stock" in model._namespace assert "_integ_stock" in model._stateful_elements - assert "_integ_stock" in model.components._dependencies - assert "stock" in model.components._dependencies + assert "_integ_stock" in model._dependencies + assert "stock" in model._dependencies assert "Stock" in model._namespace # select submodel @@ -135,12 +135,12 @@ def test_select_submodel(self, model, variables, modules, # assert stateful elements change assert len(model._dynamicstateful_elements) == 1 assert "_integ_other_stock" not in model._stateful_elements - assert "_integ_other_stock" not in model.components._dependencies - assert "other_stock" not in model.components._dependencies + assert "_integ_other_stock" not in model._dependencies + assert "other_stock" not in model._dependencies assert "other stock" not in model._namespace assert "_integ_stock" in model._stateful_elements - assert "_integ_stock" in model.components._dependencies - assert "stock" in model.components._dependencies + assert "_integ_stock" in model._dependencies + assert "stock" in model._dependencies assert "Stock" in model._namespace if not dep_vars: diff --git a/tests/pytest_translation/vensim_parser/pytest_split_views.py b/tests/pytest_translation/vensim_parser/pytest_split_views.py index b4ae704b..4806e9b8 100644 --- a/tests/pytest_translation/vensim_parser/pytest_split_views.py +++ b/tests/pytest_translation/vensim_parser/pytest_split_views.py @@ -86,7 +86,6 @@ def expected_files(self, shared_tmpdir, _root, model_path, modules_dir = shared_tmpdir.joinpath("modules_" + model_name) files = { shared_tmpdir.joinpath("_subscripts_" + model_name + ".json"), - shared_tmpdir.joinpath("_dependencies_" + model_name + ".json"), modules_dir.joinpath("_modules.json") } [files.add(modules_dir.joinpath(module + ".py")) for module in modules] @@ -112,9 +111,9 @@ def 
test_read_vensim_split_model(self, model_file, subview_sep, # check the dictionaries assert isinstance(model_split._namespace, dict) - assert isinstance(model_split.components._subscript_dict, dict) - assert isinstance(model_split.components._dependencies, dict) - assert isinstance(model_split.components._modules, dict) + assert isinstance(model_split._subscript_dict, dict) + assert isinstance(model_split._dependencies, dict) + assert isinstance(model_split._modules, dict) # assert taht main modules are dictionary keys for module in modules: diff --git a/tests/unit_test_cli.py b/tests/unit_test_cli.py index ffd3a110..3d030dfa 100644 --- a/tests/unit_test_cli.py +++ b/tests/unit_test_cli.py @@ -198,7 +198,6 @@ def test_read_vensim_split_model(self): root_dir = os.path.join(_root, "more-tests/split_model") + "/" model_name = "test_split_model" - dependencies_filename = "_dependencies_" + model_name + ".json" subscript_filename = "_subscripts_" + model_name + ".json" modules_filename = "_modules.json" modules_dirname = "modules_" + model_name @@ -208,9 +207,8 @@ def test_read_vensim_split_model(self): out = subprocess.run(split_bash(command), capture_output=True) self.assertEqual(out.returncode, 0) - # check that _subscript_dict and dependencies json files where created + # check that _subscript_dict json file was created self.assertTrue(os.path.isfile(root_dir + subscript_filename)) - self.assertTrue(os.path.isfile(root_dir + dependencies_filename)) # check that the main model file was created self.assertTrue(os.path.isfile(root_dir + model_name + ".py")) @@ -232,7 +230,6 @@ def test_read_vensim_split_model(self): # remove newly created files os.remove(root_dir + model_name + ".py") os.remove(root_dir + subscript_filename) - os.remove(root_dir + dependencies_filename) # remove newly created modules folder shutil.rmtree(root_dir + modules_dirname) @@ -252,7 +249,6 @@ def test_read_vensim_split_model_subviews(self): ) subscript_filename = "_subscripts_" + model_name + ".json" - dependencies_filename = "_dependencies_" + model_name + ".json" modules_dirname = "modules_" + model_name separator = "." 
@@ -290,7 +286,6 @@ def test_read_vensim_split_model_subviews(self): # remove newly created files os.remove(root_dir + model_name + ".py") os.remove(root_dir + subscript_filename) - os.remove(root_dir + dependencies_filename) # remove newly created modules folder shutil.rmtree(root_dir + modules_dirname) diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 55e40369..db15caf5 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -1360,7 +1360,7 @@ def test_teacup_deps(self): 'saveper': {'time_step': 1}, 'time_step': {} } - self.assertEqual(model.components._dependencies, expected_dep) + self.assertEqual(model.dependencies, expected_dep) def test_multiple_deps(self): from pysd import read_vensim @@ -1391,7 +1391,7 @@ def test_multiple_deps(self): 'step': {'inflow_b': 1} } } - self.assertEqual(model.components._dependencies, expected_dep) + self.assertEqual(model.dependencies, expected_dep) more_tests.joinpath( "subscript_individually_defined_stocks2/" @@ -1411,7 +1411,7 @@ def test_constant_deps(self): "time_step": {}, "saveper": {"time_step": 1} } - self.assertEqual(model.components._dependencies, expected_dep) + self.assertEqual(model.dependencies, expected_dep) for key, value in model.cache_type.items(): if key != "time": From 2c0930549b6c606b94e3f82f3a83d6f37308e467 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 26 Apr 2022 17:27:27 +0200 Subject: [PATCH 44/96] Add tests and clean files --- pysd/building/python/namespace.py | 13 +- .../python/python_expressions_builder.py | 61 +++------ pysd/building/python/python_model_builder.py | 1 - pysd/building/python/subscripts.py | 19 +-- pysd/py_backend/components.py | 2 +- pysd/py_backend/functions.py | 5 +- pysd/py_backend/statefuls.py | 2 +- pysd/py_backend/utils.py | 14 +-- .../vensim/parsing_grammars/components.peg | 4 +- pysd/translation/vensim/vensim_element.py | 3 - pysd/translation/vensim/vensim_file.py | 2 +- .../xmile/parsing_grammars/equations.peg | 7 +- pysd/translation/xmile/xmile_element.py | 6 - .../test_not_implemented_and_incomplete.mdl | 65 ++++++++++ .../test_not_implemented_and_incomplete.py | 118 ++++++++++++++++++ .../test_old_version.py | 0 .../version/test_current_version.py | 44 ------- tests/pytest_pysd/errors/pytest_errors.py | 94 ++++++++++++++ tests/test-models | 2 +- tests/unit_test_pysd.py | 34 ----- tests/unit_test_statefuls.py | 16 ++- 21 files changed, 320 insertions(+), 192 deletions(-) create mode 100644 tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl create mode 100644 tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py rename tests/more-tests/{version => old_version}/test_old_version.py (100%) delete mode 100644 tests/more-tests/version/test_current_version.py create mode 100644 tests/pytest_pysd/errors/pytest_errors.py diff --git a/pysd/building/python/namespace.py b/pysd/building/python/namespace.py index 733d9cc7..1bf01c8c 100644 --- a/pysd/building/python/namespace.py +++ b/pysd/building/python/namespace.py @@ -63,15 +63,11 @@ def make_python_identifier(self, string: str, prefix: str = None, """ Takes an arbitrary string and creates a valid Python identifier. - If the input string is in the namespace, return its value. 
- If the python identifier created is already in the namespace, but the input string is not (ie, two similar strings resolve to - the same python identifier) - - or if the identifier is a reserved word in the reserved_words - list, or is a python default reserved word, - adds _1, or if _1 is in the namespace, _2, etc. + the same python identifier) or if the identifier is a reserved + word in the reserved_words list, or is a python default + reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- @@ -139,9 +135,6 @@ def make_python_identifier(self, string: str, prefix: str = None, s = string.lower() clean_s = s.replace(" ", "_") - if prefix is None and clean_s in self.cleanspace: - return self.cleanspace[clean_s] - # Make spaces into underscores s = re.sub(r"[\s\t\n_]+", "_", s) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 95a86621..5f6377f0 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -87,13 +87,6 @@ def __init__(self, value, component): self.section = component.section self.def_subs = component.subscripts_dict - def build(self, arguments): - return BuildAST( - expression=repr(self.value), - calls={}, - subscripts={}, - order=0) - def join_calls(self, arguments): if len(arguments) == 0: return {} @@ -255,9 +248,9 @@ def __init__(self, call_str, component): def build_not_implemented(self, arguments): final_subscripts = self.reorder(arguments, def_subs=self.def_subs) warnings.warn( - "\n\nTrying to translate " - + self.function - + " which it is not implemented on PySD. The translated " + "\n\nTrying to translate '" + + self.function.upper().replace("_", " ") + + "' which it is not implemented on PySD. The translated " + "model will crash... 
" ) self.section.imports.add("functions", "not_implemented_function") @@ -270,6 +263,19 @@ def build_not_implemented(self, arguments): subscripts=final_subscripts, order=0) + def build_incomplete_call(self, arguments): + warnings.warn( + "'%s' has no equation specified" % self.element.name, + SyntaxWarning, stacklevel=2 + ) + self.section.imports.add("functions", "incomplete") + return BuildAST( + expression="incomplete(%s)" % ", ".join( + arg.expression for arg in arguments.values()), + calls=self.join_calls(arguments), + subscripts=self.def_subs, + order=0) + def build_macro_call(self, arguments): self.section.imports.add("statefuls", "Macro") macro = self.section.macrospace[self.macro_name] @@ -304,19 +310,6 @@ def build_macro_call(self, arguments): subscripts=final_subscripts, order=0) - def build_incomplete_call(self, arguments): - warnings.warn( - "%s has no equation specified" % self.element.name, - SyntaxWarning, stacklevel=2 - ) - self.section.imports.add("functions", "incomplete") - return BuildAST( - expression="incomplete(%s)" % ", ".join( - arg.expression for arg in arguments.values()), - calls=self.join_calls(arguments), - subscripts=self.def_subs, - order=0) - def build_lookups_call(self, arguments): if arguments["0"].subscripts: final_subscripts =\ @@ -1463,25 +1456,3 @@ def _visit(self, ast_object): for name, value in builder.arguments.items() } return builder.build(arguments) - - -class ExceptVisitor: # pragma: no cover - # this class will be used in the numpy array backend - def __init__(self, component): - self.except_definitions = component.subscripts[1] - self.subscripts = component.section.subscripts - self.subscripts_dict = component.subscripts_dict - - def visit(self): - excepts = [ - BuildAST("", self.subscripts_dict, {}, 0) - for _ in self.except_definitions - ] - [ - except_def.reshape( - self.subscripts, - self.subscripts.make_coord_dict(except_list)) - for except_def, except_list - in zip(excepts, self.except_definitions) - ] - return excepts diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 2136c038..91850593 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -640,7 +640,6 @@ def __init__(self, abstract_component: AbstractComponent, def build_component(self): self.subscripts_dict = self.section.subscripts.make_coord_dict( self.subscripts[0]) - # NUMPY: use vs.ExceptVisitor self.except_subscripts = [self.section.subscripts.make_coord_dict( except_list) for except_list in self.subscripts[1]] self.ast_build = vs.ASTVisitor(self).visit() diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py index 0786d521..36eef739 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/building/python/subscripts.py @@ -154,11 +154,10 @@ def _get_subscript2num(self) -> dict: return s2n - def find_subscript_name(self, element: str, avoid: List[str] = []) -> str: + def _find_subscript_name(self, element: str, avoid: List[str] = []) -> str: """ Given a member of a subscript family, return the first key of which the member is within the value list. - If element is already a subscript name, return that. Parameters ---------- @@ -179,17 +178,14 @@ def find_subscript_name(self, element: str, avoid: List[str] = []) -> str: >>> sm._subscripts = { ... 'Dim1': ['A', 'B', 'C'], ... 
'Dim2': ['A', 'B', 'C', 'D']} - >>> sm.find_subscript_name('D') + >>> sm._find_subscript_name('D') 'Dim2' - >>> sm.find_subscript_name('B') + >>> sm._find_subscript_name('B') 'Dim1' - >>> sm.find_subscript_name('B', avoid=['Dim1']) + >>> sm._find_subscript_name('B', avoid=['Dim1']) 'Dim2' """ - if element in self.subscripts.keys(): - return element - for name, elements in self.subscripts.items(): if element in elements and name not in avoid: return name @@ -230,7 +226,7 @@ def make_coord_dict(self, subs: List[str]) -> dict: coordinates = {} for sub in subs: if sub in sub_elems_list: - name = self.find_subscript_name( + name = self._find_subscript_name( sub, avoid=subs + list(coordinates)) coordinates[name] = [sub] else: @@ -396,10 +392,7 @@ def simplify_subscript_input(self, coords: dict, for ndim, (dim, coord) in zip(merge_subs, coords.items()): # find dimensions can be retrieved from _subscript_dict final_subs[ndim] = coord - if dim.endswith("!") and coord == self.subscripts[dim[:-1]]: - # use _subscript_dict - coordsp.append(f"'{ndim}': _subscript_dict['{dim[:-1]}']") - elif not dim.endswith("!") and coord == self.subscripts[dim]: + if not dim.endswith("!") and coord == self.subscripts[dim]: # use _subscript_dict coordsp.append(f"'{ndim}': _subscript_dict['{dim}']") else: diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py index 9db3ed99..950562b9 100644 --- a/pysd/py_backend/components.py +++ b/pysd/py_backend/components.py @@ -79,7 +79,7 @@ def _load(self, py_model_file): "\n\nNot able to import the model. " + "This may be because the model was compiled with an " + "earlier version of PySD, you can check on the top of " - + " the model file you are trying to load." + + "the model file you are trying to load." + "\nThe current version of PySd is :" + "\n\tPySD " + __version__ + "\n\n" + "Please translate again the model with the function" diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index 28aa9242..e8a2884e 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -9,7 +9,6 @@ import numpy as np import xarray as xr -import scipy.stats as stats small_vensim = 1e-6 # What is considered zero according to Vensim Help @@ -179,6 +178,7 @@ def pulse_magnitude(time, magnitude, start, repeat_time=0): else: return 0 + def if_then_else(condition, val_if_true, val_if_false): """ Implements Vensim's IF THEN ELSE function. @@ -329,8 +329,7 @@ def incomplete(*args): def not_implemented_function(*args): - raise NotImplementedError( - 'Not implemented function {}'.format(args[0])) + raise NotImplementedError(f"Not implemented function '{args[0]}'") def integer(x): diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index fac3de5d..1a641767 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -618,7 +618,7 @@ def __init__(self, py_model_file, params=None, return_func=None, != self.get_pysd_compiler_version().split(".")[0]: raise ImportError( "\n\nNot able to import the model. " - + "The model was compiled with a " + + "The model was translated with a " + "not compatible version of PySD:" + "\n\tPySD " + self.get_pysd_compiler_version() + "\n\nThe current version of PySd is:" diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index caf2cffc..e840b101 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -305,7 +305,7 @@ def load_model_data(root, model_name): Parameters ---------- - root: pathlib.Path or str + root: pathlib.Path Path to the model file. 
model_name: str @@ -322,11 +322,6 @@ def load_model_data(root, model_name): corresponding variables as values. """ - if isinstance(root, str): # pragma: no cover - # backwards compatibility - # TODO: remove with PySD 3.0.0 - root = Path(root) - with open(root.joinpath("_subscripts_" + model_name + ".json")) as subs: subscripts = json.load(subs) @@ -355,7 +350,7 @@ def load_modules(module_name, module_content, work_dir, submodules): module has submodules, whereas if it is a list it means that that particular module/submodule is a final one. - work_dir: str + work_dir: pathlib.Path Path to the module file. submodules: list @@ -370,11 +365,6 @@ def load_modules(module_name, module_content, work_dir, submodules): model file. """ - if isinstance(work_dir, str): # pragma: no cover - # backwards compatibility - # TODO: remove with PySD 3.0.0 - work_dir = Path(work_dir) - if isinstance(module_content, list): with open(work_dir.joinpath(module_name + ".py"), "r", encoding="UTF-8") as mod: diff --git a/pysd/translation/vensim/parsing_grammars/components.peg b/pysd/translation/vensim/parsing_grammars/components.peg index e1032c02..927a4988 100644 --- a/pysd/translation/vensim/parsing_grammars/components.peg +++ b/pysd/translation/vensim/parsing_grammars/components.peg @@ -1,6 +1,6 @@ # Parsing Expression Grammar: components -expr_type = array / final_expr / empty +expr_type = array / final_expr final_expr = logic_expr (_ logic_oper _ logic_expr)* # logic operators (:and:, :or:) logic_expr = not_oper? _ comp_expr # :not: operator @@ -32,5 +32,3 @@ add_oper = ~r"(%(add_ops)s)"IU prod_oper = ~r"(%(prod_ops)s)"IU exp_oper = ~r"(%(exp_ops)s)"IU pre_oper = ~r"(%(pre_ops)s)"IU - -empty = "" # empty string diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 9a6f945e..1605c344 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -706,9 +706,6 @@ def visit__(self, n, vc): def visit_nan(self, n, vc): return self.add_element(np.nan) - def visit_empty(self, n, vc): - return self.add_element(None) - def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translation/vensim/vensim_file.py index 715ae21e..0e034682 100644 --- a/pysd/translation/vensim/vensim_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -75,7 +75,7 @@ def _read(self, encoding: Union[None, str]) -> str: if self.mdl_path.suffix.lower() != ".mdl": raise ValueError( "The file to translate, '%s' " % self.mdl_path - + "is not a vensim model. It must end with mdl extension." + + "is not a Vensim model. It must end with mdl extension." ) if encoding is None: diff --git a/pysd/translation/xmile/parsing_grammars/equations.peg b/pysd/translation/xmile/parsing_grammars/equations.peg index ed12709b..f40d19bb 100644 --- a/pysd/translation/xmile/parsing_grammars/equations.peg +++ b/pysd/translation/xmile/parsing_grammars/equations.peg @@ -1,6 +1,6 @@ # Parsing Expression Grammar: components -expr_type = array / final_expr / empty +expr_type = array / final_expr final_expr = conditional_statement / logic2_expr @@ -14,7 +14,7 @@ exp_expr = neg_expr (_ exp_oper _ neg_expr)* # exponential neg_expr = pre_oper? _ expr # pre operators (-, +) expr = call / parens / number / reference -arguments = ((string / final_expr) _ ","? _)* +arguments = (final_expr _ ","? 
_)* parens = "(" _ final_expr _ ")" call = reference _ "(" _ arguments _ ")" @@ -33,8 +33,6 @@ prod_oper = ~r"(%(prod_ops)s)"IU exp_oper = ~r"(%(exp_ops)s)"IU pre_oper = ~r"(%(pre_ops)s)"IU -empty = "" # empty string - _ = spacechar* spacechar = " "* ~"\t"* @@ -51,4 +49,3 @@ escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" number = raw_number raw_number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?" -string = "\'" (~r"[^\']"IU)* "\'" diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index 16041364..1821e1d9 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -632,9 +632,6 @@ def visit_expr(self, n, vc): else: return vc[0] - def visit_string(self, n, vc): - return self.add_element(eval(n.text)) - def visit_arguments(self, n, vc): arglist = tuple(x.strip(",") for x in vc) return self.add_element(tuple( @@ -648,9 +645,6 @@ def visit__(self, n, vc): # handles whitespace characters return "" - def visit_empty(self, n, vc): - return self.add_element(None) - def generic_visit(self, n, vc): return "".join(filter(None, vc)) or n.text diff --git a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl new file mode 100644 index 00000000..35fb9bc2 --- /dev/null +++ b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl @@ -0,0 +1,65 @@ +{UTF-8} +incomplete var = A FUNCTION OF( Time) + ~ + ~ | + +not implemented function= + MY FUNC(Time) + ~ + ~ | + +******************************************************** + .Control +********************************************************~ + Simulation Control Parameters + | + +FINAL TIME = 1 + ~ Month + ~ The final time for the simulation. + | + +INITIAL TIME = 0 + ~ Month + ~ The initial time for the simulation. + | + +SAVEPER = + TIME STEP + ~ Month [0,?] + ~ The frequency with which output is stored. + | + +TIME STEP = 1 + ~ Month [0,?] + ~ The time step for the simulation. 
+ | + +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 1 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,incomplete var,629,259,46,11,8,3,0,0,0,0,0,0 +10,2,Time,443,331,26,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|0||128-128-128 +1,3,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(527,298)| +10,4,not implemented function,626,406,53,19,8,3,0,0,0,0,0,0 +1,5,2,4,0,0,0,0,0,128,0,-1--1--1,,1|(517,361)| +///---\\\ +:L<%^E!@ +9:Current +15:0,0,0,0,0,0 +19:100,0 +27:0, +34:0, +5:not implemented function +35:Date +36:YYYY-MM-DD +37:2000 +38:1 +39:1 +40:2 +41:0 +42:1 +24:0 +25:0 +26:0 diff --git a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py new file mode 100644 index 00000000..c3cafdb9 --- /dev/null +++ b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py @@ -0,0 +1,118 @@ +""" +Python model 'test_not_implemented_and_incomplete.py' +Translated using PySD +""" + +from pathlib import Path +import numpy as np +import xarray as xr + +from pysd.py_backend.functions import incomplete, not_implemented_function +from pysd import Component + +__pysd_version__ = "3.0.0" + +__data = {"scope": None, "time": lambda: 0} + +_root = Path(__file__).parent + + +component = Component() + +####################################################################### +# CONTROL VARIABLES # +####################################################################### + +_control_vars = { + "initial_time": lambda: 0, + "final_time": lambda: 1, + "time_step": lambda: 1, + "saveper": lambda: time_step(), +} + + +def _init_outer_references(data): + for key in data: + __data[key] = data[key] + + +@component.add(name="Time") +def time(): + """ + Current time of the model. + """ + return __data["time"]() + + +@component.add( + name="FINAL TIME", units="Month", comp_type="Constant", comp_subtype="Normal" +) +def final_time(): + """ + The final time for the simulation. + """ + return __data["time"].final_time() + + +@component.add( + name="INITIAL TIME", units="Month", comp_type="Constant", comp_subtype="Normal" +) +def initial_time(): + """ + The initial time for the simulation. + """ + return __data["time"].initial_time() + + +@component.add( + name="SAVEPER", + units="Month", + limits=(0.0, np.nan), + comp_type="Auxiliary", + comp_subtype="Normal", + depends_on={"time_step": 1}, +) +def saveper(): + """ + The frequency with which output is stored. + """ + return __data["time"].saveper() + + +@component.add( + name="TIME STEP", + units="Month", + limits=(0.0, np.nan), + comp_type="Constant", + comp_subtype="Normal", +) +def time_step(): + """ + The time step for the simulation. 
+ """ + return __data["time"].time_step() + + +####################################################################### +# MODEL VARIABLES # +####################################################################### + + +@component.add( + name="incomplete var", + comp_type="Auxiliary", + comp_subtype="Normal", + depends_on={"time": 1}, +) +def incomplete_var(): + return incomplete(time()) + + +@component.add( + name="not implemented function", + comp_type="Auxiliary", + comp_subtype="Normal", + depends_on={"time": 1}, +) +def not_implemented_function_1(): + return not_implemented_function("my_func", time()) diff --git a/tests/more-tests/version/test_old_version.py b/tests/more-tests/old_version/test_old_version.py similarity index 100% rename from tests/more-tests/version/test_old_version.py rename to tests/more-tests/old_version/test_old_version.py diff --git a/tests/more-tests/version/test_current_version.py b/tests/more-tests/version/test_current_version.py deleted file mode 100644 index 8b98cfd1..00000000 --- a/tests/more-tests/version/test_current_version.py +++ /dev/null @@ -1,44 +0,0 @@ -from pysd import Component - -__pysd_version__ = "3.0.0" - -__data = {'scope': None, 'time': lambda: 0} - -_control_vars = { - "initial_time": lambda: 0, - "final_time": lambda: 20, - "time_step": lambda: 1, - "saveper": lambda: time_step() -} - -component = Component() - - -def _init_outer_references(data): - for key in data: - __data[key] = data[key] - - -@component.add(name="Time") -def time(): - return __data["time"]() - - -@component.add(name="Initial time") -def initial_time(): - return __data["time"].initial_time() - - -@component.add(name="Final time") -def final_time(): - return __data["time"].final_time() - - -@component.add(name="Time step") -def time_step(): - return __data["time"].time_step() - - -@component.add(name="Saveper") -def saveper(): - return __data["time"].saveper() diff --git a/tests/pytest_pysd/errors/pytest_errors.py b/tests/pytest_pysd/errors/pytest_errors.py new file mode 100644 index 00000000..7b62ddad --- /dev/null +++ b/tests/pytest_pysd/errors/pytest_errors.py @@ -0,0 +1,94 @@ +import pytest + +from pysd import read_vensim, read_xmile, load + + +@pytest.fixture +def model_path(_root, name, suffix): + return _root / "more-tests" / name / f"test_{name}.{suffix}" + + +@pytest.mark.parametrize( + "name,suffix,loader,raise_type,error_message", + [ + ( # load_old_version + "old_version", + "py", + load, + ImportError, + r"Not able to import the model\. The model was translated " + r"with a not compatible version of PySD:\n\tPySD 1\.5\.0" + ), + ( # load_type + "type_error", + "py", + load, + ImportError, + r".*Not able to import the model\. This may be because the " + "model was compiled with an earlier version of PySD, you can " + r"check on the top of the model file you are trying to load\..*" + ), + ( # not_vensim_model + "not_vensim", + "txt", + read_vensim, + ValueError, + "The file to translate, " + "'.*test_not_vensim.txt' is not a " + r"Vensim model\. It must end with mdl extension\." + ), + ( # not_xmile_model + "not_vensim", + "txt", + read_xmile, + ValueError, + "The file to translate, " + "'.*test_not_vensim.txt' is not a " + r"Xmile model\. It must end with xmile, xml or stmx extension\." 
+ ), + ( # circular_reference + "circular_reference", + "py", + load, + ValueError, + r"Circular initialization\.\.\.\nNot able to initialize the " + "following objects:\n\t_integ_integ\n\t_delay_delay" + ), + ], + ids=[ + "old_version", "load_type", + "not_vensim_model", "not_xmile_model", + "circular_reference" + ] +) +def test_loading_error(loader, model_path, raise_type, error_message): + with pytest.raises(raise_type, match=error_message): + loader(model_path) + + +@pytest.mark.parametrize( + "name,suffix", + [ + ( # load_old_version + "not_implemented_and_incomplete", + "mdl", + ), + ] +) +def test_not_implemented_and_incomplete(model_path): + with pytest.warns(UserWarning) as ws: + model = read_vensim(model_path) + assert "'incomplete var' has no equation specified"\ + in str(ws[0].message) + assert "Trying to translate 'MY FUNC' which it is not implemented"\ + " on PySD. The translated model will crash..."\ + in str(ws[1].message) + + with pytest.warns(RuntimeWarning, + match="Call to undefined function, calling dependencies " + "and returning NaN"): + model["incomplete var"] + + with pytest.raises(NotImplementedError, + match="Not implemented function 'my_func'"): + model["not implemented function"] diff --git a/tests/test-models b/tests/test-models index d770b2c6..07478c98 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit d770b2c6bb377776b2b855682f6aa210bbdcf441 +Subproject commit 07478c98f67ae939e6fe738ff97f67d4e3602b56 diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index db15caf5..756e59a3 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -27,27 +27,6 @@ class TestPySD(unittest.TestCase): - def test_load_different_version_error(self): - # old PySD major version - with self.assertRaises(ImportError): - pysd.load(more_tests.joinpath("version/test_old_version.py")) - - # current PySD major version - pysd.load(more_tests.joinpath("version/test_current_version.py")) - - def test_load_type_error(self): - with self.assertRaises(ImportError): - pysd.load(more_tests.joinpath("type_error/test_type_error.py")) - - def test_read_not_model_vensim(self): - with self.assertRaises(ValueError): - pysd.read_vensim( - more_tests.joinpath("not_vensim/test_not_vensim.txt")) - - def test_read_not_model_xmile(self): - with self.assertRaises(ValueError): - pysd.read_xmile( - more_tests.joinpath("not_vensim/test_not_vensim.txt")) def test_run(self): model = pysd.read_vensim(test_model) @@ -1297,19 +1276,6 @@ def test_restart_cache(self): self.assertEqual(new, 345) self.assertNotEqual(old, new) - def test_circular_reference(self): - with self.assertRaises(ValueError) as err: - pysd.load(more_tests.joinpath( - "circular_reference/test_circular_reference.py")) - - self.assertIn("_integ_integ", str(err.exception)) - self.assertIn("_delay_delay", str(err.exception)) - self.assertIn( - "Circular initialization...\n" - + "Not able to initialize the following objects:", - str(err.exception), - ) - def test_not_able_to_update_stateful_object(self): integ = pysd.statefuls.Integ( lambda: xr.DataArray([1, 2], {"Dim": ["A", "B"]}, ["Dim"]), diff --git a/tests/unit_test_statefuls.py b/tests/unit_test_statefuls.py index 17fbd9a4..54a5b769 100644 --- a/tests/unit_test_statefuls.py +++ b/tests/unit_test_statefuls.py @@ -1,13 +1,12 @@ import unittest -import os import warnings - +from pathlib import Path import xarray as xr -_root = os.path.dirname(__file__) +_root = Path(__file__).parent -more_tests = os.path.join(_root, "more-tests") +more_tests = 
_root / "more-tests" class TestStateful(unittest.TestCase): @@ -244,10 +243,8 @@ def input(): frcst.initialize(init_trend) self.assertEqual( frcst(), - input_val* - (1+ - (input_val-input_val/(1+init_trend)) - /(3*input_val/(1+init_trend))*10)) + input_val * (1+(input_val-input_val/(1+init_trend)) + / (3*input_val/(1+init_trend))*10)) def test_initial(self): from pysd.py_backend.statefuls import Initial @@ -361,7 +358,8 @@ class TestMacroMethods(unittest.TestCase): def test_get_elements_to_initialize(self): from pysd.py_backend.statefuls import Macro - macro = Macro(more_tests + "/version/test_current_version.py") + model = "not_implemented_and_incomplete" + macro = Macro(more_tests / model / f"test_{model}.py") macro.stateful_initial_dependencies = { "A": {"B", "C"}, From 8aece44dfd0eac57a164f6de5031d33e3ee2c923 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 4 May 2022 09:21:54 +0200 Subject: [PATCH 45/96] Put supported suffixes in one place --- pysd/cli/main.py | 10 ++++++++-- pysd/cli/parser.py | 19 +++++++++++-------- pysd/translation/vensim/vensim_file.py | 5 +++-- pysd/translation/vensim/vensim_utils.py | 3 +++ pysd/translation/xmile/xmile_file.py | 7 ++++--- pysd/translation/xmile/xmile_utils.py | 3 +++ .../test_not_implemented_and_incomplete.py | 2 +- tests/pytest_pysd/errors/pytest_errors.py | 5 +++-- tests/unit_test_cli.py | 4 ++-- 9 files changed, 38 insertions(+), 20 deletions(-) diff --git a/pysd/cli/main.py b/pysd/cli/main.py index ec9f2302..53b5bcfc 100644 --- a/pysd/cli/main.py +++ b/pysd/cli/main.py @@ -1,5 +1,6 @@ import sys import os +from pathlib import Path from csv import QUOTE_NONE from datetime import datetime @@ -7,6 +8,10 @@ from .parser import parser import pysd +from pysd.translation.vensim.vensim_utils import supported_extensions as\ + vensim_extensions +from pysd.translation.xmile.xmile_utils import supported_extensions as\ + xmile_extensions def main(args): @@ -85,13 +90,14 @@ def load(model_file, data_files, missing_values, split_views, **kwargs): pysd.model """ - if model_file.lower().endswith(".mdl"): + model_file_suffix = Path(model_file).suffix.lower() + if model_file_suffix in vensim_extensions: print("\nTranslating model file...\n") return pysd.read_vensim(model_file, initialize=False, data_files=data_files, missing_values=missing_values, split_views=split_views, **kwargs) - elif model_file.lower().endswith(".xmile"): + elif model_file_suffix in xmile_extensions: print("\nTranslating model file...\n") return pysd.read_xmile(model_file, initialize=False, data_files=data_files, diff --git a/pysd/cli/parser.py b/pysd/cli/parser.py index b748f402..1160b66b 100644 --- a/pysd/cli/parser.py +++ b/pysd/cli/parser.py @@ -8,6 +8,10 @@ from argparse import ArgumentParser, Action from pysd import __version__ +from pysd.translation.vensim.vensim_utils import supported_extensions as\ + vensim_extensions +from pysd.translation.xmile.xmile_utils import supported_extensions as\ + xmile_extensions docs = "https://pysd.readthedocs.io/en/master/command_line_usage.html" @@ -38,18 +42,17 @@ def check_model(string): Checks that model file ends with .py .mdl or .xmile and that exists. 
""" - if not string.lower().endswith('.mdl')\ - and not string.lower().endswith('.xmile')\ - and not string.endswith('.py'): + suffixes = [".py"] + vensim_extensions + xmile_extensions + if not any(string.lower().endswith(suffix) for suffix in suffixes): parser.error( - f'when parsing {string}' - '\nThe model file name must be Vensim (.mdl), Xmile (.xmile)' - ' or PySD (.py) model file...') + f"when parsing {string} \nThe model file name must be a Vensim" + f" ({', '.join(vensim_extensions)}), a Xmile " + f"({', '.join(xmile_extensions)}) or a PySD (.py) model file...") if not os.path.isfile(string): parser.error( - f'when parsing {string}' - '\nThe model file does not exist...') + f"when parsing {string}" + "\nThe model file does not exist...") return string diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translation/vensim/vensim_file.py index 0e034682..2741acd6 100644 --- a/pysd/translation/vensim/vensim_file.py +++ b/pysd/translation/vensim/vensim_file.py @@ -16,6 +16,7 @@ from . import vensim_utils as vu from .vensim_section import Section +from .vensim_utils import supported_extensions class VensimFile(): @@ -72,10 +73,10 @@ def _read(self, encoding: Union[None, str]) -> str: """ # check for model extension - if self.mdl_path.suffix.lower() != ".mdl": + if self.mdl_path.suffix.lower() not in supported_extensions: raise ValueError( "The file to translate, '%s' " % self.mdl_path - + "is not a Vensim model. It must end with mdl extension." + + "is not a Vensim model. It must end with .mdl extension." ) if encoding is None: diff --git a/pysd/translation/vensim/vensim_utils.py b/pysd/translation/vensim/vensim_utils.py index c40989f1..ea01bb0d 100644 --- a/pysd/translation/vensim/vensim_utils.py +++ b/pysd/translation/vensim/vensim_utils.py @@ -8,6 +8,9 @@ from chardet import detect +supported_extensions = [".mdl"] + + class Grammar(): _common_grammar = None _grammar_path: Path = Path(__file__).parent.joinpath("parsing_grammars") diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translation/xmile/xmile_file.py index 013543d5..3380cc90 100644 --- a/pysd/translation/xmile/xmile_file.py +++ b/pysd/translation/xmile/xmile_file.py @@ -10,6 +10,7 @@ from ..structures.abstract_model import AbstractModel from .xmile_section import Section +from .xmile_utils import supported_extensions class XmileFile(): @@ -58,11 +59,11 @@ def _get_root(self) -> etree._Element: """ # check for model extension - if self.xmile_path.suffix.lower() not in [".xmile", ".xml", ".stmx"]: + if self.xmile_path.suffix.lower() not in supported_extensions: raise ValueError( "The file to translate, '%s' " % self.xmile_path - + "is not a Xmile model. It must end with xmile, xml or " - + "stmx extension." + + "is not a Xmile model. It must end with any of " + + "%s extensions." 
% ', '.join(supported_extensions) ) return etree.parse( diff --git a/pysd/translation/xmile/xmile_utils.py b/pysd/translation/xmile/xmile_utils.py index c3bb8e26..eb7613a8 100644 --- a/pysd/translation/xmile/xmile_utils.py +++ b/pysd/translation/xmile/xmile_utils.py @@ -6,6 +6,9 @@ from pathlib import Path +supported_extensions = [".xmile", ".xml", ".stmx"] + + class Grammar(): _common_grammar = None _grammar_path: Path = Path(__file__).parent.joinpath("parsing_grammars") diff --git a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py index c3cafdb9..62d8f106 100644 --- a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py +++ b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py @@ -7,7 +7,7 @@ import numpy as np import xarray as xr -from pysd.py_backend.functions import incomplete, not_implemented_function +from pysd.py_backend.functions import not_implemented_function, incomplete from pysd import Component __pysd_version__ = "3.0.0" diff --git a/tests/pytest_pysd/errors/pytest_errors.py b/tests/pytest_pysd/errors/pytest_errors.py index 7b62ddad..6d184483 100644 --- a/tests/pytest_pysd/errors/pytest_errors.py +++ b/tests/pytest_pysd/errors/pytest_errors.py @@ -35,7 +35,7 @@ def model_path(_root, name, suffix): ValueError, "The file to translate, " "'.*test_not_vensim.txt' is not a " - r"Vensim model\. It must end with mdl extension\." + r"Vensim model\. It must end with \.mdl extension\." ), ( # not_xmile_model "not_vensim", @@ -44,7 +44,8 @@ def model_path(_root, name, suffix): ValueError, "The file to translate, " "'.*test_not_vensim.txt' is not a " - r"Xmile model\. It must end with xmile, xml or stmx extension\." + r"Xmile model\. It must end with any of \.xmile, \.xml, " + r"\.stmx extensions\." 
), ( # circular_reference "circular_reference", diff --git a/tests/unit_test_cli.py b/tests/unit_test_cli.py index 3d030dfa..b7ae522a 100644 --- a/tests/unit_test_cli.py +++ b/tests/unit_test_cli.py @@ -67,8 +67,8 @@ def test_read_not_model(self): self.assertNotEqual(out.returncode, 0) self.assertIn(f"PySD: error: when parsing {model}", stderr) self.assertIn( - "The model file name must be Vensim (.mdl), Xmile (.xmile) " - "or PySD (.py) model file...", stderr) + "The model file name must be a Vensim (.mdl), a Xmile (.xmile, " + ".xml, .stmx) or a PySD (.py) model file...", stderr) def test_read_model_not_exists(self): From 784684f7f3bd898fc5e9075a386638d328588b5e Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 4 May 2022 21:32:46 +0200 Subject: [PATCH 46/96] Add new tests --- docs/generate_tables.py | 12 +- pysd/building/python/python_functions.py | 3 +- pysd/py_backend/lookups.py | 4 - .../test_not_implemented_and_incomplete.py | 118 ----------- tests/more-tests/random/test_random.mdl | 193 ++++++++++++++++++ tests/pytest.ini | 2 +- .../pytest_integration_vensim_pathway.py | 0 .../pytest_integration_xmile_pathway.py | 0 .../pytest_pysd/{errors => }/pytest_errors.py | 17 +- tests/pytest_pysd/pytest_random.py | 30 +++ .../pytest_select_submodel.py | 0 .../{vensim_parser => }/pytest_split_views.py | 0 tests/pytest_translation/pytest_vensim.py | 88 ++++++++ .../vensim_parser/pytest_vensim_file.py | 53 ----- tests/unit_test_external.py | 6 +- tests/unit_test_pysd.py | 3 +- tests/unit_test_statefuls.py | 6 +- 17 files changed, 349 insertions(+), 186 deletions(-) delete mode 100644 tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py create mode 100644 tests/more-tests/random/test_random.mdl rename tests/pytest_integration/{vensim_pathway => }/pytest_integration_vensim_pathway.py (100%) rename tests/pytest_integration/{xmile_pathway => }/pytest_integration_xmile_pathway.py (100%) rename tests/pytest_pysd/{errors => }/pytest_errors.py (88%) create mode 100644 tests/pytest_pysd/pytest_random.py rename tests/pytest_pysd/{user_interaction => }/pytest_select_submodel.py (100%) rename tests/pytest_translation/{vensim_parser => }/pytest_split_views.py (100%) create mode 100644 tests/pytest_translation/pytest_vensim.py delete mode 100644 tests/pytest_translation/vensim_parser/pytest_vensim_file.py diff --git a/docs/generate_tables.py b/docs/generate_tables.py index d8cd7bec..ec59d0a7 100644 --- a/docs/generate_tables.py +++ b/docs/generate_tables.py @@ -38,9 +38,15 @@ def generate_tables(): # different combinations to generate contents = { - "vensim": ["Vensim", "Vensim example", "Abstract Syntax", "Vensim comments"], - "xmile": ["Xmile", "Xmile example", "Abstract Syntax", "Xmile comments"], - "python": ["Abstract Syntax", "Python Translation", "Python comments"] + "vensim": [ + "Vensim", "Vensim example", "Abstract Syntax", "Vensim comments" + ], + "xmile": [ + "Xmile", "Xmile example", "Abstract Syntax", "Xmile comments" + ], + "python": [ + "Abstract Syntax", "Python Translation", "Python comments" + ] } # load the tables diff --git a/pysd/building/python/python_functions.py b/pysd/building/python/python_functions.py index 519a9470..2c25295e 100644 --- a/pysd/building/python/python_functions.py +++ b/pysd/building/python/python_functions.py @@ -83,6 +83,7 @@ "np.random.uniform(%(0)s, %(1)s, size=%(size)s)", ("numpy",)), "random_normal": ( - "stats.truncnorm.rvs(%(0)s, %(1)s, loc=%(2)s, scale=%(3)s, size=%(size)s)", + "stats.truncnorm.rvs(%(0)s, 
%(1)s, loc=%(2)s, scale=%(3)s," + " size=%(size)s)", ("scipy", "stats")), } diff --git a/pysd/py_backend/lookups.py b/pysd/py_backend/lookups.py index 43c64eb0..4f86571a 100644 --- a/pysd/py_backend/lookups.py +++ b/pysd/py_backend/lookups.py @@ -8,10 +8,6 @@ class Lookups(object): - # TODO add __init__ and use this class for used input pandas.Series - # as Lookups - # def __init__(self, data, coords, interp="interpolate"): - def set_values(self, values): """Set new values from user input""" self.data = xr.DataArray( diff --git a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py deleted file mode 100644 index 62d8f106..00000000 --- a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Python model 'test_not_implemented_and_incomplete.py' -Translated using PySD -""" - -from pathlib import Path -import numpy as np -import xarray as xr - -from pysd.py_backend.functions import not_implemented_function, incomplete -from pysd import Component - -__pysd_version__ = "3.0.0" - -__data = {"scope": None, "time": lambda: 0} - -_root = Path(__file__).parent - - -component = Component() - -####################################################################### -# CONTROL VARIABLES # -####################################################################### - -_control_vars = { - "initial_time": lambda: 0, - "final_time": lambda: 1, - "time_step": lambda: 1, - "saveper": lambda: time_step(), -} - - -def _init_outer_references(data): - for key in data: - __data[key] = data[key] - - -@component.add(name="Time") -def time(): - """ - Current time of the model. - """ - return __data["time"]() - - -@component.add( - name="FINAL TIME", units="Month", comp_type="Constant", comp_subtype="Normal" -) -def final_time(): - """ - The final time for the simulation. - """ - return __data["time"].final_time() - - -@component.add( - name="INITIAL TIME", units="Month", comp_type="Constant", comp_subtype="Normal" -) -def initial_time(): - """ - The initial time for the simulation. - """ - return __data["time"].initial_time() - - -@component.add( - name="SAVEPER", - units="Month", - limits=(0.0, np.nan), - comp_type="Auxiliary", - comp_subtype="Normal", - depends_on={"time_step": 1}, -) -def saveper(): - """ - The frequency with which output is stored. - """ - return __data["time"].saveper() - - -@component.add( - name="TIME STEP", - units="Month", - limits=(0.0, np.nan), - comp_type="Constant", - comp_subtype="Normal", -) -def time_step(): - """ - The time step for the simulation. 
- """ - return __data["time"].time_step() - - -####################################################################### -# MODEL VARIABLES # -####################################################################### - - -@component.add( - name="incomplete var", - comp_type="Auxiliary", - comp_subtype="Normal", - depends_on={"time": 1}, -) -def incomplete_var(): - return incomplete(time()) - - -@component.add( - name="not implemented function", - comp_type="Auxiliary", - comp_subtype="Normal", - depends_on={"time": 1}, -) -def not_implemented_function_1(): - return not_implemented_function("my_func", time()) diff --git a/tests/more-tests/random/test_random.mdl b/tests/more-tests/random/test_random.mdl new file mode 100644 index 00000000..483f806f --- /dev/null +++ b/tests/more-tests/random/test_random.mdl @@ -0,0 +1,193 @@ +{UTF-8} +A B uniform matrix[Dims,Subs]= + RANDOM UNIFORM(10, 11, 1) + ~ + ~ | + +A B uniform matrix 1[Dims,Subs]= + RANDOM UNIFORM(my var[Dims]+100, 200, 1) + ~ + ~ | + +A B uniform matrix 1 0[Dims,Subs]= + RANDOM UNIFORM(my var2[Subs], my var[Dims], 1) + ~ + ~ | + +A B uniform scalar= + RANDOM UNIFORM(-1, 10, 1) + ~ + ~ | + +A B uniform vec[Dims]= + RANDOM UNIFORM(2, 3, 1) + ~ + ~ | + +A B uniform vec 1[Dims]= + RANDOM UNIFORM(my var[Dims], 100, 1) + ~ + ~ | + +Dims: + (Dim1-Dim25) + ~ + ~ | + +my var[Dims]= + Dims-50 + ~ + ~ | + +my var2[Subs]= + 2*Subs + ~ + ~ | + +normal A B uniform matrix[Dims,Subs]= + RANDOM NORMAL(-1, 10, 5, 1, 1) + ~ + ~ | + +normal A B uniform matrix 1[Dims,Subs]= + RANDOM NORMAL(my var[Dims], my var[Dims]+5, my var[Dims]+2, 10, 1) + ~ + ~ | + +normal A B uniform matrix 1 0[Dims,Subs]= + RANDOM NORMAL(my var[Dims], my var[Dims]+5, my var[Dims]+2, my var2[Subs], 1) + ~ + ~ | + +normal scalar= + RANDOM NORMAL(2, 4, 0.5, 2, 1) + ~ + ~ | + +normal vec[Dims]= + RANDOM NORMAL(1, 5, 3.5, 10, 1) + ~ + ~ | + +normal vec 1[Dims]= + RANDOM NORMAL(my var[Dims], 400, my var[Dims]+200, 2, 1) + ~ + ~ | + +Subs: + (sub1-sub50) + ~ + ~ | + +uniform matrix[Dims,Subs]= + RANDOM 0 1() + ~ + ~ | + +uniform scalar= + RANDOM 0 1() + ~ + ~ | + +uniform vec[Dims]= + RANDOM 0 1() + ~ + ~ | + +******************************************************** + .Control +********************************************************~ + Simulation Control Parameters + | + +FINAL TIME = 100 + ~ Month + ~ The final time for the simulation. + | + +INITIAL TIME = 0 + ~ Month + ~ The initial time for the simulation. + | + +SAVEPER = + TIME STEP + ~ Month [0,?] + ~ The frequency with which output is stored. + | + +TIME STEP = 1 + ~ Month [0,?] + ~ The time step for the simulation. 
+ | + +\\\---/// Sketch information - do not modify anything except names +V300 Do not put anything below this section - it will be ignored +*View 1 +$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 +10,1,uniform scalar,275,125,44,11,8,3,0,0,0,0,0,0 +10,2,uniform vec,254,206,37,11,8,3,0,0,0,0,0,0 +10,3,uniform matrix,352,338,45,11,8,3,0,0,0,0,0,0 +10,4,A B uniform scalar,524,129,59,11,8,3,0,0,0,0,0,0 +10,5,A B uniform vec,503,210,52,11,8,3,0,0,0,0,0,0 +10,6,A B uniform matrix,601,342,60,11,8,3,0,0,0,0,0,0 +10,7,A B uniform vec 1,733,205,58,11,8,3,0,0,0,0,0,0 +10,8,A B uniform matrix 1,831,337,40,19,8,3,0,0,0,0,0,0 +10,9,A B uniform matrix 1 0,967,340,40,19,8,3,0,0,0,0,0,0 +10,10,my var,949,87,23,11,8,3,0,0,0,0,0,0 +1,11,10,8,0,0,0,0,0,128,0,-1--1--1,,1|(894,201)| +1,12,10,7,0,0,0,0,0,128,0,-1--1--1,,1|(847,142)| +1,13,10,9,0,0,0,0,0,128,0,-1--1--1,,1|(956,202)| +10,14,my var2,1243,87,27,11,8,3,0,0,0,0,0,0 +1,15,14,9,0,0,0,0,0,128,0,-1--1--1,,1|(1114,204)| +10,16,normal scalar,1157,311,45,19,8,3,0,0,0,0,0,0 +10,17,normal vec,1162,373,37,19,8,3,0,0,0,0,0,0 +10,18,normal A B uniform matrix,1281,367,46,19,8,3,0,0,0,0,0,0 +10,19,normal vec 1,1188,262,44,19,8,3,0,0,0,0,0,0 +10,20,normal A B uniform matrix 1,1511,362,63,19,8,3,0,0,0,0,0,0 +10,21,normal A B uniform matrix 1 0,1647,365,63,19,8,3,0,0,0,0,0,0 +1,22,10,19,0,0,0,0,0,128,0,-1--1--1,,1|(1057,166)| +1,23,10,20,0,0,0,0,0,128,0,-1--1--1,,1|(1215,217)| +1,24,10,21,0,0,0,0,0,128,0,-1--1--1,,1|(1278,218)| +1,25,14,20,0,0,0,0,0,128,0,-1--1--1,,1|(1367,215)| +1,26,14,21,0,0,0,0,0,128,0,-1--1--1,,1|(1432,218)| +///---\\\ +:L<%^E!@ +1:Current.vdf +9:Current +15:0,0,0,0,0,0 +19:100,0 +27:0, +34:0, +4:Time +5:A B uniform matrix[Dims,Subs] +35:Date +36:YYYY-MM-DD +37:2000 +38:1 +39:1 +40:2 +41:0 +42:1 +24:0 +25:100 +26:100 +57:1 +54:0 +55:0 +59:0 +56:0 +58:0 +44:65001 +46:0 +45:1 +49:1 +50:0 +51: +52: +53: +43:output +47:Current +48: +6:Dim1 +6:sub1 diff --git a/tests/pytest.ini b/tests/pytest.ini index d6f5a6e0..982daecb 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -1,2 +1,2 @@ [pytest] -python_files = unit_test_*.py integration_test_*.py pytest_*/**/*.py +python_files = unit_test_*.py pytest_*/**/*.py pytest_*/*.py diff --git a/tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py b/tests/pytest_integration/pytest_integration_vensim_pathway.py similarity index 100% rename from tests/pytest_integration/vensim_pathway/pytest_integration_vensim_pathway.py rename to tests/pytest_integration/pytest_integration_vensim_pathway.py diff --git a/tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py b/tests/pytest_integration/pytest_integration_xmile_pathway.py similarity index 100% rename from tests/pytest_integration/xmile_pathway/pytest_integration_xmile_pathway.py rename to tests/pytest_integration/pytest_integration_xmile_pathway.py diff --git a/tests/pytest_pysd/errors/pytest_errors.py b/tests/pytest_pysd/pytest_errors.py similarity index 88% rename from tests/pytest_pysd/errors/pytest_errors.py rename to tests/pytest_pysd/pytest_errors.py index 6d184483..58f4d79a 100644 --- a/tests/pytest_pysd/errors/pytest_errors.py +++ b/tests/pytest_pysd/pytest_errors.py @@ -1,13 +1,28 @@ import pytest +import shutil from pysd import read_vensim, read_xmile, load @pytest.fixture -def model_path(_root, name, suffix): +def original_path(_root, name, suffix): return _root / "more-tests" / name / f"test_{name}.{suffix}" +@pytest.fixture +def model_path(original_path, 
shared_tmpdir, _root): + """ + Copy test folder to a temporary folder therefore we avoid creating + PySD model files in the original folder + """ + new_file = shared_tmpdir / original_path.name + shutil.copy( + original_path, + new_file + ) + return new_file + + @pytest.mark.parametrize( "name,suffix,loader,raise_type,error_message", [ diff --git a/tests/pytest_pysd/pytest_random.py b/tests/pytest_pysd/pytest_random.py new file mode 100644 index 00000000..2c1dff1e --- /dev/null +++ b/tests/pytest_pysd/pytest_random.py @@ -0,0 +1,30 @@ + +import pytest +import shutil + +import pysd + + +class TestRandomModel: + """Submodel selecting class""" + # messages for selecting submodules + @pytest.fixture(scope="class") + def model_path(self, shared_tmpdir, _root): + """ + Copy test folder to a temporary folder therefore we avoid creating + PySD model files in the original folder + """ + new_file = shared_tmpdir.joinpath("test_random.mdl") + shutil.copy( + _root.joinpath("more-tests/random/test_random.mdl"), + new_file + ) + return new_file + + def test_translate(self, model_path): + """ + Translate the model or read a translated version. + This way each file is only translated once. + """ + # expected file + pysd.read_vensim(model_path) diff --git a/tests/pytest_pysd/user_interaction/pytest_select_submodel.py b/tests/pytest_pysd/pytest_select_submodel.py similarity index 100% rename from tests/pytest_pysd/user_interaction/pytest_select_submodel.py rename to tests/pytest_pysd/pytest_select_submodel.py diff --git a/tests/pytest_translation/vensim_parser/pytest_split_views.py b/tests/pytest_translation/pytest_split_views.py similarity index 100% rename from tests/pytest_translation/vensim_parser/pytest_split_views.py rename to tests/pytest_translation/pytest_split_views.py diff --git a/tests/pytest_translation/pytest_vensim.py b/tests/pytest_translation/pytest_vensim.py new file mode 100644 index 00000000..b75fba0c --- /dev/null +++ b/tests/pytest_translation/pytest_vensim.py @@ -0,0 +1,88 @@ +import pytest +from pathlib import Path +from parsimonious import VisitationError + +from pysd.translation.vensim.vensim_file import VensimFile +from pysd.translation.vensim.vensim_element import Element + + +@pytest.mark.parametrize( + "path", + [ + ( # teacup + "samples/teacup/teacup.mdl" + ), + ( # macros + "tests/macro_multi_expression/test_macro_multi_expression.mdl" + ), + ( # mapping + "tests/subscript_mapping_vensim/test_subscript_mapping_vensim.mdl" + ), + ( # data + "tests/data_from_other_model/test_data_from_other_model.mdl" + ), + ( # except + "tests/except/test_except.mdl" + ) + ], + ids=["teacup", "macros", "mapping", "data", "except"] +) +class TestVensimFile: + """ + Test for splitting Vensim views in modules and submodules + """ + @pytest.fixture + def model_path(self, _root, path): + return _root.joinpath("test-models").joinpath(path) + + @pytest.mark.dependency(name="read_vensim_file") + def test_read_vensim_file(self, model_path): + # assert that the files don't exist in the temporary directory + ven_file = VensimFile(model_path) + + assert hasattr(ven_file, "mdl_path") + assert hasattr(ven_file, "root_path") + assert hasattr(ven_file, "model_text") + + assert isinstance(getattr(ven_file, "mdl_path"), Path) + assert isinstance(getattr(ven_file, "root_path"), Path) + assert isinstance(getattr(ven_file, "model_text"), str) + + @pytest.mark.dependency(depends=["read_vensim_file"]) + def test_file_split_file_sections(self, model_path): + ven_file = VensimFile(model_path) + ven_file.parse() + + 
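# --- illustrative sketch (editorial note, not part of the patch) -----------
# A minimal, hedged example of the VensimFile workflow the tests above
# exercise. Only the API confirmed by this patch is used (the constructor,
# the mdl_path / root_path / model_text attributes and parse());
# "teacup.mdl" is a hypothetical local path.
from pysd.translation.vensim.vensim_file import VensimFile

ven_file = VensimFile("teacup.mdl")           # hypothetical model file
print(ven_file.mdl_path, ven_file.root_path)  # both are pathlib.Path objects
print(ven_file.model_text[:60])               # raw text read from the file
ven_file.parse()                              # split into sections and parse
# ---------------------------------------------------------------------------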
+class TestElements: + """ + Test for splitting Vensim views in modules and submodules + """ + @pytest.fixture + def element(self, equation): + return Element(equation, "", "") + + @pytest.mark.parametrize( + "equation,error_message", + [ + ( # no-letter + "dim: (1-12)", + "A numeric range must contain at least one letter." + ), + ( # greater + "dim: (a12-a10)", + "The number of the first subscript value must be lower " + "than the second subscript value in a subscript numeric" + " range." + ), + ( # different-leading + "dim: (aba1-abc12)", + "Only matching names ending in numbers are valid." + ), + ], + ids=["no-letter", "greater", "different-leading"] + ) + def test_subscript_range_error(self, element, error_message): + # assert that the files don't exist in the temporary directory + with pytest.raises(VisitationError, match=error_message): + element.parse() diff --git a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py b/tests/pytest_translation/vensim_parser/pytest_vensim_file.py deleted file mode 100644 index 7845748b..00000000 --- a/tests/pytest_translation/vensim_parser/pytest_vensim_file.py +++ /dev/null @@ -1,53 +0,0 @@ - -import pytest -from pathlib import Path - -from pysd.translation.vensim.vensim_file import VensimFile - - -@pytest.mark.parametrize( - "path", - [ - ( # teacup - "test-models/samples/teacup/teacup.mdl" - ), - ( # macros - "test-models/tests/macro_multi_expression/test_macro_multi_expression.mdl" - ), - ( # mapping - "test-models/tests/subscript_mapping_vensim/test_subscript_mapping_vensim.mdl" - ), - ( # data - "test-models/tests/data_from_other_model/test_data_from_other_model.mdl" - ), - ( # except - "test-models/tests/except/test_except.mdl" - ) - ], - ids=["teacup", "macros", "mapping", "data", "except"] -) -class TestVensimFile: - """ - Test for splitting Vensim views in modules and submodules - """ - @pytest.fixture - def model_path(self, _root, path): - return _root.joinpath(path) - - @pytest.mark.dependency(name="read_vensim_file") - def test_read_vensim_file(self, model_path): - # assert that the files don't exist in the temporary directory - ven_file = VensimFile(model_path) - - assert hasattr(ven_file, "mdl_path") - assert hasattr(ven_file, "root_path") - assert hasattr(ven_file, "model_text") - - assert isinstance(getattr(ven_file, "mdl_path"), Path) - assert isinstance(getattr(ven_file, "root_path"), Path) - assert isinstance(getattr(ven_file, "model_text"), str) - - @pytest.mark.dependency(depends=["read_vensim_file"]) - def test_file_split_file_sections(self, model_path): - ven_file = VensimFile(model_path) - ven_file.parse() diff --git a/tests/unit_test_external.py b/tests/unit_test_external.py index 061acc2e..65659037 100644 --- a/tests/unit_test_external.py +++ b/tests/unit_test_external.py @@ -1257,12 +1257,14 @@ def test_lookup_vn2d_xarray(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") self.assertTrue( - data(all_smaller, {**coords_1, **coords_2} + data( + all_smaller, {**coords_1, **coords_2} ).equals(all_smaller_out)) self.assertTrue( data(all_bigger, coords_1).equals(all_bigger_out)) self.assertTrue( - data(all_inside, {**coords_1, **coords_2} + data( + all_inside, {**coords_1, **coords_2} ).equals(all_inside_out)) self.assertTrue( data(mixed, coords_1).equals(mixed_out)) diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index 756e59a3..9be689b5 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -601,7 +601,8 @@ def test_docs(self): "Constant" ) self.assertEqual( - 
doc[doc["Real Name"] == "Characteristic Time"]["Subtype"].values[0], + doc[doc["Real Name"] + == "Characteristic Time"]["Subtype"].values[0], "Normal" ) self.assertEqual( diff --git a/tests/unit_test_statefuls.py b/tests/unit_test_statefuls.py index 54a5b769..5ae5c371 100644 --- a/tests/unit_test_statefuls.py +++ b/tests/unit_test_statefuls.py @@ -356,10 +356,12 @@ def test_not_initialized_object(self): class TestMacroMethods(unittest.TestCase): def test_get_elements_to_initialize(self): + from pysd import read_vensim from pysd.py_backend.statefuls import Macro - model = "not_implemented_and_incomplete" - macro = Macro(more_tests / model / f"test_{model}.py") + test_model = _root.joinpath("test-models/samples/teacup/teacup.mdl") + read_vensim(test_model) + macro = Macro(test_model.with_suffix(".py")) macro.stateful_initial_dependencies = { "A": {"B", "C"}, From f63f620ad2afd7021f902a804a86189eaf4a0386 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 5 May 2022 17:09:49 +0200 Subject: [PATCH 47/96] Add new tests --- pysd/_version.py | 2 +- .../python/python_expressions_builder.py | 50 ++++---- pysd/translation/vensim/vensim_element.py | 13 +-- tests/more-tests/random/test_random.mdl | 2 +- tests/pytest_building/pytest_python.py | 108 ++++++++++++++++++ tests/pytest_pysd/pytest_random.py | 3 +- tests/pytest_translation/pytest_vensim.py | 60 ++++++++++ 7 files changed, 200 insertions(+), 38 deletions(-) create mode 100644 tests/pytest_building/pytest_python.py diff --git a/pysd/_version.py b/pysd/_version.py index 528787cf..4cb28cbd 100644 --- a/pysd/_version.py +++ b/pysd/_version.py @@ -1 +1 @@ -__version__ = "3.0.0" +__version__ = "3.0.0-dev" diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 5f6377f0..0c2308f5 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -96,32 +96,30 @@ def join_calls(self, arguments): return merge_dependencies( *[val.calls for val in arguments.values()]) - def reorder(self, arguments, def_subs=None, force=None): + def reorder(self, arguments, force=None): if force == "component": - final_subscripts = def_subs or {} + final_subscripts = self.def_subs or {} else: - final_subscripts = self.get_final_subscripts( - arguments, def_subs) + final_subscripts = self.get_final_subscripts(arguments) [arguments[key].reshape( - self.section.subscripts, final_subscripts, force == "equal") + self.section.subscripts, final_subscripts, bool(force)) for key in arguments if arguments[key].subscripts or force == "equal"] return final_subscripts - def get_final_subscripts(self, arguments, def_subs): + def get_final_subscripts(self, arguments): if len(arguments) == 0: return {} elif len(arguments) == 1: return arguments["0"].subscripts else: return self._compute_final_subscripts( - [arg.subscripts for arg in arguments.values()], - def_subs) + [arg.subscripts for arg in arguments.values()]) - def _compute_final_subscripts(self, subscripts_list, def_subs): + def _compute_final_subscripts(self, subscripts_list): expression = {} [expression.update(subscript) for subscript in subscripts_list if subscript] @@ -173,7 +171,7 @@ def __init__(self, operation, component): def build(self, arguments): operands = {} calls = self.join_calls(arguments) - final_subscripts = self.reorder(arguments, def_subs=self.def_subs) + final_subscripts = self.reorder(arguments) arguments = [arguments[str(i)] for i in range(len(arguments))] dependencies, order 
= self.operators_build[self.operators[-1]][1:] @@ -246,7 +244,7 @@ def __init__(self, call_str, component): self.build = self.build_not_implemented def build_not_implemented(self, arguments): - final_subscripts = self.reorder(arguments, def_subs=self.def_subs) + final_subscripts = self.reorder(arguments) warnings.warn( "\n\nTrying to translate '" + self.function.upper().replace("_", " ") @@ -281,7 +279,7 @@ def build_macro_call(self, arguments): macro = self.section.macrospace[self.macro_name] calls = self.join_calls(arguments) - final_subscripts = self.reorder(arguments, def_subs=self.def_subs) + final_subscripts = self.reorder(arguments) arguments["name"] = self.section.namespace.make_python_identifier( self.macro_name + "_" + self.element.identifier, prefix="_macro") @@ -313,7 +311,7 @@ def build_macro_call(self, arguments): def build_lookups_call(self, arguments): if arguments["0"].subscripts: final_subscripts =\ - self.get_final_subscripts(arguments, self.def_subs) + self.get_final_subscripts(arguments) expression = arguments["function"].expression.replace( "()", f"(%(0)s, {final_subscripts})") else: @@ -344,23 +342,22 @@ def build_function_call(self, arguments): final_subscripts, arguments["axis"] = self.compute_axis(arguments) elif "%(size)s" in expression: - final_subscripts = self.reorder( - arguments, - def_subs=self.def_subs, - force="component" - ) - arguments["size"] = compute_shape(final_subscripts) + final_subscripts = self.reorder(arguments, force="component") + arguments["size"] = tuple(compute_shape(final_subscripts)) + if arguments["size"]: + # NUMPY: not necessary + # generate an xarray from the output + subs = self.section.subscripts.simplify_subscript_input( + self.def_subs)[1] + expression = f"xr.DataArray({expression}, {subs}, "\ + f"{list(self.def_subs)})" elif self.function == "active_initial": # we need to ensure that active initial outputs are always the # same and update dependencies as stateful object name = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_active_initial") - final_subscripts = self.reorder( - arguments, - def_subs=self.def_subs, - force="equal" - ) + final_subscripts = self.reorder(arguments, force="equal") self.element.other_dependencies[name] = { "initial": arguments["1"].calls, "step": arguments["0"].calls @@ -368,10 +365,7 @@ def build_function_call(self, arguments): calls = {name: 1} else: - final_subscripts = self.reorder( - arguments, - def_subs=self.def_subs - ) + final_subscripts = self.reorder(arguments) if self.function == "xidz" and final_subscripts: # xidz must always return the same shape object if not arguments["1"].subscripts: diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translation/vensim/vensim_element.py index 1605c344..3d4bbd0f 100644 --- a/pysd/translation/vensim/vensim_element.py +++ b/pysd/translation/vensim/vensim_element.py @@ -174,14 +174,13 @@ def visit_subscript_copy(self, n, vc): self.component = SubscriptRange(self.name, vc[4].strip()) def visit_subscript_mapping(self, n, vc): - - warnings.warn( - "\nSubscript mapping detected. " - + "This feature works only in some simple cases." - ) - if ":" in str(vc): - # TODO: add test for this condition + # TODO: ensure the correct working of this condition adding + # full integration tests + warnings.warn( + "\nSubscript mapping detected. " + + "This feature works only in some simple cases." 
+ ) # Obtain subscript name and split by : and ( self.mapping.append(str(vc).split(":")[0].split("(")[1].strip()) else: diff --git a/tests/more-tests/random/test_random.mdl b/tests/more-tests/random/test_random.mdl index 483f806f..da30cb61 100644 --- a/tests/more-tests/random/test_random.mdl +++ b/tests/more-tests/random/test_random.mdl @@ -100,7 +100,7 @@ uniform vec[Dims]= Simulation Control Parameters | -FINAL TIME = 100 +FINAL TIME = 10 ~ Month ~ The final time for the simulation. | diff --git a/tests/pytest_building/pytest_python.py b/tests/pytest_building/pytest_python.py new file mode 100644 index 00000000..8b73b1ce --- /dev/null +++ b/tests/pytest_building/pytest_python.py @@ -0,0 +1,108 @@ +import pytest +from pathlib import Path + +from pysd.building.python.python_model_builder import ComponentBuilder, ElementBuilder, SectionBuilder +from pysd.building.python.python_expressions_builder import StructureBuilder, BuildAST +from pysd.translation.structures.abstract_model import AbstractComponent, AbstractElement, AbstractSection + + +class TestStructureBuilder: + """ + Test for StructureBuilder + """ + + @pytest.fixture + def section(self): + return SectionBuilder( + AbstractSection( + "main", Path("here"), "__main__", + [], [], tuple(), tuple(), False, None + )) + + @pytest.fixture + def abstract_component(self): + return AbstractComponent([[], []], "") + + @pytest.fixture + def element(self, section, abstract_component): + return ElementBuilder( + AbstractElement("element", [abstract_component], "", None, ""), + section + ) + + @pytest.fixture + def component(self, element, section, abstract_component): + component_obj = ComponentBuilder(abstract_component, element, section) + component_obj.subscripts_dict = {} + return component_obj + + @pytest.fixture + def structure_builder(self, component): + return StructureBuilder(None, component) + + @pytest.mark.parametrize( + "arguments,expected", + [ + ( # 0 + {}, + {} + ), + ( # 1 + {"0": BuildAST("", {"a": 1}, {}, 0)}, + {"a": 1} + ), + ( # 2 + {"0": BuildAST("", {"a": 1}, {}, 0), + "1": BuildAST("", {"a": 1, "b": 3}, {}, 0)}, + {"a": 2, "b": 3} + ), + ( # 3 + {"0": BuildAST("", {"a": 1}, {}, 0), + "1": BuildAST("", {"a": 1, "b": 3, "c": 2}, {}, 0), + "2": BuildAST("", {"b": 5}, {}, 0)}, + {"a": 2, "b": 8, "c": 2} + ), + ], + ids=["0", "1", "2", "3"] + ) + def test_join_calls(self, structure_builder, arguments, expected): + assert structure_builder.join_calls(arguments) == expected + + @pytest.mark.parametrize( + "arguments,expected", + [ + ( # 0 + {}, + {} + ), + ( # 1 + {"0": BuildAST("", {}, {"a": [1]}, 0)}, + {"a": [1]} + ), + ( # 2a + {"0": BuildAST("", {}, {"a": [1]}, 0), + "1": BuildAST("", {}, {}, 0)}, + {"a": [1]} + ), + ( # 2b + {"0": BuildAST("", {}, {"a": [1]}, 0), + "1": BuildAST("", {}, {"b": [2, 3]}, 0)}, + {"a": [1], "b": [2, 3]} + ), + ( # 2c + {"0": BuildAST("", {}, {"a": [1]}, 0), + "1": BuildAST("", {}, {"b": [2, 3], "a": [1]}, 0)}, + {"a": [1], "b": [2, 3]} + ), + ( # 3 + {"0": BuildAST("", {}, {"a": [1]}, 0), + "1": BuildAST("", {}, {"b": [2, 3], "a": [1]}, 0), + "2": BuildAST("", {}, {"b": [2, 3], "c": [10]}, 0)}, + {"a": [1], "b": [2, 3], "c": [10]} + ), + ], + ids=["0", "1", "2a", "2b", "2c", "3"] + ) + def test__get_final_subscripts(self, structure_builder, + arguments, expected): + assert structure_builder.get_final_subscripts(arguments) == expected diff --git a/tests/pytest_pysd/pytest_random.py b/tests/pytest_pysd/pytest_random.py index 2c1dff1e..7e6e1b1d 100644 --- a/tests/pytest_pysd/pytest_random.py +++ 
b/tests/pytest_pysd/pytest_random.py @@ -27,4 +27,5 @@ def test_translate(self, model_path): This way each file is only translated once. """ # expected file - pysd.read_vensim(model_path) + model = pysd.read_vensim(model_path) + model.run() diff --git a/tests/pytest_translation/pytest_vensim.py b/tests/pytest_translation/pytest_vensim.py index b75fba0c..b5d36332 100644 --- a/tests/pytest_translation/pytest_vensim.py +++ b/tests/pytest_translation/pytest_vensim.py @@ -86,3 +86,63 @@ def test_subscript_range_error(self, element, error_message): # assert that the files don't exist in the temporary directory with pytest.raises(VisitationError, match=error_message): element.parse() + + @pytest.mark.parametrize( + "equation,mapping", + [ + ( # single + "subcon : subcon1,subcon2->(del: del con1, del con2)", + ["del"] + ), + ( # single2 + "subcon : subcon1,subcon2 -> (del:del con1,del con2)", + ["del"] + ), + ( # multiple + "class: class1,class2->(metal:class1 metal,class2 metal)," + "(our metal:ourC1,ourC2)", + ["metal", "our metal"] + ), + ( # multiple2 + "class: class1,class2-> (metal:class1 metal,class2 metal) ," + " (our metal:ourC1,ourC2)", + ["metal", "our metal"] + ), + ], + ids=["single", "single2", "multiple", "multiple2"] + ) + def test_complex_mapping(self, element, mapping): + # parse the mapping + warning_message = r"Subscript mapping detected\. "\ + r"This feature works only in some simple cases\." + with pytest.warns(UserWarning, match=warning_message): + out = element.parse() + + assert out.mapping == mapping + + @pytest.mark.parametrize( + "equation,mapping", + [ + ( # single + "subcon : subcon1,subcon2 -> del", + ["del"] + ), + ( # single2 + "subcon : subcon1,subcon2->del", + ["del"] + ), + ( # multiple + "class: class1,class2->metal,our metal", + ["metal", "our metal"] + ), + ( # multiple2 + "class: class1,class2->metal , our metal", + ["metal", "our metal"] + ), + ], + ids=["single", "single2", "multiple", "multiple2"] + ) + def test_simple_mapping(self, element, mapping): + # parse the mapping + out = element.parse() + assert out.mapping == mapping From 14d2992c8b3061ab44c73f2e8b00d82807a60a0d Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 6 May 2022 11:37:31 +0200 Subject: [PATCH 48/96] Solve bug for saving xarray data coming from transposed arrays --- pysd/py_backend/utils.py | 4 +++- tests/unit_test_utils.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index e840b101..f6af4cf9 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -160,12 +160,14 @@ def _add_flat(savedict, name, values): """ # remove subscripts from name if given name = re.sub(r'\[.*\]', '', name) + dims = values[0].dims + # split values in xarray.DataArray lval = [xrsplit(val) for val in values] for i, ar in enumerate(lval[0]): vals = [float(v[i]) for v in lval] subs = '[' + ','.join([str(ar.coords[dim].values) - for dim in list(ar.coords)]) + ']' + for dim in dims]) + ']' savedict[name+subs] = vals diff --git a/tests/unit_test_utils.py b/tests/unit_test_utils.py index e859ab3a..41e8b6f6 100644 --- a/tests/unit_test_utils.py +++ b/tests/unit_test_utils.py @@ -254,6 +254,44 @@ def test_make_flat_df_flatten(self): actual.loc[:, col].values, expected.loc[:, col].values) + def test_make_flat_df_flatten_transposed(self): + import pysd + + df = pd.DataFrame(index=[1], columns=['elem2']) + df.at[1] = [ + xr.DataArray( + [[1, 4, 7], [2, 5, 8], [3, 6, 9]], + {'Dim2': 
['D', 'E', 'F'], 'Dim1': ['A', 'B', 'C']}, + ['Dim2', 'Dim1'] + ).transpose("Dim1", "Dim2") + ] + + expected = pd.DataFrame(index=[1], columns=[ + 'Elem2[A,D]', + 'Elem2[A,E]', + 'Elem2[A,F]', + 'Elem2[B,D]', + 'Elem2[B,E]', + 'Elem2[B,F]', + 'Elem2[C,D]', + 'Elem2[C,E]', + 'Elem2[C,F]']) + + expected.at[1] = [1, 2, 3, 4, 5, 6, 7, 8, 9] + + return_addresses = { + 'Elem2': ('elem2', {})} + + actual = pysd.utils.make_flat_df(df, return_addresses, flatten=True) + print(actual.columns) + # check all columns are in the DataFrame + self.assertEqual(set(actual.columns), set(expected.columns)) + # need to assert one by one as they are xarrays + for col in set(expected.columns): + self.assertEqual( + actual.loc[:, col].values, + expected.loc[:, col].values) + def test_make_flat_df_times(self): import pysd From 3f222c8394ccc5c958f3fedb251c6590d94f5d28 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 6 May 2022 12:09:36 +0200 Subject: [PATCH 49/96] Create a AbstractSyntax class for typing --- .../python/python_expressions_builder.py | 8 +- .../structures/abstract_expressions.py | 118 ++++++++++-------- pysd/translation/xmile/xmile_element.py | 24 ++-- 3 files changed, 81 insertions(+), 69 deletions(-) diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 0c2308f5..65c558b6 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1,9 +1,11 @@ import warnings from dataclasses import dataclass +from typing import Union import numpy as np from pysd.py_backend.utils import compute_shape +from pysd.translation.structures.abstract_expressions import AbstractSyntax from pysd.translation.structures import abstract_expressions as ae from .python_functions import functionspace @@ -1241,7 +1243,7 @@ def merge_dependencies(*dependencies, inplace=False): return current -def _merge_dependencies(current, new): +def _merge_dependencies(current: dict, new: dict) -> None: """ Merge two dependencies dicts of an element. @@ -1398,7 +1400,7 @@ def __init__(self, component): self.subscripts = component.subscripts_dict self.component = component - def visit(self): + def visit(self) -> Union[None, BuildAST]: visit_out = self._visit(self.ast) if not visit_out: @@ -1443,7 +1445,7 @@ def visit(self): return visit_out - def _visit(self, ast_object): + def _visit(self, ast_object: AbstractSyntax) -> AbstractSyntax: builder = self.builders[type(ast_object)](ast_object, self.component) arguments = { name: self._visit(value) diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translation/structures/abstract_expressions.py index 0e36a26d..58a90b4e 100644 --- a/pysd/translation/structures/abstract_expressions.py +++ b/pysd/translation/structures/abstract_expressions.py @@ -12,8 +12,16 @@ from typing import Union +class AbstractSyntax: + """ + Generic class. All Abstract Synax structured are childs of that class. + Used for typing. + """ + pass + + @dataclass -class ArithmeticStructure: +class ArithmeticStructure(AbstractSyntax): """ Dataclass for an arithmetic structure. @@ -34,7 +42,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class LogicStructure: +class LogicStructure(AbstractSyntax): """ Dataclass for a logic structure. @@ -55,7 +63,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class SubscriptsReferenceStructure: +class SubscriptsReferenceStructure(AbstractSyntax): """ Dataclass for a subscript reference structure. 
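# --- illustrative sketch (editorial note, not part of the patch) -----------
# The pattern introduced by this commit: every AST dataclass now inherits
# from AbstractSyntax, so other modules can annotate against the common base
# instead of `object`. `MyStructure` and `node_name` below are hypothetical
# examples, not PySD classes or functions.
from dataclasses import dataclass
from typing import Union

from pysd.translation.structures.abstract_expressions import AbstractSyntax


@dataclass
class MyStructure(AbstractSyntax):
    argument: Union[AbstractSyntax, float]


def node_name(node: Union[AbstractSyntax, float]) -> str:
    # a builder/visitor can dispatch on the concrete dataclass type
    return type(node).__name__
# ---------------------------------------------------------------------------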
@@ -72,7 +80,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class ReferenceStructure: +class ReferenceStructure(AbstractSyntax): """ Dataclass for an element reference structure. @@ -94,7 +102,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class CallStructure: +class CallStructure(AbstractSyntax): """ Dataclass for a call structure. @@ -106,7 +114,7 @@ class CallStructure: The list of arguments used for calling the function. """ - function: Union[str, object] + function: Union[str, ReferenceStructure] arguments: tuple def __str__(self) -> str: # pragma: no cover @@ -119,7 +127,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class GameStructure: +class GameStructure(AbstractSyntax): """ Dataclass for a game structure. @@ -129,14 +137,14 @@ class GameStructure: The expression inside the game call. """ - expression: object + expression: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "GameStructure:\n\t%s" % self.expression @dataclass -class InitialStructure: +class InitialStructure(AbstractSyntax): """ Dataclass for a initial structure. @@ -146,7 +154,7 @@ class InitialStructure: The expression inside the initial call. """ - initial: object + initial: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "InitialStructure:\n\t%s" % ( @@ -154,7 +162,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class IntegStructure: +class IntegStructure(AbstractSyntax): """ Dataclass for an integ/stock structure. @@ -166,8 +174,8 @@ class IntegStructure: The initial value of the stock. """ - flow: object - initial: object + flow: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "IntegStructure:\n\t%s,\n\t%s" % ( @@ -176,7 +184,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class DelayStructure: +class DelayStructure(AbstractSyntax): """ Dataclass for a delay structure. @@ -192,9 +200,9 @@ class DelayStructure: The order of the delay. """ - input: object - delay_time: object - initial: object + input: Union[AbstractSyntax, float] + delay_time: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] order: float def __str__(self) -> str: # pragma: no cover @@ -206,7 +214,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class DelayNStructure: +class DelayNStructure(AbstractSyntax): """ Dataclass for a delay n structure. @@ -222,10 +230,10 @@ class DelayNStructure: The order of the delay. """ - input: object - delay_time: object - initial: object - order: object + input: Union[AbstractSyntax, float] + delay_time: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] + order: Union[AbstractSyntax, float] # DELAY N may behave different than other delays when the delay time # changes during integration @@ -239,7 +247,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class DelayFixedStructure: +class DelayFixedStructure(AbstractSyntax): """ Dataclass for a delay fixed structure. @@ -253,9 +261,9 @@ class DelayFixedStructure: The initial value of the delay. 
""" - input: object - delay_time: object - initial: object + input: Union[AbstractSyntax, float] + delay_time: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "DelayFixedStructure:\n\t%s,\n\t%s,\n\t%s" % ( @@ -265,7 +273,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class SmoothStructure: +class SmoothStructure(AbstractSyntax): """ Dataclass for a smooth structure. @@ -281,9 +289,9 @@ class SmoothStructure: The order of the smooth. """ - input: object - smooth_time: object - initial: object + input: Union[AbstractSyntax, float] + smooth_time: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] order: float def __str__(self) -> str: # pragma: no cover @@ -295,7 +303,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class SmoothNStructure: +class SmoothNStructure(AbstractSyntax): """ Dataclass for a smooth n structure. @@ -311,10 +319,10 @@ class SmoothNStructure: The order of the smooth. """ - input: object - smooth_time: object - initial: object - order: object + input: Union[AbstractSyntax, float] + smooth_time: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] + order: Union[AbstractSyntax, float] # SMOOTH N may behave different than other smooths with RungeKutta # integration @@ -328,7 +336,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class TrendStructure: +class TrendStructure(AbstractSyntax): """ Dataclass for a trend structure. @@ -342,9 +350,9 @@ class TrendStructure: The initial trend value of the trend. """ - input: object - average_time: object - initial_trend: object + input: Union[AbstractSyntax, float] + average_time: Union[AbstractSyntax, float] + initial_trend: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "TrendStructure:\n\t%s,\n\t%s,\n\t%s" % ( @@ -354,7 +362,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class ForecastStructure: +class ForecastStructure(AbstractSyntax): """ Dataclass for a forecast structure. @@ -370,10 +378,10 @@ class ForecastStructure: The initial trend value of the forecast. """ - input: object - average_time: object - horizon: object - initial_trend: object + input: Union[AbstractSyntax, float] + average_time: Union[AbstractSyntax, float] + horizon: Union[AbstractSyntax, float] + initial_trend: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "ForecastStructure:\n\t%s,\n\t%s,\n\t%s,\n\t%s" % ( @@ -384,7 +392,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class SampleIfTrueStructure: +class SampleIfTrueStructure(AbstractSyntax): """ Dataclass for a sample if true structure. @@ -398,9 +406,9 @@ class SampleIfTrueStructure: The initial value of the sample if true. """ - condition: object - input: object - initial: object + condition: Union[AbstractSyntax, float] + input: Union[AbstractSyntax, float] + initial: Union[AbstractSyntax, float] def __str__(self) -> str: # pragma: no cover return "SampleIfTrueStructure:\n\t%s,\n\t%s,\n\t%s" % ( @@ -410,7 +418,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class LookupsStructure: +class LookupsStructure(AbstractSyntax): """ Dataclass for a lookup structure. @@ -441,7 +449,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class InlineLookupsStructure: +class InlineLookupsStructure(AbstractSyntax): """ Dataclass for an inline lookup structure. @@ -453,7 +461,7 @@ class InlineLookupsStructure: The lookups definition. 
""" - argument: object + argument: Union[AbstractSyntax, float] lookups: LookupsStructure def __str__(self) -> str: # pragma: no cover @@ -464,7 +472,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class DataStructure: +class DataStructure(AbstractSyntax): """ Dataclass for an empty data structure. @@ -480,7 +488,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class GetLookupsStructure: +class GetLookupsStructure(AbstractSyntax): """ Dataclass for a get lookups structure. @@ -509,7 +517,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class GetDataStructure: +class GetDataStructure(AbstractSyntax): """ Dataclass for a get lookups structure. @@ -538,7 +546,7 @@ def __str__(self) -> str: # pragma: no cover @dataclass -class GetConstantsStructure: +class GetConstantsStructure(AbstractSyntax): """ Dataclass for a get lookups structure. diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index 1821e1d9..ed3bfd75 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -22,6 +22,8 @@ from ..structures.abstract_model import\ AbstractElement, AbstractLookup, AbstractComponent, AbstractSubscriptRange +from ..structures.abstract_expressions import AbstractSyntax + from . import xmile_utils as vu from .xmile_structures import structures, parsing_ops @@ -112,13 +114,13 @@ def _get_limits(self) -> Tuple[Union[None, str], Union[None, str]]: ) return tuple(float(x) if x is not None else x for x in lims) - def _parse_lookup_xml_node(self, node: etree._Element) -> object: + def _parse_lookup_xml_node(self, node: etree._Element) -> AbstractSyntax: """ Parse lookup definition Returns ------- - AST: AbstractSyntaxTree + AST: AbstractSyntax """ ys_node = node.xpath('ns:ypts', namespaces=self.ns)[0] @@ -180,13 +182,13 @@ def parse(self) -> None: zip(subs_list, parsed) ] - def _smile_parser(self, expression: str) -> object: + def _smile_parser(self, expression: str) -> AbstractSyntax: """ Parse expression with parsimonious. 
Returns ------- - AST: AbstractSyntaxTree + AST: AbstractSyntax """ tree = vu.Grammar.get("equations", parsing_ops).parse(expression) @@ -231,13 +233,13 @@ def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) self.limits = self._get_limits() - def _parse_component(self, node: etree._Element) -> List[object]: + def _parse_component(self, node: etree._Element) -> List[AbstractSyntax]: """ Parse one Flaux component Returns ------- - AST: AbstractSyntaxTree + AST: AbstractSyntax """ asts = [] @@ -309,13 +311,13 @@ def get_limits(self) -> Tuple[Union[None, str], Union[None, str]]: ) return tuple(float(x) if x is not None else x for x in lims) - def _parse_component(self, node: etree._Element) -> object: + def _parse_component(self, node: etree._Element) -> AbstractSyntax: """ Parse one Gf component Returns ------- - AST: AbstractSyntaxTree + AST: AbstractSyntax """ return [self._parse_lookup_xml_node(self.node)] @@ -366,13 +368,13 @@ def __init__(self, node, ns, subscripts): super().__init__(node, ns, subscripts) self.limits = self._get_limits() - def _parse_component(self, node) -> object: + def _parse_component(self, node) -> AbstractSyntax: """ Parse one Stock component Returns ------- - AST: AbstractSyntaxTree + AST: AbstractSyntax """ # Parse each flow equations @@ -446,7 +448,7 @@ def parse(self) -> None: Returns ------- - AST: AbstractSyntaxTree + AST: AbstractSyntax """ self.ast = self._smile_parser(self.eqn) From e6eb1e84a6aba4d1a5d59eb7965ddcb793bd8612 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 6 May 2022 13:16:17 +0200 Subject: [PATCH 50/96] Add support for zeroled decimals --- pysd/building/python/subscripts.py | 11 +------- .../parsing_grammars/common_grammar.peg | 2 +- .../xmile/parsing_grammars/equations.peg | 2 +- pysd/translation/xmile/xmile_element.py | 5 ++-- tests/pytest_building/pytest_python.py | 27 ++++++++++++++++--- .../pytest_integration_vensim_pathway.py | 4 +++ .../pytest_integration_xmile_pathway.py | 4 +++ tests/test-models | 2 +- 8 files changed, 39 insertions(+), 18 deletions(-) diff --git a/pysd/building/python/subscripts.py b/pysd/building/python/subscripts.py index 36eef739..e37f749c 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/building/python/subscripts.py @@ -66,7 +66,7 @@ def subscripts(self, abstract_subscripts: List[AbstractSubscriptRange]): root=self._root).subscript else: raise ValueError( - f"Invalid definition of subscript {sub.name}:\n\t" + f"Invalid definition of subscript '{sub.name}':\n\t" + str(sub.subscripts)) while missing: @@ -336,15 +336,6 @@ def make_merge_list(self, subs_list: List[List[str]], + name + str(j) + ": " + ', '.join(elements)) break - if not dims[i]: - # not able to find the correct dimension - raise ValueError( - element - + "\nImpossible to find the dimension that contains:" - + "\n\t{}\nFor subscript_dict:".format(coord2) - + "\n\t{}".format(self.subscripts) - ) - return dims def simplify_subscript_input(self, coords: dict, diff --git a/pysd/translation/vensim/parsing_grammars/common_grammar.peg b/pysd/translation/vensim/parsing_grammars/common_grammar.peg index a71fbb73..6b6e5f8c 100644 --- a/pysd/translation/vensim/parsing_grammars/common_grammar.peg +++ b/pysd/translation/vensim/parsing_grammars/common_grammar.peg @@ -12,7 +12,7 @@ id_continue = id_start / ~r"[0-9\'\$\s\_]" escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" number = raw_number -raw_number = ("+"/"-")? _ ~r"\d+\.?\d*([eE][+-]?\d+)?" +raw_number = ("+"/"-")? _ (~r"\d+\.?\d*([eE][+-]?\d+)?" 
/ ~r"\.\d+([eE][+-]?\d+)?") string = "\'" (~r"[^\']"IU)* "\'" limits = _ "[" ~r"[^\]]*" "]" _ "," diff --git a/pysd/translation/xmile/parsing_grammars/equations.peg b/pysd/translation/xmile/parsing_grammars/equations.peg index f40d19bb..5a0a659a 100644 --- a/pysd/translation/xmile/parsing_grammars/equations.peg +++ b/pysd/translation/xmile/parsing_grammars/equations.peg @@ -48,4 +48,4 @@ id_continue = id_start / ~r"[0-9\'\$\_]" escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\"" number = raw_number -raw_number = ("+"/"-")? ~r"\d+\.?\d*([eE][+-]?\d+)?" +raw_number = ("+"/"-")? (~r"\d+\.?\d*([eE][+-]?\d+)?" / ~r"\.\d+([eE][+-]?\d+)?") diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translation/xmile/xmile_element.py index ed3bfd75..27a4f32d 100644 --- a/pysd/translation/xmile/xmile_element.py +++ b/pysd/translation/xmile/xmile_element.py @@ -57,7 +57,7 @@ class Element(): def __init__(self, node: etree._Element, ns: dict, subscripts): self.node = node self.ns = ns - self.name = node.attrib["name"] + self.name = node.attrib["name"].replace("\\n", " ") self.units = self._get_xpath_text(node, "ns:units") or "" self.documentation = self._get_xpath_text(node, "ns:doc") or "" self.limits = (None, None) @@ -191,7 +191,8 @@ def _smile_parser(self, expression: str) -> AbstractSyntax: AST: AbstractSyntax """ - tree = vu.Grammar.get("equations", parsing_ops).parse(expression) + tree = vu.Grammar.get("equations", parsing_ops).parse( + expression.strip()) return EquationVisitor(tree).translation def _get_empty_abstract_element(self) -> AbstractElement: diff --git a/tests/pytest_building/pytest_python.py b/tests/pytest_building/pytest_python.py index 8b73b1ce..aaa6aec6 100644 --- a/tests/pytest_building/pytest_python.py +++ b/tests/pytest_building/pytest_python.py @@ -1,9 +1,13 @@ import pytest from pathlib import Path -from pysd.building.python.python_model_builder import ComponentBuilder, ElementBuilder, SectionBuilder -from pysd.building.python.python_expressions_builder import StructureBuilder, BuildAST -from pysd.translation.structures.abstract_model import AbstractComponent, AbstractElement, AbstractSection +from pysd.building.python.subscripts import SubscriptManager +from pysd.building.python.python_model_builder import\ + ComponentBuilder, ElementBuilder, SectionBuilder +from pysd.building.python.python_expressions_builder import\ + StructureBuilder, BuildAST +from pysd.translation.structures.abstract_model import\ + AbstractComponent, AbstractElement, AbstractSection, AbstractSubscriptRange class TestStructureBuilder: @@ -106,3 +110,20 @@ def test_join_calls(self, structure_builder, arguments, expected): def test__get_final_subscripts(self, structure_builder, arguments, expected): assert structure_builder.get_final_subscripts(arguments) == expected + + +class TestSubscriptManager: + @pytest.mark.parametrize( + "arguments,raise_type,error_message", + [ + ( # invalid definition + [[AbstractSubscriptRange("my subs", 5, [])], Path("here")], + ValueError, + "Invalid definition of subscript 'my subs':\n\t5" + ), + ], + ids=["invalid definition"] + ) + def test_invalid_subscripts(self, arguments, raise_type, error_message): + with pytest.raises(raise_type, match=error_message): + SubscriptManager(*arguments) diff --git a/tests/pytest_integration/pytest_integration_vensim_pathway.py b/tests/pytest_integration/pytest_integration_vensim_pathway.py index fa66af38..9b4ae58b 100644 --- a/tests/pytest_integration/pytest_integration_vensim_pathway.py +++ 
b/tests/pytest_integration/pytest_integration_vensim_pathway.py @@ -510,6 +510,10 @@ "xidz_zidz": { "folder": "xidz_zidz", "file": "xidz_zidz.mdl" + }, + "zeroled_decimals": { + "folder": "zeroled_decimals", + "file": "test_zeroled_decimals.mdl" } } diff --git a/tests/pytest_integration/pytest_integration_xmile_pathway.py b/tests/pytest_integration/pytest_integration_xmile_pathway.py index 46c9843a..3d8fc294 100644 --- a/tests/pytest_integration/pytest_integration_xmile_pathway.py +++ b/tests/pytest_integration/pytest_integration_xmile_pathway.py @@ -210,6 +210,10 @@ "folder": "xidz_zidz", "file": "xidz_zidz.xmile" }, + "zeroled_decimals": { + "folder": "zeroled_decimals", + "file": "test_zeroled_decimals.xmile" + } } diff --git a/tests/test-models b/tests/test-models index 07478c98..1502dce4 160000 --- a/tests/test-models +++ b/tests/test-models @@ -1 +1 @@ -Subproject commit 07478c98f67ae939e6fe738ff97f67d4e3602b56 +Subproject commit 1502dce4b5dbe8d86e6f310fc40f5c33a6dea1ec From efb3a7c6ab6077985a325f09ee6d095c5b6c50e0 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 6 May 2022 14:06:01 +0200 Subject: [PATCH 51/96] Add more unit test --- pysd/tools/benchmarking.py | 21 ++++++-- .../test_not_implemented_and_incomplete.mdl | 37 ++----------- .../pytest_translation/pytest_split_views.py | 8 ++- tests/unit_test_benchmarking.py | 53 ++++++++++++------- 4 files changed, 63 insertions(+), 56 deletions(-) diff --git a/pysd/tools/benchmarking.py b/pysd/tools/benchmarking.py index e26d3137..32cf294b 100644 --- a/pysd/tools/benchmarking.py +++ b/pysd/tools/benchmarking.py @@ -10,6 +10,10 @@ from pysd import read_vensim, read_xmile, load from ..py_backend.utils import load_outputs, detect_encoding +from pysd.translation.vensim.vensim_utils import supported_extensions as\ + vensim_extensions +from pysd.translation.xmile.xmile_utils import supported_extensions as\ + xmile_extensions def runner(model_file, canonical_file=None, transpose=False, data_files=None): @@ -58,14 +62,17 @@ def runner(model_file, canonical_file=None, transpose=False, data_files=None): encoding=detect_encoding(canonical_file)) # load model - if model_file.suffix.lower() == ".mdl": + if model_file.suffix.lower() in vensim_extensions: model = read_vensim(model_file, data_files) - elif model_file.suffix.lower() == ".xmile": + elif model_file.suffix.lower() in xmile_extensions: model = read_xmile(model_file, data_files) elif model_file.suffix.lower() == ".py": model = load(model_file, data_files) else: - raise ValueError("\nModelfile should be *.mdl, *.xmile, or *.py") + raise ValueError( + "\nThe model file name must be a Vensim" + f" ({', '.join(vensim_extensions)}), a Xmile " + f"({', '.join(xmile_extensions)}) or a PySD (.py) model file...") # run model and return the result @@ -106,6 +113,14 @@ def assert_frames_close(actual, expected, assertion="raise", kwargs: Optional rtol and atol values for assert_allclose. + Returns + ------- + (cols, first_false_time, first_false_cols) or None: (set, float, set) or None + If assertion is 'return', return the sets of the all columns that are + different. The time when the first difference was found and the + variables that what different at that time. If assertion is not + 'return' it returns None. 
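A minimal sketch of the new 'return' mode, mirroring the unit test added
later in this patch (the output file paths are illustrative):

    from pysd.tools.benchmarking import load_outputs, assert_frames_close

    cols, first_false_time, first_false_cols = assert_frames_close(
        load_outputs("data/out_teacup.csv"),
        load_outputs("data/out_teacup_modified.csv"),
        assertion="return")
    # cols: every column that differs over the whole run
    # first_false_time: first time at which a difference is found
    # first_false_cols: columns that differ at that first time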
+ Examples -------- >>> assert_frames_close( diff --git a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl index 35fb9bc2..e814f29e 100644 --- a/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl +++ b/tests/more-tests/not_implemented_and_incomplete/test_not_implemented_and_incomplete.mdl @@ -1,11 +1,11 @@ {UTF-8} incomplete var = A FUNCTION OF( Time) - ~ + ~ ~ | not implemented function= MY FUNC(Time) - ~ + ~ ~ | ******************************************************** @@ -24,8 +24,8 @@ INITIAL TIME = 0 ~ The initial time for the simulation. | -SAVEPER = - TIME STEP +SAVEPER = + TIME STEP ~ Month [0,?] ~ The frequency with which output is stored. | @@ -34,32 +34,3 @@ TIME STEP = 1 ~ Month [0,?] ~ The time step for the simulation. | - -\\\---/// Sketch information - do not modify anything except names -V300 Do not put anything below this section - it will be ignored -*View 1 -$192-192-192,0,Times New Roman|12||0-0-0|0-0-0|0-0-255|-1--1--1|-1--1--1|96,96,100,0 -10,1,incomplete var,629,259,46,11,8,3,0,0,0,0,0,0 -10,2,Time,443,331,26,11,8,2,0,3,-1,0,0,0,128-128-128,0-0-0,|0||128-128-128 -1,3,2,1,0,0,0,0,0,128,0,-1--1--1,,1|(527,298)| -10,4,not implemented function,626,406,53,19,8,3,0,0,0,0,0,0 -1,5,2,4,0,0,0,0,0,128,0,-1--1--1,,1|(517,361)| -///---\\\ -:L<%^E!@ -9:Current -15:0,0,0,0,0,0 -19:100,0 -27:0, -34:0, -5:not implemented function -35:Date -36:YYYY-MM-DD -37:2000 -38:1 -39:1 -40:2 -41:0 -42:1 -24:0 -25:0 -26:0 diff --git a/tests/pytest_translation/pytest_split_views.py b/tests/pytest_translation/pytest_split_views.py index 4806e9b8..7dddb220 100644 --- a/tests/pytest_translation/pytest_split_views.py +++ b/tests/pytest_translation/pytest_split_views.py @@ -163,8 +163,14 @@ def test_read_vensim_split_model(self, model_file, subview_sep, ["a"], "The given subview separators were not matched in any view name." ), + ( # no_sketch + Path("more-tests/not_implemented_and_incomplete/" + "test_not_implemented_and_incomplete.mdl"), + ["a"], + "No sketch detected. The model will be built in a single file." 
+ ), ], - ids=["warning_noviews", "not_match_separator"] + ids=["warning_noviews", "not_match_separator", "no_sketch"] ) class TestSplitViewsWarnings: """ diff --git a/tests/unit_test_benchmarking.py b/tests/unit_test_benchmarking.py index 2bc74fc2..acea887a 100644 --- a/tests/unit_test_benchmarking.py +++ b/tests/unit_test_benchmarking.py @@ -1,10 +1,11 @@ -import os +from pathlib import Path from unittest import TestCase # most of the features of this script are already tested indirectly when # running vensim and xmile integration tests -_root = os.path.dirname(__file__) +_root = Path(__file__).parent +test_model = _root.joinpath("test-models/samples/teacup/teacup.mdl") class TestErrors(TestCase): @@ -13,7 +14,7 @@ def test_canonical_file_not_found(self): from pysd.tools.benchmarking import runner with self.assertRaises(FileNotFoundError) as err: - runner(os.path.join(_root, "more-tests/not_existent.mdl")) + runner(_root.joinpath("more-tests/not_existent.mdl")) self.assertIn( 'Canonical output file not found.', @@ -23,12 +24,11 @@ def test_non_valid_model(self): from pysd.tools.benchmarking import runner with self.assertRaises(ValueError) as err: - runner(os.path.join( - _root, - "more-tests/not_vensim/test_not_vensim.txt")) + runner(_root.joinpath("more-tests/not_vensim/test_not_vensim.txt")) self.assertIn( - 'Modelfile should be *.mdl, *.xmile, or *.py', + "The model file name must be a Vensim (.mdl), a Xmile " + "(.xmile, .xml, .stmx) or a PySD (.py) model file...", str(err.exception)) def test_different_frames_error(self): @@ -36,9 +36,8 @@ def test_different_frames_error(self): with self.assertRaises(AssertionError) as err: assert_frames_close( - load_outputs(os.path.join(_root, "data/out_teacup.csv")), - load_outputs( - os.path.join(_root, "data/out_teacup_modified.csv"))) + load_outputs(_root.joinpath("data/out_teacup.csv")), + load_outputs(_root.joinpath("data/out_teacup_modified.csv"))) self.assertIn( "Following columns are not close:\n\tTeacup Temperature", @@ -58,9 +57,8 @@ def test_different_frames_error(self): with self.assertRaises(AssertionError) as err: assert_frames_close( - load_outputs(os.path.join(_root, "data/out_teacup.csv")), - load_outputs( - os.path.join(_root, "data/out_teacup_modified.csv")), + load_outputs(_root.joinpath("data/out_teacup.csv")), + load_outputs(_root.joinpath("data/out_teacup_modified.csv")), verbose=True) self.assertIn( @@ -85,9 +83,8 @@ def test_different_frames_warning(self): with catch_warnings(record=True) as ws: assert_frames_close( - load_outputs(os.path.join(_root, "data/out_teacup.csv")), - load_outputs( - os.path.join(_root, "data/out_teacup_modified.csv")), + load_outputs(_root.joinpath("data/out_teacup.csv")), + load_outputs(_root.joinpath("data/out_teacup_modified.csv")), assertion="warn") # use only user warnings @@ -112,9 +109,8 @@ def test_different_frames_warning(self): with catch_warnings(record=True) as ws: assert_frames_close( - load_outputs(os.path.join(_root, "data/out_teacup.csv")), - load_outputs( - os.path.join(_root, "data/out_teacup_modified.csv")), + load_outputs(_root.joinpath("data/out_teacup.csv")), + load_outputs(_root.joinpath("data/out_teacup_modified.csv")), assertion="warn", verbose=True) # use only user warnings @@ -137,6 +133,18 @@ def test_different_frames_warning(self): "Expected values:\n\t", str(wu[0].message)) + def test_different_frames_return(self): + from pysd.tools.benchmarking import load_outputs, assert_frames_close + + cols, first_false_time, first_false_cols = assert_frames_close( + 
load_outputs(_root.joinpath("data/out_teacup.csv")), + load_outputs(_root.joinpath("data/out_teacup_modified.csv")), + assertion="return") + + assert cols == {"Teacup Temperature"} + assert first_false_time == 30. + assert first_false_cols == {"Teacup Temperature"} + def test_different_cols(self): from warnings import catch_warnings from pysd.tools.benchmarking import assert_frames_close @@ -218,3 +226,10 @@ def test_invalid_input(self): self.assertIn( "Inputs must both be pandas DataFrames.", str(err.exception)) + + def test_run_python(self): + from pysd.tools.benchmarking import runner + assert ( + runner(str(test_model))[0] + == runner(test_model.with_suffix(".py"))[0] + ).all().all() From 0e046568c96bd43f8b4a7e3f020353392cc7dec2 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 6 May 2022 16:00:50 +0200 Subject: [PATCH 52/96] Add unit tests --- pysd/py_backend/data.py | 4 +- tests/pytest_types/data/pytest_data.py | 140 +++++++++++++++--- .../data/pytest_data_with_model.py | 11 ++ tests/pytest_types/lookup/pytest_lookups.py | 105 +++++++++++++ 4 files changed, 237 insertions(+), 23 deletions(-) create mode 100644 tests/pytest_types/lookup/pytest_lookups.py diff --git a/pysd/py_backend/data.py b/pysd/py_backend/data.py index a19ac935..468e5935 100644 --- a/pysd/py_backend/data.py +++ b/pysd/py_backend/data.py @@ -266,9 +266,9 @@ def __init__(self, real_name, py_name, coords, interp="interpolate"): if self.interp not in ["interpolate", "raw", "look_forward", "hold_backward"]: raise ValueError(self.py_name + "\n" - + " The interpolation method (interp) must be " + + "The interpolation method (interp) must be " + "'raw', 'interpolate', " - + "'look_forward' or 'hold_backward") + + "'look_forward' or 'hold_backward'") def load_data(self, file_names): """ diff --git a/tests/pytest_types/data/pytest_data.py b/tests/pytest_types/data/pytest_data.py index 1939b968..ab052702 100644 --- a/tests/pytest_types/data/pytest_data.py +++ b/tests/pytest_types/data/pytest_data.py @@ -1,30 +1,11 @@ import pytest import xarray as xr +import pandas as pd -from pysd.py_backend.data import Data +from pysd.py_backend.data import Data, TabData -@pytest.mark.parametrize( - "value,interp,raise_type,error_message", - [ - ( # not_loaded_data - None, - "interpolate", - ValueError, - "Trying to interpolate data variable before loading the data..." - ), - # test that try/except block on call doesn't catch errors differents - # than data = None - ( # try_except - xr.DataArray([10, 20], {'time': [0, 1]}, ['time']), - None, - AttributeError, - "'Data' object has no attribute 'is_float'" - ) - ], - ids=["not_loaded_data", "try_except"] -) @pytest.mark.filterwarnings("ignore") class TestDataErrors(): # Test errors associated with Data class @@ -39,6 +20,123 @@ def data(self, value, interp): obj.py_name = "data" return obj + @pytest.mark.parametrize( + "value,interp,raise_type,error_message", + [ + ( # not_loaded_data + None, + "interpolate", + ValueError, + "Trying to interpolate data variable before loading " + "the data..." 
+ ), + # test that try/except block on call doesn't catch errors + # differents than data = None + ( # try_except + xr.DataArray([10, 20], {'time': [0, 1]}, ['time']), + None, + AttributeError, + "'Data' object has no attribute 'is_float'" + ) + ], + ids=["not_loaded_data", "try_except"] + ) def test_data_errors(self, data, raise_type, error_message): with pytest.raises(raise_type, match=error_message): data(1.5) + + def test_invalid_interp_method(self): + error_message = r"\nThe interpolation method \(interp\) must be"\ + r" 'raw', 'interpolate', 'look_forward' or 'hold_backward'" + with pytest.raises(ValueError, match=error_message): + TabData("", "", {}, interp="invalid") + + +@pytest.mark.parametrize( + "value,new_value,expected", + [ + ( # float-constant + xr.DataArray([10, 20], {'time': [0, 1]}, ['time']), + 26, + 26 + ), + ( # float-series + xr.DataArray([10, 20], {'time': [0, 1]}, ['time']), + pd.Series(index=[1, 20, 40], data=[2, 10, 2]), + xr.DataArray([2, 10, 2], {"time": [1, 20, 40]}, ["time"]) + ), + ( # array-constantfloat + xr.DataArray( + [[10, 20], [30, 40]], + {"time": [0, 1], "dim":["A", "B"]}, + ["time", "dim"]), + 26, + xr.DataArray(26, {"dim": ["A", "B"]}, ["dim"]), + ), + ( # array-seriesfloat + xr.DataArray( + [[10, 20], [30, 40]], + {"time": [0, 1], "dim":["A", "B"]}, + ["time", "dim"]), + pd.Series(index=[1, 20, 40], data=[2, 10, 2]), + xr.DataArray( + [[2, 2], [10, 10], [2, 2]], + {"time": [1, 20, 40], "dim":["A", "B"]}, + ["time", "dim"]) + ), + ( # array-constantarray + xr.DataArray( + [[[10, 20], [30, 40]], [[15, 25], [35, 45]]], + {"time": [0, 1], "dim":["A", "B"], "dim2": ["C", "D"]}, + ["time", "dim", "dim2"]), + xr.DataArray( + [1, 2], + {"dim": ["A", "B"]}, + ["dim"]), + xr.DataArray( + [[1, 2], [1, 2]], + {"dim": ["A", "B"], "dim2": ["C", "D"]}, + ["dim", "dim2"]) + ), + ( # array-seriesarray + xr.DataArray( + [[[10, 20], [30, 40]], [[15, 25], [35, 45]]], + {"time": [0, 1], "dim":["A", "B"], "dim2": ["C", "D"]}, + ["time", "dim", "dim2"]), + pd.Series(index=[1, 20, 40], data=[ + xr.DataArray([1, 2], {"dim": ["A", "B"]}, ["dim"]), + xr.DataArray([10, 20], {"dim": ["A", "B"]}, ["dim"]), + xr.DataArray([1, 2], {"dim": ["A", "B"]}, ["dim"]) + ]), + xr.DataArray( + [[[1, 2], [1, 2]], [[10, 20], [10, 20]], [[1, 2], [1, 2]]], + {"time": [1, 20, 40], "dim":["A", "B"], "dim2": ["C", "D"]}, + ["time", "dim", "dim2"]) + ) + ], + ids=[ + "float-constant", "float-series", + "array-constantfloat", "array-seriesfloat", + "array-constantarray", "array-seriesarray" + ] +) +class TestDataSetValues(): + + @pytest.fixture + def data(self, value): + obj = Data() + obj.data = value + obj.interp = "interp" + obj.is_float = len(value.shape) < 2 + obj.final_coords = { + dim: value.coords[dim] for dim in value.dims if dim != "time" + } + obj.py_name = "data" + return obj + + def test_data_set_value(self, data, new_value, expected): + data.set_values(new_value) + if isinstance(expected, (float, int)): + assert data.data == expected + else: + assert data.data.equals(expected) diff --git a/tests/pytest_types/data/pytest_data_with_model.py b/tests/pytest_types/data/pytest_data_with_model.py index acb3f5b9..de87ee3f 100644 --- a/tests/pytest_types/data/pytest_data_with_model.py +++ b/tests/pytest_types/data/pytest_data_with_model.py @@ -94,6 +94,17 @@ def test_get_data_and_run(self, model, expected): model.run(return_columns=["var1", "var2", "var3"]), expected) + def test_modify_data(self, model, expected): + out = model.run(params={ + "var1": pd.Series(index=[1, 3, 7], data=[10, 20, 
30]), + "var2": 10 + }) + + assert (out["var2"] == 10).all() + assert ( + out["var1"] == [10, 10, 15, 20, 22.5, 25, 27.5, 30, 30, 30, 30] + ).all() + class TestPySDDataErrors: def model(self, data_model, data_files, shared_tmpdir): diff --git a/tests/pytest_types/lookup/pytest_lookups.py b/tests/pytest_types/lookup/pytest_lookups.py new file mode 100644 index 00000000..490fc025 --- /dev/null +++ b/tests/pytest_types/lookup/pytest_lookups.py @@ -0,0 +1,105 @@ +import pytest + +import xarray as xr +import pandas as pd + +from pysd.py_backend.lookups import Lookups + + +@pytest.mark.parametrize( + "value,new_value,expected", + [ + ( # float-constant + xr.DataArray([10, 20], {'lookup_dim': [0, 1]}, ['lookup_dim']), + 26, + 26 + ), + ( # float-series + xr.DataArray([10, 20], {'lookup_dim': [0, 1]}, ['lookup_dim']), + pd.Series(index=[1, 20, 40], data=[2, 10, 2]), + xr.DataArray( + [2, 10, 2], + {"lookup_dim": [1, 20, 40]}, + ["lookup_dim"] + ) + + ), + ( # array-constantfloat + xr.DataArray( + [[10, 20], [30, 40]], + {"lookup_dim": [0, 1], "dim":["A", "B"]}, + ["lookup_dim", "dim"]), + 26, + xr.DataArray(26, {"dim": ["A", "B"]}, ["dim"]), + ), + ( # array-seriesfloat + xr.DataArray( + [[10, 20], [30, 40]], + {"lookup_dim": [0, 1], "dim":["A", "B"]}, + ["lookup_dim", "dim"]), + pd.Series(index=[1, 20, 40], data=[2, 10, 2]), + xr.DataArray( + [[2, 2], [10, 10], [2, 2]], + {"lookup_dim": [1, 20, 40], "dim":["A", "B"]}, + ["lookup_dim", "dim"]) + ), + ( # array-constantarray + xr.DataArray( + [[[10, 20], [30, 40]], [[15, 25], [35, 45]]], + {"lookup_dim": [0, 1], "dim":["A", "B"], "dim2": ["C", "D"]}, + ["lookup_dim", "dim", "dim2"]), + xr.DataArray( + [1, 2], + {"dim": ["A", "B"]}, + ["dim"]), + xr.DataArray( + [[1, 2], [1, 2]], + {"dim": ["A", "B"], "dim2": ["C", "D"]}, + ["dim", "dim2"]) + ), + ( # array-seriesarray + xr.DataArray( + [[[10, 20], [30, 40]], [[15, 25], [35, 45]]], + {"lookup_dim": [0, 1], "dim":["A", "B"], "dim2": ["C", "D"]}, + ["lookup_dim", "dim", "dim2"]), + pd.Series(index=[1, 20, 40], data=[ + xr.DataArray([1, 2], {"dim": ["A", "B"]}, ["dim"]), + xr.DataArray([10, 20], {"dim": ["A", "B"]}, ["dim"]), + xr.DataArray([1, 2], {"dim": ["A", "B"]}, ["dim"]) + ]), + xr.DataArray( + [[[1, 2], [1, 2]], [[10, 20], [10, 20]], [[1, 2], [1, 2]]], + { + "lookup_dim": [1, 20, 40], + "dim":["A", "B"], + "dim2": ["C", "D"] + }, + ["lookup_dim", "dim", "dim2"]) + ) + ], + ids=[ + "float-constant", "float-series", + "array-constantfloat", "array-seriesfloat", + "array-constantarray", "array-seriesarray" + ] +) +class TestLookupsSetValues(): + + @pytest.fixture + def lookups(self, value): + obj = Lookups() + obj.data = value + obj.interp = "interp" + obj.is_float = len(value.shape) < 2 + obj.final_coords = { + dim: value.coords[dim] for dim in value.dims if dim != "lookup_dim" + } + obj.py_name = "lookup" + return obj + + def test_lookups_set_value(self, lookups, new_value, expected): + lookups.set_values(new_value) + if isinstance(expected, (float, int)): + assert lookups.data == expected + else: + assert lookups.data.equals(expected) From 96325012232e52c4e92041047c236937489f15fe Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 6 May 2022 16:43:59 +0200 Subject: [PATCH 53/96] Remove unnecessary lines --- pysd/py_backend/statefuls.py | 64 ++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 1a641767..8d7d7b6a 100644 --- a/pysd/py_backend/statefuls.py +++ 
b/pysd/py_backend/statefuls.py @@ -627,7 +627,8 @@ def __init__(self, py_model_file, params=None, return_func=None, + " read_vensim or read_xmile.") self._namespace = self.components._components.component.namespace - self._dependencies = self.components._components.component.dependencies + self._dependencies =\ + self.components._components.component.dependencies.copy() self._subscript_dict = getattr( self.components._components, "_subscript_dict", {}) self._modules = getattr( @@ -835,16 +836,10 @@ def _add_constant_cache(self): self.constant_funcs = set() for element, cache_type in self.cache_type.items(): if cache_type == "run": - if self.get_args(element): - self.components._set_component( - element, - constant_cache(getattr(self.components, element), None) - ) - else: - self.components._set_component( - element, - constant_cache(getattr(self.components, element)) - ) + self.components._set_component( + element, + constant_cache(getattr(self.components, element)) + ) self.constant_funcs.add(element) def _remove_constant_cache(self): @@ -1273,17 +1268,24 @@ def set_components(self, params, new=False): # if the variable is a lookup or a data we perform the change in # the object they call - if getattr(func, "type", None) == "Lookup": - getattr( - self.components, - self._dependencies[func_name]["__lookup__"] - ).set_values(value) - continue - elif getattr(func, "type", None) == "Data": + func_type = getattr(func, "type", None) + if func_type in ["Lookup", "Data"]: + # getting the object from original dependencies + obj = self._dependencies[func_name][f"__{func_type.lower()}__"] getattr( self.components, - self._dependencies[func_name]["__data__"] + obj ).set_values(value) + + # Update dependencies + if func_type == "Data": + if isinstance(value, pd.Series): + self._dependencies[func_name] = { + "time": 1, "__data__": obj + } + else: + self._dependencies[func_name] = {"__data__": obj} + continue if isinstance(value, pd.Series): @@ -1292,17 +1294,10 @@ def set_components(self, params, new=False): self._dependencies[func_name] = deps elif callable(value): new_function = value - args = self.get_args(value) - if args: - # user function needs arguments, add it as a lookup - # to avoud caching it - self._dependencies[func_name] = {"__lookup__": None} - else: - # TODO it would be better if we can parse the content - # of the function to get all the dependencies - # user function takes no arguments, using step cache - # adding time as dependency - self._dependencies[func_name] = {"time": 1} + # Using step cache adding time as dependency + # TODO it would be better if we can parse the content + # of the function to get all the dependencies + self._dependencies[func_name] = {"time": 1} else: new_function = self._constant_component(value, dims) @@ -1495,12 +1490,9 @@ def _build_doc(self): if element.__doc__ else None }) - if collector: - docs_df = pd.DataFrame(collector) - return docs_df.sort_values(by="Real Name").reset_index(drop=True) - else: - # manage models with no documentation (mainly test models) - return None + return pd.DataFrame( + collector + ).sort_values(by="Real Name").reset_index(drop=True) def __str__(self): """ Return model source files """ From 381fe8d1f39e454b8dd211cb4ea312848f6cfed2 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 9 May 2022 11:02:19 +0200 Subject: [PATCH 54/96] Add tests --- tests/more-tests/split_model/input.xlsx | Bin 0 -> 8406 bytes .../split_model/test_split_model.mdl | 24 ++--- tests/pytest_pysd/pytest_model_attributes.py | 88 
++++++++++++++++++
 tests/pytest_pysd/pytest_select_submodel.py   |  13 ++-
 .../pytest_translation/pytest_split_views.py  |  11 ++-
 tests/unit_test_pysd.py                       |  33 ++++++-
 6 files changed, 151 insertions(+), 18 deletions(-)
 create mode 100644 tests/more-tests/split_model/input.xlsx
 create mode 100644 tests/pytest_pysd/pytest_model_attributes.py

diff --git a/tests/more-tests/split_model/input.xlsx b/tests/more-tests/split_model/input.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..2a828d5dbd0a31660b490877ef8663f215a59d8f
GIT binary patch
literal 8406
[base85-encoded binary data omitted]

Date: Mon, 9 May 2022 12:32:45 +0200
Subject: [PATCH 55/96] Correct bug with return timestamps

---
 pysd/py_backend/components.py | 43 +++++++++++++++++++++++++++++++----
 pysd/py_backend/statefuls.py  |  7 +++++-
 tests/unit_test_pysd.py       | 40 ++++++++++++++++++++++++++++++++
 3 files changed, 84 insertions(+), 6 deletions(-)

diff --git a/pysd/py_backend/components.py b/pysd/py_backend/components.py
index 950562b9..95ad8e17 100644
--- a/pysd/py_backend/components.py
+++ b/pysd/py_backend/components.py
@@ -2,6 +2,7 @@
 Model components and time managing classes.
""" +from warnings import warn import os import random import inspect @@ -176,12 +177,30 @@ def in_bounds(self): def in_return(self): """ Check if current time should be returned """ + prec = self.time_step() * self.rprec + if self.return_timestamps is not None: - return self._time in self.return_timestamps + # this allows managing float precission error + if self.next_return is None: + return False + if np.isclose(self._time, self.next_return, prec): + self._update_next_return() + return True + else: + while self.next_return is not None\ + and self._time > self.next_return: + warn( + f"The returning time stamp '{self.next_return}' " + "seems to not be a multiple of the time step. " + "This value will not be saved in the output. " + "Please, modify the returning timestamps or the " + "integration time step to avoid this." + ) + self._update_next_return() + return False time_delay = self._time - self._initial_time save_per = self.saveper() - prec = self.time_step() * self.rprec return time_delay % save_per < prec or -time_delay % save_per < prec def round(self): @@ -192,15 +211,29 @@ def round(self): def add_return_timestamps(self, return_timestamps): """ Add return timestamps """ - if return_timestamps is None or hasattr(return_timestamps, '__len__'): - self.return_timestamps = return_timestamps + if hasattr(return_timestamps, '__len__')\ + and len(return_timestamps) > 0: + self.return_timestamps = list(return_timestamps) + self.return_timestamps.sort(reverse=True) + self.next_return = self.return_timestamps.pop() + elif isinstance(return_timestamps, (float, int)): + self.next_return = return_timestamps + self.return_timestamps = [] else: - self.return_timestamps = [return_timestamps] + self.next_return = None + self.return_timestamps = None def update(self, value): """ Update current time value """ self._time = value + def _update_next_return(self): + """ Update the next_return value """ + if self.return_timestamps: + self.next_return = self.return_timestamps.pop() + else: + self.next_return = None + def reset(self): """ Reset time value to the initial """ self._time = self._initial_time diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 8d7d7b6a..a3ffae9b 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1620,7 +1620,12 @@ def run(self, params=None, return_columns=None, return_timestamps=None, self.time.add_return_timestamps(return_timestamps) if self.time.return_timestamps is not None and not final_time: - final_time = self.time.return_timestamps[-1] + # if not final time given the model will end in the list + # return timestamp (the list is reversed for popping) + if self.time.return_timestamps: + final_time = self.time.return_timestamps[0] + else: + final_time = self.time.next_return self.time.set_control_vars( final_time=final_time, time_step=time_step, saveper=saveper) diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index d2457289..a5cabf2f 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -109,6 +109,45 @@ def test_run_return_timestamps(self): with self.assertRaises(TypeError): model.run(return_timestamps=timestamps) + # assert that return_timestamps works with float error + stocks = model.run(time_step=0.1, return_timestamps=0.3) + assert 0.3 in stocks.index + + # assert that return_timestamps works with float error + stocks = model.run( + time_step=0.1, return_timestamps=[0.3, 0.1, 10.5, 0.9]) + assert 0.1 in stocks.index + assert 0.3 in stocks.index + assert 0.9 in stocks.index + assert 
10.5 in stocks.index + + # assert one timestamp is not returned because is not multiple of + # the time step + warning_message =\ + "The returning time stamp '%s' seems to not be a multiple "\ + "of the time step. This value will not be saved in the output. "\ + "Please, modify the returning timestamps or the integration "\ + "time step to avoid this." + # assert that return_timestamps works with float error + with catch_warnings(record=True) as ws: + stocks = model.run( + time_step=0.1, return_timestamps=[0.3, 0.1, 0.55, 0.9]) + assert str(ws[0].message) == warning_message % 0.55 + assert 0.1 in stocks.index + assert 0.3 in stocks.index + assert 0.9 in stocks.index + assert 0.55 not in stocks.index + + with catch_warnings(record=True) as ws: + stocks = model.run( + time_step=0.1, return_timestamps=[0.3, 0.15, 0.55, 0.95]) + for w, value in zip(ws, [0.15, 0.55, 0.95]): + assert str(w.message) == warning_message % value + assert 0.15 not in stocks.index + assert 0.3 in stocks.index + assert 0.95 not in stocks.index + assert 0.55 not in stocks.index + def test_run_return_timestamps_past_final_time(self): """ If the user enters a timestamp that is longer than the euler timeseries that is defined by the normal model file, should @@ -117,6 +156,7 @@ def test_run_return_timestamps_past_final_time(self): model = pysd.read_vensim(test_model) return_timestamps = list(range(0, 100, 10)) stocks = model.run(return_timestamps=return_timestamps) + print(stocks.index) self.assertSequenceEqual(return_timestamps, list(stocks.index)) def test_return_timestamps_with_range(self): From 5bb06fe25c31a8847d528ae1a075f336f9d145d6 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 9 May 2022 12:50:05 +0200 Subject: [PATCH 56/96] Catch warnings in the integration tests --- .../pytest_integration_vensim_pathway.py | 8 ++++++-- .../pytest_integration_xmile_pathway.py | 9 ++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/pytest_integration/pytest_integration_vensim_pathway.py b/tests/pytest_integration/pytest_integration_vensim_pathway.py index 9b4ae58b..f7dbc389 100644 --- a/tests/pytest_integration/pytest_integration_vensim_pathway.py +++ b/tests/pytest_integration/pytest_integration_vensim_pathway.py @@ -1,5 +1,7 @@ -import pytest +import warnings import shutil +import pytest + from pysd.tools.benchmarking import runner, assert_frames_close # TODO add warnings catcher per test @@ -575,5 +577,7 @@ def kwargs(self, test_data): return kwargs def test_read_vensim_file(self, model_path, data_path, kwargs): - output, canon = runner(model_path, data_files=data_path) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + output, canon = runner(model_path, data_files=data_path) assert_frames_close(output, canon, **kwargs) diff --git a/tests/pytest_integration/pytest_integration_xmile_pathway.py b/tests/pytest_integration/pytest_integration_xmile_pathway.py index 3d8fc294..8772c5f6 100644 --- a/tests/pytest_integration/pytest_integration_xmile_pathway.py +++ b/tests/pytest_integration/pytest_integration_xmile_pathway.py @@ -1,10 +1,11 @@ -import pytest +import warnings import shutil +import pytest + from pysd.tools.benchmarking import runner, assert_frames_close # TODO add warnings catcher per test - xmile_test = { "abs": { "folder": "abs", @@ -256,5 +257,7 @@ def kwargs(self, test_data): return kwargs def test_read_vensim_file(self, model_path, kwargs): - output, canon = runner(model_path) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + output, 
canon = runner(model_path) assert_frames_close(output, canon, **kwargs) From be8e2b2f9360407cc7805cf5dee742a613963e5b Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Mon, 9 May 2022 16:05:20 +0200 Subject: [PATCH 57/96] Document --- docs/structure/python_builder.rst | 2 +- pysd/building/python/imports.py | 6 +- pysd/building/python/namespace.py | 8 +- .../python/python_expressions_builder.py | 3 - pysd/building/python/python_model_builder.py | 318 +++++++++++++----- 5 files changed, 246 insertions(+), 91 deletions(-) diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst index a51fcb5d..2dd8290e 100644 --- a/docs/structure/python_builder.rst +++ b/docs/structure/python_builder.rst @@ -25,7 +25,7 @@ Namespace manager ----------------- .. automodule:: pysd.building.python.namespace - :members: + :members: NamespaceManager :undoc-members: diff --git a/pysd/building/python/imports.py b/pysd/building/python/imports.py index 53fade01..f807de22 100644 --- a/pysd/building/python/imports.py +++ b/pysd/building/python/imports.py @@ -1,3 +1,5 @@ +from typing import Union + class ImportsManager(): """ @@ -15,7 +17,7 @@ def __init__(self): self._lookups, self._utils, self._scipy =\ set(), set(), set(), set(), set(), set(), set() - def add(self, module, function=None): + def add(self, module: str, function: Union[str, None] = None) -> None: """ Add a function from module. @@ -33,7 +35,7 @@ def add(self, module, function=None): else: setattr(self, f"_{module}", True) - def get_header(self, outfile): + def get_header(self, outfile: str) -> str: """ Returns the importing information to print in the model file diff --git a/pysd/building/python/namespace.py b/pysd/building/python/namespace.py index 1bf01c8c..e08965b6 100644 --- a/pysd/building/python/namespace.py +++ b/pysd/building/python/namespace.py @@ -28,12 +28,12 @@ class NamespaceManager: By defaukt it is an empty list. 
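As an illustration of the namespace handling documented above, a minimal
sketch (the variable name is hypothetical and the resulting identifier
assumes the lower-case, underscore-separated convention used by the builder):

    from pysd.building.python.namespace import NamespaceManager

    ns = NamespaceManager()
    ns.add_to_namespace("Teacup Temperature")
    # ns.namespace maps original names to safe Python identifiers,
    # e.g. {"Time": "time", "Teacup Temperature": "teacup_temperature"}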
""" - reserved_words = set( + _reserved_words = set( dir() + bidir() + cdir() + ddir() + cadir() + edir() + fdir() + sdir() + udir()).union(kwlist) def __init__(self, parameters: List[str] = []): - self.used_words = self.reserved_words.copy() + self._used_words = self._reserved_words.copy() # inlcude time to the namespace self.namespace = {"Time": "time"} # include time to the cleanspace (case and whitespace/underscore @@ -159,12 +159,12 @@ def make_python_identifier(self, string: str, prefix: str = None, # Check that the string is not a python identifier identifier = s i = 1 - while identifier in self.used_words: + while identifier in self._used_words: identifier = s + '_' + str(i) i += 1 # include the word in used words to avoid using it againg - self.used_words.add(identifier) + self._used_words.add(identifier) if add_to_namespace: # include word to the namespace diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index 65c558b6..504dce6e 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1225,9 +1225,6 @@ def build(self, arguments): def merge_dependencies(*dependencies, inplace=False): - # TODO improve dependencies in the next major release, include info - # about external objects and simplify the stateful objects, think about - # how to include data/lookups objects current = dependencies[0] if inplace: current = dependencies[0] diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 91850593..8d8411cc 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -1,6 +1,15 @@ +""" +The ModelBuilder class allows converting the AbstractModel into a +PySD model writing the Python code in files that can be loaded later +with PySD Model class. Each Abstract level has its own Builder. However, +the user is only required to create a ModelBuilder object using the +AbstractModel and call the `build_model` method. +""" import textwrap import black import json +from pathlib import Path +from typing import Union from pysd.translation.structures.abstract_model import\ AbstractComponent, AbstractElement, AbstractModel, AbstractSection @@ -13,109 +22,155 @@ class ModelBuilder: + """ + ModelBuilder allows building a PySD Python model from the + Abstract Model. + + Parameters + ---------- + abstract_model: AbstractModel + The abstract model to build. + + """ def __init__(self, abstract_model: AbstractModel): self.__dict__ = abstract_model.__dict__.copy() + # load sections self.sections = [ SectionBuilder(section) for section in abstract_model.sections ] + # create the macrospace (namespace of macros) self.macrospace = { section.name: section for section in self.sections[1:]} - def build_model(self): - # TODO: add special building for main + def build_model(self) -> Path: + """ + Build the python model in a file callled as the orginal model + but with '.py' suffix. + + Returns + ------- + path: pathlib.Path + The path to the new PySD model. + + """ for section in self.sections: + # add macrospace information to each section and build it section.macrospace = self.macrospace section.build_section() + # return the path to the main file return self.sections[0].path class SectionBuilder: + """ + SectionBuilder allows building a section of the PySD model. Each + section will be a file unless the model has been setted to be + split in modules. 
+ + Parameters + ---------- + abstract_section: AbstractSection + The abstract section to build. + """ def __init__(self, abstract_section: AbstractSection): self.__dict__ = abstract_section.__dict__.copy() - self.root = self.path.parent - self.model_name = self.path.with_suffix("").name + self.root = self.path.parent # the folder where the model is + self.model_name = self.path.with_suffix("").name # name of the model + # Create subscript manager object with subscripts_dict self.subscripts = SubscriptManager( abstract_section.subscripts, self.root) + # Load the elements in the section self.elements = [ ElementBuilder(element, self) for element in abstract_section.elements ] + # Create the namespace of the section self.namespace = NamespaceManager(self.params) + # Create an imports manager self.imports = ImportsManager() + # Create macrospace (namespace of macros) self.macrospace = {} - - # create parameters dict necessary in macros + # Create parameters dict necessary in macros self.params = { key: self.namespace.namespace[key] for key in self.params } - def build_section(self): - # Create namespace + def build_section(self) -> None: + """ + Build the python section in a file callled as the orginal model + if the section is main or in a file called as the macro name + if the section is a macro. + """ + # Firts iteration over elements to recover their information for element in self.elements: + # Add element to namespace self.namespace.add_to_namespace(element.name) identifier = self.namespace.namespace[element.name] element.identifier = identifier + # Add element subscripts information to the subscript manager self.subscripts.elements[identifier] = element.subscripts + # Build elements for element in self.elements: element.build_element() if self.split: + # Build modular section self._build_modular(self.views_dict) else: + # Build one-file section self._build() - def process_views_tree(self, view_name, view_content, wdir): + def _process_views_tree(self, view_name: str, + view_content: Union[dict, set], + wdir: Path) -> dict: """ Creates a directory tree based on the elements_per_view dictionary. If it's the final view, it creates a file, if not, it creates a folder. 
""" if isinstance(view_content, set): - # will become a module - - # convert subview elements names to python names + # Will become a module + # Convert subview elements names to python names view_content = { self.namespace.cleanspace[var] for var in view_content } - - # get subview elements + # Get subview elements subview_elems = [ element for element in self.elements_remaining if element.identifier in view_content ] - - # remove elements from remaining ones + # Remove elements from remaining ones [ self.elements_remaining.remove(element) for element in subview_elems ] - + # Build the module self._build_separate_module(subview_elems, view_name, wdir) - return sorted(view_content) - else: - # the current view has subviews + # The current view has subviews wdir = wdir.joinpath(view_name) wdir.mkdir(exist_ok=True) return { subview_name: - self.process_views_tree(subview_name, subview_content, wdir) + self._process_views_tree(subview_name, subview_content, wdir) for subview_name, subview_content in view_content.items() } - def _build_modular(self, elements_per_view): + def _build_modular(self, elements_per_view: dict) -> None: + """ Build modular section """ self.elements_remaining = self.elements.copy() - elements_per_view = self.process_views_tree( + elements_per_view = self._process_views_tree( "modules_" + self.model_name, elements_per_view, self.root) - # building main file using the build function + # Building main file using the build function self._build_main_module(self.elements_remaining) + # Build subscripts dir and moduler .json files for file, values in { "modules_%s/_modules": elements_per_view, "_subscripts_%s": self.subscripts.subscripts}.items(): @@ -125,7 +180,8 @@ def _build_modular(self, elements_per_view): ".json").open("w") as outfile: json.dump(values, outfile, indent=4, sort_keys=True) - def _build_separate_module(self, elements, module_name, module_dir): + def _build_separate_module(self, elements: list, module_name: str, + module_dir: str) -> None: """ Constructs and writes the python representation of a specific model module, when the split_views=True in the read_vensim function. @@ -165,7 +221,7 @@ def _build_separate_module(self, elements, module_name, module_dir): with outfile_name.open("w", encoding="UTF-8") as out: out.write(text) - def _build_main_module(self, elements): + def _build_main_module(self, elements: list) -> None: """ Constructs and writes the python representation of the main model module, when the split_views=True in the read_vensim function. @@ -183,9 +239,7 @@ def _build_main_module(self, elements): Returns ------- - None or text: None or str - If file_name="return" it will return the content of the output file - instead of saving it. It is used for testing. + None """ # separating between control variables and rest of variables @@ -236,13 +290,23 @@ def _build_main_module(self, elements): with self.path.open("w", encoding="UTF-8") as out: out.write(text) - def _build(self): + def _build(self) -> None: + """ + Constructs and writes the python representation of a section. 
+ + Returns + ------- + None + + """ control_vars, funcs = self._build_variables(self.elements) text = self.imports.get_header(self.path.name) indent = "\n " + # Generate params dict for macro parameters params = f"{indent}_params = {self.params}\n"\ if self.params else "" + # Generate subscripts dir subs = f"{indent}_subscript_dict = {self.subscripts.subscripts}"\ if self.subscripts.subscripts else "" @@ -273,7 +337,7 @@ def _build(self): with self.path.open("w", encoding="UTF-8") as out: out.write(text) - def _build_variables(self, elements): + def _build_variables(self, elements: dict) -> tuple: """ Build model variables (functions) and separate then in control variables and regular variables. @@ -324,7 +388,7 @@ def _build_variables(self, elements): self._generate_functions(control_vars)),\ self._generate_functions(regular_vars) - def _generate_functions(self, elements): + def _generate_functions(self, elements: dict) -> str: """ Builds all model elements as functions in string format. NOTE: this function calls the build_element function, which @@ -346,9 +410,11 @@ def _generate_functions(self, elements): String containing all formated model functions """ - return "\n".join([element.build_element_out() for element in elements]) + return "\n".join( + [element._build_element_out() for element in elements] + ) - def _get_control_vars(self, control_vars): + def _get_control_vars(self, control_vars: str) -> str: """ Create the section of control variables @@ -394,67 +460,73 @@ def time(): class ElementBuilder: - + """ + ElementBuilder allows building an element of the PySD model. + + Parameters + ---------- + abstract_element: AbstractElement + The abstract element to build. + section: SectionBuilder + The section where the element is defined. Necessary to give the + acces to the subscripts and namespace. + + """ def __init__(self, abstract_element: AbstractElement, section: SectionBuilder): self.__dict__ = abstract_element.__dict__.copy() + # Set element type and subtype to None self.type = None self.subtype = None + # Get the arguments of the element self.arguments = getattr(self.components[0], "arguments", "") + # Load the components of the element self.components = [ ComponentBuilder(component, self, section) for component in abstract_element.components ] self.section = section + # Get the subscripts of the element after merging all the components self.subscripts = section.subscripts.make_merge_list( [component.subscripts[0] for component in self.components]) + # Get the subscript dictionary of the element self.subs_dict = section.subscripts.make_coord_dict(self.subscripts) + # Dictionaries to save dependencies and objects related to the element self.dependencies = {} self.other_dependencies = {} self.objects = {} - def _format_limits(self, limits): - if limits == (None, None): - return None - - new_limits = [] - for value in limits: - value = repr(value) - if value == "nan" or value == "None": - # add numpy.nan to the values - self.section.imports.add("numpy") - new_limits.append("np.nan") - elif value.endswith("inf"): - # add numpy.inf to the values - self.section.imports.add("numpy") - new_limits.append(value.strip("inf") + "np.inf") - else: - # add numeric value - new_limits.append(value) - - if new_limits[0] == "np.nan" and new_limits[1] == "np.nan": - # if both are numpy.nan do not include limits - return None - - return "(" + ", ".join(new_limits) + ")" - - def build_element(self): + def build_element(self) -> None: + """ + Build the element. 
Returns the string to include in the section which + will be a decorated function definition and possible objects. + """ # TODO think better how to build the components at once to build # in one declaration the external objects # TODO include some kind of magic vectorization to identify patterns # that can be easily vecorized (GET, expressions, Stocks...) - expressions = [] + + # Build the components of the element [component.build_component() for component in self.components] + expressions = [] for component in self.components: expr, subs, except_subscripts = component.get() if expr is None: + # The expr is None when the component has been "added" + # to an existing object using the add method continue if isinstance(subs, list): + # Get the list of locs for the component + # Subscripts dict will be a list when the component is + # translated to an object that groups may components + # via 'add' method. loc = [vs.visit_loc(subsi, self.subs_dict, True) for subsi in subs] else: + # Get the loc of the component loc = vs.visit_loc(subs, self.subs_dict, True) + # Get the locs of the :EXCLUDE: parameters if any exc_loc = [ vs.visit_loc(subs_e, self.subs_dict, True) for subs_e in except_subscripts @@ -483,26 +555,33 @@ def build_element(self): {"dim": subs} for subs in self.subscripts), self.subscripts) for expression in expressions: + # Generate the pre_expression, operations to compute in + # the body of the function if expression["expr"].subscripts: - # get the values + # Get the values # NUMPY not necessary expression["expr"].lower_order(0, force_0=True) expression["expr"].expression += ".values" if expression["loc_except"]: - # there is an excep in the definition of the component - self.pre_expression += self.manage_except(expression) + # There is an excep in the definition of the component + self.pre_expression += self._manage_except(expression) elif isinstance(expression["subs"], list): - self.pre_expression += self.manage_multi_def(expression) + # There are mixed definitions which include multicomponent + # object + self.pre_expression += self._manage_multi_def(expression) else: + # Regular loc for a component self.pre_expression +=\ "value.loc[%(loc)s] = %(expr)s\n" % expression + # Return value self.expression = "value" else: self.pre_expression = "" - # NUMPY: reshape to the final shape if meeded + # NUMPY: reshape to the final shape if needed # expressions[0]["expr"].reshape(self.section.subscripts, {}) if not expressions[0]["expr"].subscripts and self.subscripts: + # Updimension the return value to an array self.expression = "xr.DataArray(%s, %s, %s)\n" % ( expressions[0]["expr"], self.section.subscripts.simplify_subscript_input( @@ -510,8 +589,11 @@ def build_element(self): list(self.subs_dict) ) else: + # Return the expression self.expression = expressions[0]["expr"] + # Merge the types of the components (well defined element should + # have only one type and subtype) self.type = ", ".join( set(component.type for component in self.components) ) @@ -519,15 +601,24 @@ def build_element(self): set(component.subtype for component in self.components) ) - def manage_multi_def(self, expression): + def _manage_multi_def(self, expression: dict) -> str: + """ + Manage multiline definitions when some of them (not all) are + merged to one object. 
+ """ final_expr = "def_subs = xr.zeros_like(value, dtype=bool)\n" for loc in expression["loc"]: + # coordinates of the object final_expr += f"def_subs.loc[{loc}] = True\n" + # replace the values matching the coordinates return final_expr + "value.values[def_subs.values] = "\ "%(expr)s[def_subs.values]\n" % expression - def manage_except(self, expression): + def _manage_except(self, expression: dict) -> str: + """ + Manage except declarations by not asigning its values. + """ if expression["subs"] == self.subs_dict: # Final subscripts are the same as the main subscripts # of the component. Generate a True array like value @@ -544,15 +635,15 @@ def manage_except(self, expression): final_expr += "except_subs.loc[%s] = False\n" % except_subs if expression["expr"].subscripts: - # assign the values of an array + # Assign the values of an array return final_expr + "value.values[except_subs.values] = "\ "%(expr)s[except_subs.values]\n" % expression else: - # assign the values of a float + # Assign the values of a float return final_expr + "value.values[except_subs.values] = "\ "%(expr)s\n" % expression - def build_element_out(self): + def _build_element_out(self) -> str: """ Returns a string that has processed a single element dictionary. @@ -562,28 +653,34 @@ def build_element_out(self): The function to write in the model file. """ + # Contents of the function (body + return) contents = self.pre_expression + "return %s" % self.expression + # Get the objects to create as string objects = "\n\n".join([ value["expression"] % { "final_subs": self.section.subscripts.simplify_subscript_input( value.get("final_subs", {}))[1] - } + } # Replace the final subs in the objects that merge + # several components for value in self.objects.values() if value["expression"] is not None ]) + # Format the limits to get them as a string self.limits = self._format_limits(self.limits) + # Update arguments with final subs to alllow passing arguments + # with subscripts to the lookups if self.arguments == 'x': self.arguments = 'x, final_subs=None' - # define variable metadata for the @component decorator + # Define variable metadata for the @component decorator self.name = repr(self.name) meta_data = ["name=%(name)s"] - # include basic metadata (units, limits, dimensions) + # Include basic metadata (units, limits, dimensions) if self.units: meta_data.append("units=%(units)s") self.units = repr(self.units) @@ -593,19 +690,21 @@ def build_element_out(self): self.section.imports.add("subs") meta_data.append("subscripts=%(subscripts)s") - # include component type and subtype + # Include component type and subtype meta_data.append("comp_type='%(type)s'") meta_data.append("comp_subtype='%(subtype)s'") - # include dependencies + # Include dependencies if self.dependencies: meta_data.append("depends_on=%(dependencies)s") if self.other_dependencies: meta_data.append("other_deps=%(other_dependencies)s") + # Get metadata decorator self.meta_data = f"@component.add({', '.join(meta_data)})"\ % self.__dict__ + # Clean the documentation and add it to the beggining of contents if self.documentation: doc = self.documentation.replace("\\", "\n") contents = f'"""\n{doc}\n"""\n'\ @@ -613,10 +712,11 @@ def build_element_out(self): indent = 12 - # convert newline indicator and add expected level of indentation + # Convert newline indicator and add expected level of indentation self.contents = contents.replace("\n", "\n" + " " * (indent+4)) self.objects = objects.replace("\n", "\n" + " " * indent) + # Return the decorated function definition 
with the object declarations return textwrap.dedent(''' %(meta_data)s def %(identifier)s(%(arguments)s): @@ -626,9 +726,49 @@ def %(identifier)s(%(arguments)s): %(objects)s ''' % self.__dict__) + def _format_limits(self, limits: tuple) -> str: + """Format the limits of an element to print them properly""" + if limits == (None, None): + return None -class ComponentBuilder: + new_limits = [] + for value in limits: + value = repr(value) + if value == "nan" or value == "None": + # add numpy.nan to the values + self.section.imports.add("numpy") + new_limits.append("np.nan") + elif value.endswith("inf"): + # add numpy.inf to the values + self.section.imports.add("numpy") + new_limits.append(value.strip("inf") + "np.inf") + else: + # add numeric value + new_limits.append(value) + + if new_limits[0] == "np.nan" and new_limits[1] == "np.nan": + # if both are numpy.nan do not include limits + return None + return "(" + ", ".join(new_limits) + ")" + + +class ComponentBuilder: + """ + ComponentBuilder allows building a component of the PySD model. + + Parameters + ---------- + abstract_component: AbstracComponent + The abstract component to build. + element: ElementBuilder + The element where the component is defined. Necessary to give the + acces to the merging subscripts and other components. + section: SectionBuilder + The section where the element is defined. Necessary to give the + acces to the subscripts and namespace. + + """ def __init__(self, abstract_component: AbstractComponent, element: ElementBuilder, section: SectionBuilder): self.__dict__ = abstract_component.__dict__.copy() @@ -637,12 +777,28 @@ def __init__(self, abstract_component: AbstractComponent, if not hasattr(self, "keyword"): self.keyword = None - def build_component(self): + def build_component(self) -> None: + """ + Build model component parsing the Abstract Syntax Tree. + """ self.subscripts_dict = self.section.subscripts.make_coord_dict( self.subscripts[0]) self.except_subscripts = [self.section.subscripts.make_coord_dict( except_list) for except_list in self.subscripts[1]] self.ast_build = vs.ASTVisitor(self).visit() - def get(self): + def get(self) -> tuple: + """ + Get build component to build the element. + + Returns + ------- + ast_build: BuildAST + Parsed AbstractSyntaxTree. + subscript_dict: dict or list of dicts + The subscripts of the component. + except_subscripts: list of dicts + The subscripts to avoid. + + """ return self.ast_build, self.subscripts_dict, self.except_subscripts From 898b995e14dd87919ffe63777ddc29ed6f7a9afd Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 10 May 2022 12:33:28 +0200 Subject: [PATCH 58/96] Document --- docs/basic_usage.rst | 6 +- .../pysd_architecture_views/4+1view_model.rst | 5 + docs/functions.rst | 12 - docs/index.rst | 1 - docs/structure/model_loading.rst | 13 +- docs/structure/python_builder.rst | 7 - .../python/python_expressions_builder.py | 901 +++++++++++++++--- pysd/building/python/python_model_builder.py | 2 +- pysd/pysd.py | 4 +- 9 files changed, 808 insertions(+), 143 deletions(-) delete mode 100644 docs/functions.rst diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index 3a522468..855a6ed0 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -18,7 +18,7 @@ This code creates an instance of the PySD class loaded with an example model tha .. note:: The teacup model can be found in the `samples of the test-models repository `_. 
 
-To view a synopsis of the model equations and documentation, use the :py:function:`.doc` property of the model class. This will generate a listing of all the model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model::
+To view a synopsis of the model equations and documentation, use the :py:func:`.doc` property of the model class. This will generate a listing of all the model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model::
 
 	>>> model.doc
 
@@ -41,7 +41,7 @@ To view a synopsis of the model equations and documentation, use the :py:functio
 	>>> model = pysd.load('Teacup.py')
 
 .. note::
-	The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage, you can check the full description in :doc:`User Functions Reference <../functions>` or using :py:func:`help()` e.g.::
+	The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage; you can check the full description in :doc:`Model loading ` or use :py:func:`help()`, e.g.::
 
 	>>> import pysd
 	>>> help(pysd.load)
@@ -181,7 +181,7 @@ Same dimensions :py:class:`xarray.DataArray` can be used (recommended)::
 
 	>>> new_value = xr.DataArray([[1, 5], [3, 4]], {'dim1': [1, 2], 'dim2': [1, 2]}, ['dim1', 'dim2'])
 	>>> model.run(params={'Subscripted var': new_value})
 
-In the same way, a Pandas series can be used with constan values, partially defined *:py:class:`xarray.DataArray`s or same dimensions :py:class:`xarray.DataArray`s.
+In the same way, a Pandas series can be used with constant values, partially defined :py:class:`xarray.DataArray` or same dimensions :py:class:`xarray.DataArray`.
 
 .. note:: That once parameters are set by the run command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:data:`set_components(params={})` method, which takes the same params dictionary as the run function. We might choose to do this in situations where we'll be running the model many times, and only want to spend time setting the parameters once.
 
diff --git a/docs/development/pysd_architecture_views/4+1view_model.rst b/docs/development/pysd_architecture_views/4+1view_model.rst
index 3470fee6..8e8105f5 100644
--- a/docs/development/pysd_architecture_views/4+1view_model.rst
+++ b/docs/development/pysd_architecture_views/4+1view_model.rst
@@ -2,6 +2,11 @@ The "4+1" Model View of Software Architecture
 =============================================
 
 .. _4+1 model view: https://www.cs.ubc.ca/~gregor/teaching/papers/4+1view-architecture.pdf
+
+.. warning::
+    This page is outdated as it was written for PySD 2.x. However, the content here could be useful for developers.
+    For PySD 3+ architecture see :doc:`Structure of the PySD module <../../structure/structure_index>`.
+
 The `4+1 model view`_, designed by Philippe Krutchen, presents a way to describe the architecture of software systems, using multiple and concurrent views. This use of multiple views allows to address separately the concerns of the various 'stakeholders' of the architecture such as end-user, developers, systems engineers, project managers, etc. 
The software architecture deals with abstraction, with decomposition and composition, with style and system's esthetic. To describe a software architecture, we use a model formed by multiple views or perspectives. That model is made up of five main views: logical view, development view, process view, physical view and scenarios or user cases.
diff --git a/docs/functions.rst b/docs/functions.rst
deleted file mode 100644
index 1a844796..00000000
--- a/docs/functions.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-User Functions Reference
-========================
-
-These are the primary functions that control model import and execution.
-
-
-.. autofunction:: pysd.read_vensim
-
-.. autofunction:: pysd.read_xmile
-
-.. autofunction:: pysd.load
-
diff --git a/docs/index.rst b/docs/index.rst
index c8f18781..3d61e91f 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -89,7 +89,6 @@ Contents
    advanced_usage
    command_line_usage
    tools
-   functions
    structure/structure_index
    development/development_index
    reporting_bugs
diff --git a/docs/structure/model_loading.rst b/docs/structure/model_loading.rst
index d02bd1c2..51d55bff 100644
--- a/docs/structure/model_loading.rst
+++ b/docs/structure/model_loading.rst
@@ -1,2 +1,11 @@
-Model model loading
-===================
\ No newline at end of file
+Model loading
+=============
+For loading a translated model with Python, the function :py:func:`pysd.load` can be used:
+
+.. autofunction:: pysd.load
+
+To translate and load a model, the :py:func:`pysd.read_vensim` and :py:func:`pysd.read_xmile` functions can be used:
+
+.. autofunction:: pysd.read_vensim
+
+.. autofunction:: pysd.read_xmile
\ No newline at end of file
diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst
index 2dd8290e..e550ed0b 100644
--- a/docs/structure/python_builder.rst
+++ b/docs/structure/python_builder.rst
@@ -9,35 +9,28 @@ In addition to translating individual commands between Vensim/XMILE and Python,
 
 Main builders
 -------------
-
 .. automodule:: pysd.building.python.python_model_builder
    :members:
-   :undoc-members:
 
 Expression builders
 -------------------
 .. automodule:: pysd.building.python.python_expressions_builder
    :members:
-   :undoc-members:
 
 Namespace manager
 -----------------
-
 .. automodule:: pysd.building.python.namespace
    :members: NamespaceManager
-   :undoc-members:
 
 Subscript manager
 -----------------
 .. automodule:: pysd.building.python.subscripts
    :members:
-   :undoc-members:
 
 Imports manager
 ---------------
 .. automodule:: pysd.building.python.imports
    :members:
-   :undoc-members:
 
diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py
index 504dce6e..c0fd3197 100644
--- a/pysd/building/python/python_expressions_builder.py
+++ b/pysd/building/python/python_expressions_builder.py
@@ -1,3 +1,13 @@
+"""
+The translation from the Abstract Syntax Tree to Python is done recursively.
+The outer expression is visited with its builder, which splits its arguments
+and visits them with their respective builders. Once the lowest level is
+reached, it is translated into Python, returning a BuildAST object; this
+object includes the Python expression, its subscripts, its calls to other
+variables and its arithmetic order (see BuildAST for more info). A BuildAST
+object is returned for each visited argument, from the lowest level to the
+top level, giving the final expression.
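+
+For instance, for a variable defined as ``var = other_var + 3`` the
+reference and the constant are built first, returning objects roughly
+equivalent to this illustrative sketch (placeholder names)::
+
+    BuildAST("other_var()", {"other_var": 1}, {}, 0)
+    BuildAST("3", {}, {}, 0)
+
+The arithmetic builder then combines them into a single BuildAST whose
+expression is ``"other_var()+3"``, with the merged calls and order 3.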
+""" import warnings from dataclasses import dataclass from typing import Union @@ -5,30 +15,79 @@ import numpy as np from pysd.py_backend.utils import compute_shape -from pysd.translation.structures.abstract_expressions import AbstractSyntax -from pysd.translation.structures import abstract_expressions as ae +from pysd.translation.structures.abstract_expressions import\ + AbstractSyntax, ArithmeticStructure, CallStructure, DataStructure,\ + DelayFixedStructure, DelayStructure, DelayNStructure, ForecastStructure,\ + GameStructure, GetConstantsStructure, GetDataStructure,\ + GetLookupsStructure, InitialStructure, InlineLookupsStructure,\ + IntegStructure, LogicStructure, LookupsStructure, ReferenceStructure,\ + SampleIfTrueStructure, SmoothNStructure, SmoothStructure,\ + SubscriptsReferenceStructure, TrendStructure + from .python_functions import functionspace +from .subscripts import SubscriptManager @dataclass class BuildAST: + """ + Python expression holder. + + Parameters + ---------- expression: str + The Python expression. calls: dict + The calls to other variables for the dependencies dictionary. subscripts: dict + The subscripts dict of the expression. order: int + Arithmetic order of the expression. The arithmetic order depends + on the last arithmetic operation. If the expression is a number, + a call to a function, or is between parenthesis; its order will + be 0. If the expression its an exponential of two terms its order + will be 1. If the expression is a product or division its order + will be 2. If the expression is a sum or substraction its order + will be 3. If the expression is a logical comparison its order + will be 4. - def __str__(self): + """ + expression: str + calls: dict + subscripts: dict + order: int + + def __str__(self) -> str: # makes easier building return self.expression - def reshape(self, subscripts, final_subscripts, final_element=False): + def reshape(self, subscripts: SubscriptManager, + final_subscripts: dict, + final_element: bool = False) -> None: + """ + Reshape the object to the desired subscripts. It will modify the + expression and lower the order if it is not 0. + + Parameters + ---------- + subscripts: SubscriptManager + The subscripts of the section. + final_subscripts: dict + The desired final subscripts. + final_element: bool (optional) + If True the array will be reshaped with the final subscripts + to have the shame shape. Otherwise, a length 1 dimension + will be included in the position to allow arithmetic + operations with other arrays. Default is False. 
+ + """ if not final_subscripts or ( self.subscripts == final_subscripts and list(self.subscripts) == list(final_subscripts)): - # same dictionary in the same orde, do nothing + # Same dictionary in the same order, do nothing pass elif not self.subscripts: - # original expression is not an array + # Original expression is not an array # NUMPY: object.expression = np.full(%s, %(shape)s) subscripts_out = subscripts.simplify_subscript_input( final_subscripts)[1] @@ -38,10 +97,10 @@ def reshape(self, subscripts, final_subscripts, final_element=False): self.order = 0 self.subscripts = final_subscripts else: - # original expression is an array - self.lower_order(0, force_0=True) + # Original expression is an array + self.lower_order(-1) - # reorder subscrips + # Reorder subscrips final_order = { sub: self.subscripts[sub] for sub in final_subscripts @@ -67,21 +126,42 @@ def reshape(self, subscripts, final_subscripts, final_element=False): self.subscripts = final_subscripts - def lower_order(self, new_order, force_0=False): - if self.order >= new_order and self.order != 0\ - and (new_order != 0 or force_0): + def lower_order(self, new_order: int) -> None: + """ + Lower the order to maintain the correct order in arithmetic + operations. If the requestes order is smaller than the current + order parenthesis will be added to the expression to lower its + order to 0. + + Parameters + ---------- + new_order: int + The required new order of the expression. If 0 it will be + assumed that the expression will be passed as an argument + of a function and therefore no operations will be done. If + order 0 is required, a negative value can be used for + new_order. + + """ + if self.order >= new_order and self.order != 0 and new_order != 0: # if current operator order is 0 do not need to do anything # if the order of operations conflicts add parenthesis # if new order is 0 do not need to do anything, as it may be - # an argument to a function, unless force_0 is True which - # will force the parenthesis (necessary to reshape some - # numpy arrays) + # an argument to a function. To force the 0 order a negative + # value can be used, which will force the parenthesis + # (necessary to reshape some arrays) self.expression = "(%s)" % self.expression self.order = 0 class StructureBuilder: - def __init__(self, value, component): + """ + Main builder for Abstract Syntax Tree structures. All the builders + are children of this class, which allows them inheriting the methods. + """ + def __init__(self, value: object, component: object): + # component typing should be ComponentBuilder, but importing it + # for typing would create a circular dependency :S self.value = value self.arguments = {} self.component = component @@ -89,17 +169,58 @@ def __init__(self, value, component): self.section = component.section self.def_subs = component.subscripts_dict - def join_calls(self, arguments): + @staticmethod + def join_calls(arguments: dict) -> dict: + """ + Merge the calls of the arguments. + + Parameters + ---------- + arguments: dict + The dictionary of arguments. The keys should br strings of + ordered integer numbers starting from 0. + + Returns + ------- + calls: dict + The merged dictionary of calls. 
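+
+        Example
+        -------
+        Illustrative sketch with placeholder names::
+
+            >>> StructureBuilder.join_calls({
+            ...     "0": BuildAST("var_a()", {"var_a": 1}, {}, 0),
+            ...     "1": BuildAST("var_b()", {"var_b": 1}, {}, 0)
+            ... })
+            {'var_a': 1, 'var_b': 1}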
+ + """ if len(arguments) == 0: + # No arguments return {} elif len(arguments) == 1: + # Only one argument return arguments["0"].calls else: + # Several arguments return merge_dependencies( *[val.calls for val in arguments.values()]) - def reorder(self, arguments, force=None): - + def reorder(self, arguments: dict, force: bool = None) -> dict: + """ + Reorder the subscripts of the arguments to make them match. + + Parameters + ---------- + arguments: dict + The dictionary of arguments. The keys should br strings of + ordered integer numbers starting from 0. + force: 'component', 'equal', or None (optional) + If force is 'component' it will force the arguments to have + the subscripts of the component definition. If force is + 'equal' it will force all the arguments to have the same + subscripts, includying the floats. If force is None, it + will only modify the shape of the arrays adding length 1 + dimensions to allow operation between different shape arrays. + Default is None. + + Returns + ------- + final_subscripts: dict + The final_subscripts after reordering all the elements. + + """ if force == "component": final_subscripts = self.def_subs or {} else: @@ -112,7 +233,22 @@ def reorder(self, arguments, force=None): return final_subscripts - def get_final_subscripts(self, arguments): + def get_final_subscripts(self, arguments: dict) -> dict: + """ + Get the final subscripts of a combination of arguments. + + Parameters + ---------- + arguments: dict + The dictionary of arguments. The keys should br strings of + ordered integer numbers starting from 0. + + Returns + ------- + final_subscripts: dict + The final_subscripts of combining all the elements. + + """ if len(arguments) == 0: return {} elif len(arguments) == 1: @@ -121,32 +257,76 @@ def get_final_subscripts(self, arguments): return self._compute_final_subscripts( [arg.subscripts for arg in arguments.values()]) - def _compute_final_subscripts(self, subscripts_list): + def _compute_final_subscripts(self, subscripts_list: list) -> dict: + """ + Compute final subscripts from a list of subscript dictionaries. + + Parameters + ---------- + subscript_list: list of dicts + List of subscript dictionaries. + + """ expression = {} [expression.update(subscript) for subscript in subscripts_list if subscript] # TODO reorder final_subscripts taking into account def_subs + # this way try to minimize the reordering operations return expression - def update_object_subscripts(self, name, component_final_subs): + def update_object_subscripts(self, name: str, + component_final_subs: dict) -> None: + """ + Update the object subscripts. Needed for those objects that + use 'add' method to load several components at once. + + Parameters + ---------- + name: str + The name of the object in the objects dictionary from the + element. + component_final_subs: dict + The subscripts of the component but with the element + subscript ranges as keys. This can differ from the component + subscripts when the component is defined with subranges of + the final subscript ranges. 
+ + """ + # Get the component used to define the object first time origin_comp = self.element.objects[name]["component"] if isinstance(origin_comp.subscripts_dict, dict): + # The original component subscript dictionary is a dict if len(list(origin_comp.subscripts_dict)) == 1: + # If the subscript dict has only one dimension + # all the components can be loaded in 1D array directly + # with the same length as the given by the sum of the + # components key = list(origin_comp.subscripts_dict.keys())[0] value = list(component_final_subs.values())[0] origin_comp.subscripts_dict[key] += value self.element.objects[name]["final_subs"] =\ origin_comp.subscripts_dict else: + # If the subscripts dict has several dimensions, then + # a multi-dimensional array needs to be computed, + # in some cases, when mixed definitions are used in an + # element (e.g. GET DIRECT CONSTANTS and regular constants), + # this array can have some empty subarrays, therefore a + # list should be created and manage the loaded data + # with manage_multi_def in the element building origin_comp.subscripts_dict = [origin_comp.subscripts_dict] self.element.objects[name]["final_subs"] =\ self.element.subs_dict if isinstance(origin_comp.subscripts_dict, list): + # The original component subscript dictionary is a list + # (this happens when other components have already been + # added with 'add' method) origin_comp.subscripts_dict.append(component_final_subs) class OperationBuilder(StructureBuilder): - operators_build = { + """Builder for arithmetic and logical operations.""" + _operators_build = { "^": ("%(left)s**%(right)s", None, 1), "*": ("%(left)s*%(right)s", None, 2), "/": ("%(left)s/%(right)s", None, 2), @@ -164,24 +344,40 @@ class OperationBuilder(StructureBuilder): "negative": ("-%s", None, 3), } - def __init__(self, operation, component): + def __init__(self, operation: Union[ArithmeticStructure, LogicStructure], + component: object): super().__init__(None, component) self.operators = operation.operators.copy() self.arguments = { str(i): arg for i, arg in enumerate(operation.arguments)} - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. 
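+
+        Example
+        -------
+        Illustrative sketch (placeholder names): for an AST equivalent
+        to ``var_a + var_b*3`` the returned object is roughly::
+
+            BuildAST("var_a()+var_b()*3", {"var_a": 1, "var_b": 1}, {}, 3)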
+ + """ operands = {} calls = self.join_calls(arguments) final_subscripts = self.reorder(arguments) arguments = [arguments[str(i)] for i in range(len(arguments))] - dependencies, order = self.operators_build[self.operators[-1]][1:] + dependencies, order = self._operators_build[self.operators[-1]][1:] if dependencies: + # Add necessary dependencies to the imports self.section.imports.add(*dependencies) if self.operators[-1] == "^": - # right side of the exponential can be from higher order + # Right side of the exponential can be from higher order arguments[-1].lower_order(2) else: arguments[-1].lower_order(order) @@ -190,18 +386,24 @@ def build(self, arguments): # not and negative operations (only 1 element) if self.operators[0] == "negative": order = 1 - expression = self.operators_build[self.operators[0]][0] + expression = self._operators_build[self.operators[0]][0] return BuildAST( expression=expression % arguments[0], calls=calls, subscripts=final_subscripts, order=order) + # Add the arguments to the expression with the operator, + # they are built from right to left + # Get the last argument as the RHS of the first operation operands["right"] = arguments.pop() while arguments or self.operators: - expression = self.operators_build[self.operators.pop()][0] + # Get the operator and the LHS of the operation + expression = self._operators_build[self.operators.pop()][0] operands["left"] = arguments.pop() + # Lower the order of the LHS if neccessary operands["left"].lower_order(order) + # Include the operation in the RHS for next iteration operands["right"] = expression % operands return BuildAST( @@ -212,40 +414,74 @@ def build(self, arguments): class GameBuilder(StructureBuilder): - def __init__(self, game_str, component): + """Builder for GAME expressions.""" + def __init__(self, game_str: GameStructure, component: object): super().__init__(None, component) self.arguments = {"expr": game_str.expression} - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. 
+ + """ + # Game calls are ignored as we have no support for a similar + # feature, we simpli return the content inside the GAME call return arguments["expr"] class CallBuilder(StructureBuilder): - def __init__(self, call_str, component): + """Builder for calls to functions, macros and lookups.""" + def __init__(self, call_str: CallStructure, component: object): super().__init__(None, component) function_name = call_str.function.reference self.arguments = { str(i): arg for i, arg in enumerate(call_str.arguments)} - # move this to a setter + if function_name in self.section.macrospace: - # build macro + # Build macro self.macro_name = function_name self.build = self.build_macro_call elif function_name in self.section.namespace.cleanspace: - # build lookupcall + # Build lookupcall self.arguments["function"] = call_str.function self.build = self.build_lookups_call elif function_name in functionspace: - # build direct function + # Build direct function self.function = function_name self.build = self.build_function_call elif function_name == "a_function_of": + # Build incomplete function self.build = self.build_incomplete_call else: + # Build missing function self.function = function_name self.build = self.build_not_implemented - def build_not_implemented(self, arguments): + def build_not_implemented(self, arguments: dict) -> BuildAST: + """ + Build method for not implemented function calls. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ final_subscripts = self.reorder(arguments) warnings.warn( "\n\nTrying to translate '" @@ -263,7 +499,21 @@ def build_not_implemented(self, arguments): subscripts=final_subscripts, order=0) - def build_incomplete_call(self, arguments): + def build_incomplete_call(self, arguments: dict) -> BuildAST: + """ + Build method for incomplete function calls. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ warnings.warn( "'%s' has no equation specified" % self.element.name, SyntaxWarning, stacklevel=2 @@ -276,8 +526,23 @@ def build_incomplete_call(self, arguments): subscripts=self.def_subs, order=0) - def build_macro_call(self, arguments): + def build_macro_call(self, arguments: dict) -> BuildAST: + """ + Build method for macro calls. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.section.imports.add("statefuls", "Macro") + # Get macro from macrospace macro = self.section.macrospace[self.macro_name] calls = self.join_calls(arguments) @@ -292,6 +557,7 @@ def build_macro_call(self, arguments): for key, val in zip(macro.params, arguments.values()) ]) + # Create Macro object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Macro(_root.joinpath('%(file)s'), " @@ -299,6 +565,7 @@ def build_macro_call(self, arguments): "time_initialization=lambda: __data['time'], " "py_name='%(name)s')" % arguments, } + # Add other_dependencies self.element.other_dependencies[arguments["name"]] = { "initial": calls, "step": calls @@ -310,13 +577,31 @@ def build_macro_call(self, arguments): subscripts=final_subscripts, order=0) - def build_lookups_call(self, arguments): + def build_lookups_call(self, arguments: dict) -> BuildAST: + """ + Build method for loookups calls. 
+ + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ if arguments["0"].subscripts: + # Build lookups with subcripted arguments + # it is neccessary to give the final subscripts information + # in the call to rearrange it correctly final_subscripts =\ self.get_final_subscripts(arguments) expression = arguments["function"].expression.replace( "()", f"(%(0)s, {final_subscripts})") else: + # Build lookups with float arguments final_subscripts = arguments["function"].subscripts expression = arguments["function"].expression.replace( "()", "(%(0)s)") @@ -329,24 +614,45 @@ def build_lookups_call(self, arguments): subscripts=final_subscripts, order=0) - def build_function_call(self, arguments): + def build_function_call(self, arguments: dict) -> BuildAST: + """ + Build method for function calls. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ + # Get the function expression from the functionspace expression, modules = functionspace[self.function] if modules: + # Update module dependencies in imports self.section.imports.add(*modules) calls = self.join_calls(arguments) if "__data['time']" in expression: + # If the expression depens on time add to the dependencies merge_dependencies(calls, {"time": 1}, inplace=True) - # TODO modify dimensions of BuildAST if "%(axis)s" in expression: - final_subscripts, arguments["axis"] = self.compute_axis(arguments) + # Vectorial expressions, compute the axis using dimensions + # with ! operator + final_subscripts, arguments["axis"] = self._compute_axis(arguments) elif "%(size)s" in expression: + # Random expressions, need to give the final size of the + # component to create one value per final coordinate final_subscripts = self.reorder(arguments, force="component") arguments["size"] = tuple(compute_shape(final_subscripts)) if arguments["size"]: + # Create an xarray from the random function output # NUMPY: not necessary # generate an xarray from the output subs = self.section.subscripts.simplify_subscript_input( @@ -355,7 +661,7 @@ def build_function_call(self, arguments): f"{list(self.def_subs)})" elif self.function == "active_initial": - # we need to ensure that active initial outputs are always the + # Ee need to ensure that active initial outputs are always the # same and update dependencies as stateful object name = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_active_initial") @@ -407,7 +713,24 @@ def build_function_call(self, arguments): subscripts=final_subscripts, order=0) - def compute_axis(self, arguments): + def _compute_axis(self, arguments: dict) -> tuple: + """ + Compute the axis to apply a vectorial function. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + coords: dict + The final coordinates after executing the vectorial function + axis: list + The list of dimensions to apply the function. Uses the + dimensions with "!" at the end. 
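+
+        Example
+        -------
+        Illustrative sketch with placeholder dimension names: an argument
+        subscripted as ``{"dim!": ["a", "b"], "dim2": ["x", "y"]}`` gives
+        ``coords = {"dim2": ["x", "y"]}`` and ``axis = ["dim!"]``.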
+ + """ subscripts = arguments["0"].subscripts axis = [] coords = {} @@ -422,7 +745,8 @@ def compute_axis(self, arguments): class ExtLookupBuilder(StructureBuilder): - def __init__(self, getlookup_str, component): + """Builder for External Lookups.""" + def __init__(self, getlookup_str: GetLookupsStructure, component: object): super().__init__(None, component) self.file = getlookup_str.file self.tab = getlookup_str.tab @@ -430,7 +754,22 @@ def __init__(self, getlookup_str, component): self.cell = getlookup_str.cell self.arguments = {} - def build(self, arguments): + def build(self, arguments: dict) -> Union[BuildAST, None]: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST or None + The built object, unless the component has been added to an + existing object using the 'add' method. + + """ self.component.type = "Lookup" self.component.subtype = "External" arguments["params"] = "'%s', '%s', '%s', '%s'" % ( @@ -441,7 +780,7 @@ def build(self, arguments): self.def_subs, self.element.subscripts) if "ext_lookups" in self.element.objects: - # object already exists + # Object already exists, use 'add' method self.element.objects["ext_lookups"]["expression"] += "\n\n"\ + self.element.objects["ext_lookups"]["name"]\ + ".add(%(params)s, %(subscripts)s)" % arguments @@ -450,7 +789,7 @@ def build(self, arguments): return None else: - # create a new object + # Create a new object self.section.imports.add("external", "ExtLookup") arguments["name"] = self.section.namespace.make_python_identifier( @@ -478,7 +817,8 @@ def build(self, arguments): class ExtDataBuilder(StructureBuilder): - def __init__(self, getdata_str, component): + """Builder for External Data.""" + def __init__(self, getdata_str: GetDataStructure, component: object): super().__init__(None, component) self.file = getdata_str.file self.tab = getdata_str.tab @@ -487,7 +827,22 @@ def __init__(self, getdata_str, component): self.keyword = component.keyword self.arguments = {} - def build(self, arguments): + def build(self, arguments: dict) -> Union[BuildAST, None]: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST or None + The built object, unless the component has been added to an + existing object using the 'add' method. 
+ + """ self.component.type = "Data" self.component.subtype = "External" arguments["params"] = "'%s', '%s', '%s', '%s'" % ( @@ -499,7 +854,7 @@ def build(self, arguments): arguments["method"] = "'%s'" % self.keyword if self.keyword else None if "ext_data" in self.element.objects: - # object already exists + # Object already exists, use add method self.element.objects["ext_data"]["expression"] += "\n\n"\ + self.element.objects["ext_data"]["name"]\ + ".add(%(params)s, %(method)s, %(subscripts)s)" % arguments @@ -508,7 +863,7 @@ def build(self, arguments): return None else: - # create a new object + # Create a new object self.section.imports.add("external", "ExtData") arguments["name"] = self.section.namespace.make_python_identifier( @@ -536,14 +891,31 @@ def build(self, arguments): class ExtConstantBuilder(StructureBuilder): - def __init__(self, getconstant_str, component): + """Builder for External Constants.""" + def __init__(self, getconstant_str: GetConstantsStructure, + component: object): super().__init__(None, component) self.file = getconstant_str.file self.tab = getconstant_str.tab self.cell = getconstant_str.cell self.arguments = {} - def build(self, arguments): + def build(self, arguments: dict) -> Union[BuildAST, None]: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST or None + The built object, unless the component has been added to an + existing object using the 'add' method. + + """ self.component.type = "Constant" self.component.subtype = "External" arguments["params"] = "'%s', '%s', '%s'" % ( @@ -554,7 +926,7 @@ def build(self, arguments): self.def_subs, self.element.subscripts) if "constants" in self.element.objects: - # object already exists + # Object already exists, use 'add' method self.element.objects["constants"]["expression"] += "\n\n"\ + self.element.objects["constants"]["name"]\ + ".add(%(params)s, %(subscripts)s)" % arguments @@ -563,7 +935,7 @@ def build(self, arguments): return None else: - # create a new object + # Create a new object self.section.imports.add("external", "ExtConstant") arguments["name"] = self.section.namespace.make_python_identifier( @@ -588,12 +960,27 @@ def build(self, arguments): class TabDataBuilder(StructureBuilder): - def __init__(self, data_str, component): + """Builder for empty DATA expressions.""" + def __init__(self, data_str: DataStructure, component: object): super().__init__(None, component) self.keyword = component.keyword self.arguments = {} - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. 
+ + """ self.section.imports.add("data", "TabData") final_subs, arguments["subscripts"] =\ @@ -609,6 +996,7 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_data") + # Create TabData object self.element.objects["tab_data"] = { "name": arguments["name"], "expression": "%(name)s = TabData('%(real_name)s', '%(py_name)s', " @@ -623,13 +1011,28 @@ def build(self, arguments): class InitialBuilder(StructureBuilder): - def __init__(self, initial_str, component): + """Builder for Initials.""" + def __init__(self, initial_str: InitialStructure, component: object): super().__init__(None, component) self.arguments = { "initial": initial_str.initial } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.component.type = "Stateful" self.component.subtype = "Initial" self.section.imports.add("statefuls", "Initial") @@ -640,11 +1043,13 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_initial") + # Create the object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Initial(lambda: %(initial)s, " "'%(name)s')" % arguments, } + # Add other-dependencies self.element.other_dependencies[arguments["name"]] = { "initial": arguments["initial"].calls, "step": {} @@ -658,14 +1063,29 @@ def build(self, arguments): class IntegBuilder(StructureBuilder): - def __init__(self, integ_str, component): + """Builder for Integs/Stocks.""" + def __init__(self, integ_str: IntegStructure, component: object): super().__init__(None, component) self.arguments = { "flow": integ_str.flow, "initial": integ_str.initial } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.component.type = "Stateful" self.component.subtype = "Integ" self.section.imports.add("statefuls", "Integ") @@ -678,11 +1098,13 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_integ") + # Create the object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Integ(lambda: %(flow)s, " "lambda: %(initial)s, '%(name)s')" % arguments } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": arguments["initial"].calls, "step": arguments["flow"].calls @@ -696,7 +1118,10 @@ def build(self, arguments): class DelayBuilder(StructureBuilder): - def __init__(self, dtype, delay_str, component): + """Builder for regular Delays.""" + def __init__(self, dtype: str, + delay_str: Union[DelayStructure, DelayNStructure], + component: object): super().__init__(None, component) self.arguments = { "input": delay_str.input, @@ -706,7 +1131,21 @@ def __init__(self, dtype, delay_str, component): } self.dtype = dtype - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. 
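+
+        Example
+        -------
+        Sketch of the object expression added to the element, with
+        placeholder names, for a third order delay::
+
+            _delay_var = Delay(lambda: input_var(), lambda: delay_time(),
+                               lambda: initial_value(), lambda: 3,
+                               time_step, '_delay_var')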
+ + """ self.component.type = "Stateful" self.component.subtype = "Delay" self.section.imports.add("statefuls", self.dtype) @@ -722,6 +1161,7 @@ def build(self, arguments): self.element.identifier, prefix=f"_{self.dtype.lower()}") arguments["dtype"] = self.dtype + # Add the object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = %(dtype)s(lambda: %(input)s, " @@ -729,6 +1169,7 @@ def build(self, arguments): "lambda: %(order)s, " "time_step, '%(name)s')" % arguments, } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": merge_dependencies( arguments["initial"].calls, @@ -748,7 +1189,8 @@ def build(self, arguments): class DelayFixedBuilder(StructureBuilder): - def __init__(self, delay_str, component): + """Builder for Delay Fixed.""" + def __init__(self, delay_str: DelayFixedStructure, component: object): super().__init__(None, component) self.arguments = { "input": delay_str.input, @@ -756,7 +1198,21 @@ def __init__(self, delay_str, component): "initial": delay_str.initial, } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.component.type = "Stateful" self.component.subtype = "DelayFixed" self.section.imports.add("statefuls", "DelayFixed") @@ -769,12 +1225,14 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_delayfixed") + # Create object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = DelayFixed(lambda: %(input)s, " "lambda: %(delay_time)s, lambda: %(initial)s, " "time_step, '%(name)s')" % arguments, } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": merge_dependencies( arguments["initial"].calls, @@ -790,7 +1248,9 @@ def build(self, arguments): class SmoothBuilder(StructureBuilder): - def __init__(self, smooth_str, component): + """Builder for Smooths.""" + def __init__(self, smooth_str: Union[SmoothStructure, SmoothNStructure], + component: object): super().__init__(None, component) self.arguments = { "input": smooth_str.input, @@ -799,7 +1259,21 @@ def __init__(self, smooth_str, component): "order": smooth_str.order } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. 
+ + """ self.component.type = "Stateful" self.component.subtype = "Smooth" self.section.imports.add("statefuls", "Smooth") @@ -819,12 +1293,15 @@ def build(self, arguments): # TODO in the future we may want to have 2 py_backend classes for # smooth as the behaviour is different for SMOOTH and SMOOTH N when # using RingeKutta scheme + + # Create object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Smooth(lambda: %(input)s, " "lambda: %(smooth_time)s, lambda: %(initial)s, " "lambda: %(order)s, '%(name)s')" % arguments, } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": merge_dependencies( arguments["initial"].calls, @@ -843,7 +1320,8 @@ def build(self, arguments): class TrendBuilder(StructureBuilder): - def __init__(self, trend_str, component): + """Builder for Trends.""" + def __init__(self, trend_str: TrendStructure, component: object): super().__init__(None, component) self.arguments = { "input": trend_str.input, @@ -851,7 +1329,21 @@ def __init__(self, trend_str, component): "initial_trend": trend_str.initial_trend, } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.component.type = "Stateful" self.component.subtype = "Trend" self.section.imports.add("statefuls", "Trend") @@ -866,6 +1358,7 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_trend") + # Create object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Trend(lambda: %(input)s, " @@ -873,6 +1366,7 @@ def build(self, arguments): "lambda: %(initial_trend)s, " "'%(name)s')" % arguments, } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": merge_dependencies( arguments["initial_trend"].calls, @@ -891,7 +1385,8 @@ def build(self, arguments): class ForecastBuilder(StructureBuilder): - def __init__(self, forecast_str, component): + """Builder for Forecasts.""" + def __init__(self, forecast_str: ForecastStructure, component: object): super().__init__(None, component) self.arguments = { "input": forecast_str.input, @@ -900,7 +1395,21 @@ def __init__(self, forecast_str, component): "initial_trend": forecast_str.initial_trend } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. 
+ + """ self.component.type = "Stateful" self.component.subtype = "Forecast" self.section.imports.add("statefuls", "Forecast") @@ -917,12 +1426,14 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_forecast") + # Create object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = Forecast(lambda: %(input)s, " "lambda: %(average_time)s, lambda: %(horizon)s, " "lambda: %(initial_trend)s, '%(name)s')" % arguments, } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": merge_dependencies( arguments["input"].calls, @@ -941,7 +1452,9 @@ def build(self, arguments): class SampleIfTrueBuilder(StructureBuilder): - def __init__(self, sampleiftrue_str, component): + """Builder for Sample If True.""" + def __init__(self, sampleiftrue_str: SampleIfTrueStructure, + component: object): super().__init__(None, component) self.arguments = { "condition": sampleiftrue_str.condition, @@ -949,7 +1462,21 @@ def __init__(self, sampleiftrue_str, component): "initial": sampleiftrue_str.initial, } - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.component.type = "Stateful" self.component.subtype = "SampleIfTrue" self.section.imports.add("statefuls", "SampleIfTrue") @@ -964,12 +1491,14 @@ def build(self, arguments): arguments["name"] = self.section.namespace.make_python_identifier( self.element.identifier, prefix="_sampleiftrue") + # Create object self.element.objects[arguments["name"]] = { "name": arguments["name"], "expression": "%(name)s = SampleIfTrue(lambda: %(condition)s, " "lambda: %(input)s, lambda: %(initial)s, " "'%(name)s')" % arguments, } + # Add other dependencies self.element.other_dependencies[arguments["name"]] = { "initial": arguments["initial"].calls, @@ -986,16 +1515,33 @@ def build(self, arguments): class LookupsBuilder(StructureBuilder): - def __init__(self, lookups_str, component): + """Builder for regular Lookups.""" + def __init__(self, lookups_str: LookupsStructure, component: object): super().__init__(None, component) self.arguments = {} self.x = lookups_str.x self.y = lookups_str.y self.keyword = lookups_str.type - def build(self, arguments): + def build(self, arguments: dict) -> Union[BuildAST, None]: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST or None + The built object, unless the component has been added to an + existing object using the 'add' method. 
+ + """ self.component.type = "Lookup" self.component.subtype = "Normal" + # Get the numeric values as numpy arrays arguments["x"] = np.array2string( np.array(self.x), separator=",", @@ -1010,14 +1556,14 @@ def build(self, arguments): arguments["interp"] = self.keyword if "hardcoded_lookups" in self.element.objects: - # object already exists + # Object already exists, use 'add' method self.element.objects["hardcoded_lookups"]["expression"] += "\n\n"\ + self.element.objects["hardcoded_lookups"]["name"]\ + ".add(%(x)s, %(y)s, %(subscripts)s)" % arguments return None else: - # create a new object + # Create a new object self.section.imports.add("lookups", "HardcodedLookups") arguments["name"] = self.section.namespace.make_python_identifier( @@ -1041,17 +1587,34 @@ def build(self, arguments): class InlineLookupsBuilder(StructureBuilder): - def __init__(self, inlinelookups_str, component): + """Builder for inline Lookups.""" + def __init__(self, inlinelookups_str: InlineLookupsStructure, + component: object): super().__init__(None, component) self.arguments = { "value": inlinelookups_str.argument } self.lookups = inlinelookups_str.lookups - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.component.type = "Auxiliary" self.component.subtype = "with Lookup" self.section.imports.add("numpy") + # Get the numeric values as numpy arrays arguments["x"] = np.array2string( np.array(self.lookups.x), separator=",", @@ -1070,7 +1633,8 @@ def build(self, arguments): class ReferenceBuilder(StructureBuilder): - def __init__(self, reference_str, component): + """Builder for references to other variables.""" + def __init__(self, reference_str: ReferenceStructure, component: object): super().__init__(None, component) self.mapping_subscripts = {} self.reference = reference_str.reference @@ -1083,7 +1647,7 @@ def subscripts(self): return self._subscripts @subscripts.setter - def subscripts(self, subscripts): + def subscripts(self, subscripts: SubscriptsReferenceStructure): """Get subscript dictionary from reference""" self._subscripts = self.section.subscripts.make_coord_dict( getattr(subscripts, "subscripts", {})) @@ -1114,7 +1678,21 @@ def subscripts(self, subscripts): # do not change it self.mapping_subscripts[dim] = coordinates - def build(self, arguments): + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ if self.reference not in self.section.namespace.cleanspace: # Manage references to subscripts (subscripts used as variables) expression, subscripts =\ @@ -1146,7 +1724,7 @@ def build(self, arguments): original_subs = self.section.subscripts.make_coord_dict( self.section.subscripts.elements[reference]) - expression, final_subs = self.visit_subscripts( + expression, final_subs = self._visit_subscripts( expression, original_subs) return BuildAST( @@ -1155,7 +1733,26 @@ def build(self, arguments): subscripts=final_subs, order=0) - def visit_subscripts(self, expression, original_subs): + def _visit_subscripts(self, expression: str, original_subs: dict) -> tuple: + """ + Visit the subcripts of a reference to subset a subarray if neccessary + or apply mapping. 
+ + Parameters + ---------- + expression: str + The expression of visiting the variable. + original_subs: dict + The original subscript dict of the variable. + + Returns + ------- + expression: str + The expression with the necessary operations. + mapping_subscirpts: dict + The final subscripts of the reference after applying mapping. + + """ loc, rename, final_subs, reset_coords, to_float =\ visit_loc(self.subscripts, original_subs) @@ -1185,7 +1782,22 @@ def visit_subscripts(self, expression, original_subs): class NumericBuilder(StructureBuilder): - def build(self, arguments): + """Builder for numeric and nan values.""" + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ if np.isnan(self.value): self.section.imports.add("numpy") @@ -1203,7 +1815,22 @@ def build(self, arguments): class ArrayBuilder(StructureBuilder): - def build(self, arguments): + """Builder for arrays.""" + def build(self, arguments: dict) -> BuildAST: + """ + Build method. + + Parameters + ---------- + arguments: dict + The dictionary of builded arguments. + + Returns + ------- + built_ast: BuildAST + The built object. + + """ self.value = np.array2string( self.value.reshape(compute_shape(self.def_subs)), separator=",", @@ -1224,7 +1851,25 @@ def build(self, arguments): order=0) -def merge_dependencies(*dependencies, inplace=False): +def merge_dependencies(*dependencies: dict, inplace: bool = False) -> dict: + """ + Merge two dependencies dicts of an element. + + Parameters + ---------- + dependencies: dict + The dictionaries of dependencies to merge. + + inplace: bool (optional) + If True the final dependencies dict will be updated in the first + dependencies argument, mutating it. Default is False. + + Returns + ------- + current: dict + The final dependencies dict. 
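+
+    Example
+    -------
+    Illustrative call with placeholder variable names::
+
+        >>> merge_dependencies({"var_a": 1}, {"var_b": 1, "time": 1})
+        {'var_a': 1, 'var_b': 1, 'time': 1}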
+ + """ current = dependencies[0] if inplace: current = dependencies[0] @@ -1308,8 +1953,8 @@ def visit_loc(current_subs: dict, original_subs: dict, """ final_subs, rename, loc, reset_coords, to_float = {}, {}, [], False, True - for (dim, coord), (orig_dim, orig_coord)\ - in zip(current_subs.items(), original_subs.items()): + subscripts_zipped = zip(current_subs.items(), original_subs.items()) + for (dim, coord), (orig_dim, orig_coord) in subscripts_zipped: if len(coord) == 1: # subset a 1 dimension value # NUMPY: subset value [:, N, :, :] @@ -1365,39 +2010,62 @@ def visit_loc(current_subs: dict, original_subs: dict, class ASTVisitor: - builders = { - ae.InitialStructure: InitialBuilder, - ae.IntegStructure: IntegBuilder, - ae.DelayStructure: lambda x, y: DelayBuilder("Delay", x, y), - ae.DelayNStructure: lambda x, y: DelayBuilder("DelayN", x, y), - ae.DelayFixedStructure: DelayFixedBuilder, - ae.SmoothStructure: SmoothBuilder, - ae.SmoothNStructure: SmoothBuilder, - ae.TrendStructure: TrendBuilder, - ae.ForecastStructure: ForecastBuilder, - ae.SampleIfTrueStructure: SampleIfTrueBuilder, - ae.GetConstantsStructure: ExtConstantBuilder, - ae.GetDataStructure: ExtDataBuilder, - ae.GetLookupsStructure: ExtLookupBuilder, - ae.LookupsStructure: LookupsBuilder, - ae.InlineLookupsStructure: InlineLookupsBuilder, - ae.DataStructure: TabDataBuilder, - ae.ReferenceStructure: ReferenceBuilder, - ae.CallStructure: CallBuilder, - ae.GameStructure: GameBuilder, - ae.LogicStructure: OperationBuilder, - ae.ArithmeticStructure: OperationBuilder, + """ + ASTVisitor allows visiting the Abstract Synatx Tree of a component + returning the Python object and generating the neccessary objects. + + Parameters + ---------- + component: ComponentBuilder + The component builder to build. + + """ + _builders = { + InitialStructure: InitialBuilder, + IntegStructure: IntegBuilder, + DelayStructure: lambda x, y: DelayBuilder("Delay", x, y), + DelayNStructure: lambda x, y: DelayBuilder("DelayN", x, y), + DelayFixedStructure: DelayFixedBuilder, + SmoothStructure: SmoothBuilder, + SmoothNStructure: SmoothBuilder, + TrendStructure: TrendBuilder, + ForecastStructure: ForecastBuilder, + SampleIfTrueStructure: SampleIfTrueBuilder, + GetConstantsStructure: ExtConstantBuilder, + GetDataStructure: ExtDataBuilder, + GetLookupsStructure: ExtLookupBuilder, + LookupsStructure: LookupsBuilder, + InlineLookupsStructure: InlineLookupsBuilder, + DataStructure: TabDataBuilder, + ReferenceStructure: ReferenceBuilder, + CallStructure: CallBuilder, + GameStructure: GameBuilder, + LogicStructure: OperationBuilder, + ArithmeticStructure: OperationBuilder, int: NumericBuilder, float: NumericBuilder, np.ndarray: ArrayBuilder, } - def __init__(self, component): + def __init__(self, component: object): + # component typing should be ComponentBuilder, but importing it + # for typing would create a circular dependency :S self.ast = component.ast self.subscripts = component.subscripts_dict self.component = component def visit(self) -> Union[None, BuildAST]: + """ + Visit the Abstract Syntax Tree of the component. + + Returns + ------- + visit_out: BuildAST or None + The BuildAST object resulting from visiting the AST. If the + component content has been added to an existing object + using the 'add' method it will return None. 
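+
+        Example
+        -------
+        Typical usage from the component builder, where ``component`` is
+        a ComponentBuilder instance::
+
+            built = ASTVisitor(component).visit()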
+ + """ visit_out = self._visit(self.ast) if not visit_out: @@ -1443,7 +2111,10 @@ def visit(self) -> Union[None, BuildAST]: return visit_out def _visit(self, ast_object: AbstractSyntax) -> AbstractSyntax: - builder = self.builders[type(ast_object)](ast_object, self.component) + """ + Visit one Builder and its arguments. + """ + builder = self._builders[type(ast_object)](ast_object, self.component) arguments = { name: self._visit(value) for name, value in builder.arguments.items() diff --git a/pysd/building/python/python_model_builder.py b/pysd/building/python/python_model_builder.py index 8d8411cc..2775c1a0 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/building/python/python_model_builder.py @@ -560,7 +560,7 @@ def build_element(self) -> None: if expression["expr"].subscripts: # Get the values # NUMPY not necessary - expression["expr"].lower_order(0, force_0=True) + expression["expr"].lower_order(-1) expression["expr"].expression += ".values" if expression["loc_except"]: # There is an excep in the definition of the component diff --git a/pysd/pysd.py b/pysd/pysd.py index 6a8c2de3..dd399662 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -27,12 +27,12 @@ def read_xmile(xmile_file, data_files=None, initialize=True, missing_values="warning"): """ - Construct a model from `.xmile` file. + Construct a model from a Xmile file. Parameters ---------- xmile_file: str or pathlib.Path - The relative path filename for a raw `.xmile` file. + The relative path filename for a raw Xmile file. initialize: bool (optional) If False, the model will not be initialize when it is loaded. From cc5694700d026310e3e580f778cf309da0ad8854 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Tue, 10 May 2022 16:15:23 +0200 Subject: [PATCH 59/96] Document --- docs/advanced_usage.rst | 2 + docs/basic_usage.rst | 4 +- docs/conf.py | 4 +- docs/structure/model_class.rst | 13 +++++ docs/structure/python_builder.rst | 30 +++++++++- docs/structure/structure_index.rst | 23 ++++---- docs/structure/vensim_translation.rst | 2 +- docs/tables/functions.tab | 10 ++-- pysd/py_backend/statefuls.py | 81 +++++++++++++++++++++------ pysd/pysd.py | 8 ++- 10 files changed, 134 insertions(+), 43 deletions(-) create mode 100644 docs/structure/model_class.rst diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index b890164e..abadc212 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -117,8 +117,10 @@ Selecting and running a submodel A submodel of a translated model can be selected in order to run only a part of the original model. This can be done through the :py:data:`.select_submodel()` method: .. automethod:: pysd.py_backend.statefuls.Model.select_submodel + :noindex: In order to preview the needed exogenous variables the :py:data:`.get_dependencies()` method can be used: .. automethod:: pysd.py_backend.statefuls.Model.get_dependencies + :noindex: diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index 855a6ed0..bf8d9d3d 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -147,13 +147,13 @@ If the measured data that we are comparing with our model comes in at irregular Retrieving totally flat dataframe --------------------------------- -The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray`s in the output :py:class:`pandas.DataFrame`. 
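For instance, each cell of the corresponding column holds an :py:class:`xarray.DataArray` (sketch with a hypothetical subscripted variable name)::

    >>> result = model.run()
    >>> type(result["Subscripted Var"].iloc[-1])   # hypothetical variable name
    <class 'xarray.core.dataarray.DataArray'>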
To get a totally flat dataframe, like Vensim outuput the `flatten=True` when calling the run function:: +The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray` in the output :py:class:`pandas.DataFrame`. To get a totally flat dataframe, like Vensim outuput the `flatten=True` when calling the run function:: >>> model.run(flatten=True) Setting parameter values ------------------------ -In many cases, we want to modify the parameters of the model to investigate its behavior under different assumptions. There are several ways to do this in PySD, but the :py:func:`.run()` function gives us a convenient method in the params keyword argument. +In many cases, we want to modify the parameters of the model to investigate its behavior under different assumptions. There are several ways to do this in PySD, but the :py:func:`run()` function gives us a convenient method in the params keyword argument. This argument expects a dictionary whose keys correspond to the components of the model. The associated values can either be a constant, or a Pandas series whose indices are timestamps and whose values are the values that the model component should take on at the corresponding time. For instance, in our model we can set the room temperature to a constant value:: diff --git a/docs/conf.py b/docs/conf.py index 179e6e4c..9e2f05b7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -143,7 +143,9 @@ intersphinx_mapping = { 'python': ('https://docs.python.org/3.7', None), 'pysdcookbook': ('http://pysd-cookbook.readthedocs.org/en/latest/', None), - 'pandas': ('https://pandas.pydata.org/docs/', None) + 'pandas': ('https://pandas.pydata.org/docs/', None), + 'xarray': ('https://docs.xarray.dev/en/stable/', None), + 'numpy': ('https://numpy.org/doc/stable/', None) } # -- Options for autodoc -------------------------------------------------- diff --git a/docs/structure/model_class.rst b/docs/structure/model_class.rst new file mode 100644 index 00000000..59c93a69 --- /dev/null +++ b/docs/structure/model_class.rst @@ -0,0 +1,13 @@ +Python model class +================== + +Model class +----------- +.. autoclass:: pysd.py_backend.statefuls.Model + :members: + + +Macro class +----------- +.. autoclass:: pysd.py_backend.statefuls.Macro + :members: diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst index e550ed0b..b8fb5d7e 100644 --- a/docs/structure/python_builder.rst +++ b/docs/structure/python_builder.rst @@ -5,7 +5,8 @@ The Python builder allows to build models that can be run with the PySD Model cl The use of a one-to-one dictionary in translation means that the breadth of functionality is inherently limited. In the case where no direct Python equivalent is available, PySD provides a library of functions such as `pulse`, `step`, etc. that are specific to dynamic model behavior. -In addition to translating individual commands between Vensim/XMILE and Python, PySD reworks component identifiers to be Python-safe by replacing spaces with underscores. The translator allows source identifiers to make use of alphanumeric characters, spaces, or the $ symbol. +In addition to translating individual commands between Vensim/XMILE and Python, PySD reworks component identifiers to be Python-safe by replacing spaces with underscores. The translator allows source identifiers to make use of alphanumeric characters, spaces, or the symbol. + Main builders ------------- @@ -17,6 +18,33 @@ Expression builders .. 
automodule:: pysd.building.python.python_expressions_builder :members: +Supported expressions examples +------------------------------ +Operators +^^^^^^^^^ + +.. csv-table:: Supported unary operators + :file: ../tables/unary_python.csv + :header-rows: 1 + +.. csv-table:: Supported binary operators + :file: ../tables/binary_python.csv + :header-rows: 1 + +Functions +^^^^^^^^^ + +.. csv-table:: Supported basic functions + :file: ../tables/functions_python.csv + :header-rows: 1 + +.. csv-table:: Supported delay functions + :file: ../tables/delay_functions_python.csv + :header-rows: 1 + +.. csv-table:: Supported get functions + :file: ../tables/get_functions_python.csv + :header-rows: 1 Namespace manager ----------------- diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 5b9077c3..d8c957bc 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -38,27 +38,24 @@ The builders allow you to build the final model in the desired language. To do s python_builder -The Python model class ------------------------ +The Python model +---------------- +For loading a translated model with Python see :doc:`basic usage <../basic_usage>` or: .. toctree:: :maxdepth: 2 model_loading +The Python builder constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. -The translator constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. +The model class also contains a function for each of the model components, representing the essential model equations. The each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. -The model class also contains a function for each of the model components, representing the essential model equations. The docstring for each function contains the model documentation and units as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. +The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. -The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. The downside to this design choice is that several components of Vensim or XMILE functionality - the most significant being the infinite order delay - are intentionally not supported. 
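As a sketch of that query-based interaction (assuming the teacup example model used elsewhere in the documentation)::

    >>> model = pysd.read_vensim("Teacup.mdl")
    >>> model.components.teacup_temperature()   # value for the current stored state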
In many cases similar behavior can be approximated through other constructs. +Lastly, the model class provides a set of methods that are used to facilitate simulation. The .run() function returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. -Lastly, the model class provides a set of methods that are used to facilitate simulation. PySD uses the standard ordinary differential equations solver provided in the well-established Python library Scipy, which expects the state and its derivative to be represented as an ordered list. The model class provides the function .d_dt() that takes a state vector from the integrator and uses it to update the model state, and then calculates the derivative of each stock, returning them in a corresponding vector. A complementary function .state_vector() creates an ordered vector of states for use in initializing the integrator. - -The PySD class -^^^^^^^^^^^^^^ -The PySD class provides the machinery to get the model moving, supply it with data, or modify its parameters. In addition, this class is the primary way that users interact with the PySD module. - -The basic function for executing a model is appropriately named.run(). This function passes the model into scipy's odeint() ordinary differential equations solver. The scipy integrator is itself utilizing the lsoda integrator from the Fortran library odepack14, and so integration takes advantage of highly optimized low-level routines to improve speed. We use the model's timestep to set the maximum step size for the integrator's adaptive solver to ensure that the integrator properly accounts for discontinuities. +.. toctree:: + :maxdepth: 2 -The .run() function returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. \ No newline at end of file + model_class \ No newline at end of file diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index 399893b3..dcaf5556 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -10,7 +10,7 @@ The following translation workflow allows splitting the Vensim file while parsin 1. Vensim file: Splits the file content from the skecth and allows splitting the model in sections (main section, macro section) 2. Vensim section: Full set of varibles and definitions that can be integrated. Allows splitting the model expressions. -3. Vensim element: A definition in the mdl file which could be a subscript (sub)range definition or a variable definition. It includes units and commnets. Definitions for the same variable are grouped after in the same :py:class:`AbstractElement` object. Allows parsing its left hand side (LHS) to get the name of the subscript (sub)range or variable and it is returned as a specific type of component depending on the used assing operator (=, ==, :=, (), :) +3. 
Vensim element: A definition in the mdl file which could be a subscript (sub)range definition or a variable definition. It includes units and comments. Definitions for the same variable are grouped after in the same :py:class:`AbstractElement` object. Allows parsing its left hand side (LHS) to get the name of the subscript (sub)range or variable and it is returned as a specific type of component depending on the used assing operator (=, ==, :=, (), :) 4. Vensim component: The classified object for a variable definition, it depends on the opperator used to define the variable. Its right hand side (RHS) can be parsed to get the Abstract Syntax Tree (AST) of the expression. Once the model is parsed and broken following the previous steps. The :py:class:`AbstractModel` can be returned. diff --git a/docs/tables/functions.tab b/docs/tables/functions.tab index 34124719..bd891f63 100644 --- a/docs/tables/functions.tab +++ b/docs/tables/functions.tab @@ -26,10 +26,10 @@ VMAX VMAX(A) "CallStructure('vmax', (A,))" pysd.functions.vmax(A) SUM SUM(A) "CallStructure('sum', (A,))" pysd.functions.sum(A) PROD PROD(A) "CallStructure('prod', (A,))" pysd.functions.prod(A) -PULSE PULSE pysd.functions.pulse -PULSE TRAIN PULSE TRAIN pysd.functions.pulse_train -RAMP RAMP pysd.functions.ramp -STEP STEP pysd.functions.step +PULSE PULSE(start, width) "CallStructure('train', (start, width))" pysd.functions.pulse +PULSE TRAIN PULSE TRAIN(start, width, tbetween, end) "CallStructure('pulse_train', (start, width, tbetween, end))" pysd.functions.pulse_train +RAMP RAMP(slope, start time, end time) "CallStructure('step', (slope, start time, end time))" pysd.functions.ramp +STEP STEP(height, step time) "CallStructure('step', (height, step time))" pysd.functions.step GAME GAME(A) GameStructure(A) A -INITIAL INITIAL(value) init init(value) InitialStructure(value) pysd.statefuls.Initial +INITIAL INITIAL(value) init init(value) InitialStructure(value) pysd.statefuls.Initial SAMPLE IF TRUE "SAMPLE IF TRUE(condition, input, initial_value)" "SampleIfTrueStructure(condition, input, initial_value)" pysd.statefuls.SampleIfTrue(...) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index a3ffae9b..72981fa4 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -579,32 +579,44 @@ def export(self): class Macro(DynamicStateful): """ - The Model class implements a stateful representation of the system, - and contains the majority of methods for accessing and modifying model + The Macro class implements a stateful representation of the system, + and contains the majority of methods for accessing and modifying components. When the instance in question also serves as the root model object (as opposed to a macro or submodel within another model) it will have added methods to facilitate execution. - """ + The Macro object will be created with components drawn from a + translated python model file. + + Parameters + ---------- + py_model_file: str or pathlib.Path + Filename of a model or macro which has already been converted + into a python format. + params: dict or None (optional) + Dictionary of the macro parameters. Default is None. + return_func: str or None (optional) + The name of the function to return from the macro. Default is None. + time: components.Time or None (optional) + Time object for integration. If None a new time object will + be generated (for models), if passed the time object will be + used (for macros). Default is None. 
+ time_initialization: callable or None + Time to set at the begginning of the Macro. Default is None. + data_files: dict or list or str or None + The dictionary with keys the name of file and variables to + load the data from there. Or the list of names or name of the + file to search the data in. Only works for TabData type object + and it is neccessary to provide it. Default is None. + py_name: str or None + The name of the Macro object. Default is None. + + """ def __init__(self, py_model_file, params=None, return_func=None, time=None, time_initialization=None, data_files=None, py_name=None): - """ - The model object will be created with components drawn from a - translated python model file. - - Parameters - ---------- - py_model_file : - Filename of a model which has already been converted into a - python format. - get_time: - needs to be a function that returns a time object - params - return_func - """ super().__init__() self.time = time self.time_initialization = time_initialization @@ -728,6 +740,10 @@ def modules(self) -> Union[dict, None]: return self._modules.copy() or None def clean_caches(self): + """ + Clean the cahce of the object and the macros objects that it + contains + """ self.cache.clean() # if nested macros [macro.clean_caches() for macro in self._macro_elements] @@ -1508,6 +1524,37 @@ def __str__(self): class Model(Macro): + """ + The Model class implements a stateful representation of the system. + It inherits methods from the Macro class to integrate the model and + access and modify model components. It also contains the main + methods for running the model. + + The Model object will be created with components drawn from a + translated python model file. + + Parameters + ---------- + py_model_file: str or pathlib.Path + Filename of a model which has already been converted into a + python format. + data_files: dict or list or str or None + The dictionary with keys the name of file and variables to + load the data from there. Or the list of names or name of the + file to search the data in. Only works for TabData type object + and it is neccessary to provide it. Default is None. + initialize: bool + If False, the model will not be initialize when it is loaded. + Default is True. + missing_values : str ("warning", "error", "ignore", "keep") (optional) + What to do with missing values. If "warning" (default) + shows a warning message and interpolates the values. + If "raise" raises an error. If "ignore" interpolates + the values without showing anything. If "keep" it will keep + the missing values, this option may cause the integration to + fail, but it may be used to check the quality of the data. + + """ def __init__(self, py_model_file, data_files, initialize, missing_values): """ Sets up the python objects """ super().__init__(py_model_file, None, None, Time(), diff --git a/pysd/pysd.py b/pysd/pysd.py index dd399662..5b6b4d51 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -176,9 +176,11 @@ def load(py_model_file, data_files=None, initialize=True, If False, the model will not be initialize when it is loaded. Default is True. - data_files: list or str or None (optional) - If given the list of files where the necessary data to run the model - is given. Default is None. + data_files: dict or list or str or None + The dictionary with keys the name of file and variables to + load the data from there. Or the list of names or name of the + file to search the data in. Only works for TabData type object + and it is neccessary to provide it. Default is None. 
missing_values : str ("warning", "error", "ignore", "keep") (optional) What to do with missing values. If "warning" (default) From 35a71cff9d99325f75b71f3bf6f452cb2f151c43 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 11 May 2022 16:44:55 +0200 Subject: [PATCH 60/96] Merge and correct PULSE functions --- docs/structure/vensim_translation.rst | 11 +- docs/structure/xmile_translation.rst | 6 + docs/tables/functions.tab | 11 +- pysd/building/python/python_functions.py | 16 +- pysd/py_backend/functions.py | 125 ++----- pysd/translation/vensim/vensim_structures.py | 6 +- pysd/translation/xmile/xmile_structures.py | 13 + tests/pytest_pysd/pytest_functions.py | 352 +++++++++++++++++++ tests/unit_test_functions.py | 322 ----------------- 9 files changed, 436 insertions(+), 426 deletions(-) create mode 100644 tests/pytest_pysd/pytest_functions.py delete mode 100644 tests/unit_test_functions.py diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index dcaf5556..1211e916 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -55,7 +55,7 @@ All the basic operators are supported, this includes the ones shown in the table :file: ../tables/binary_vensim.csv :header-rows: 1 -Moreover, the Vensim :EXCEPT: operator is also supported to manage exceptions in the subscripts. See the section for subscripts. TODO include link +Moreover, the Vensim :EXCEPT: operator is also supported to manage exceptions in the subscripts. See the :ref:`Subscripts section`. Functions ^^^^^^^^^ @@ -79,6 +79,8 @@ Stocks Stocks defined in Vensim as `INTEG(flow, initial_value)` are supported and are translated to the AST as `IntegStructure(flow, initial_value)`. +.. _Subscripts section: + Subscripts ^^^^^^^^^^ Several subscript related features all supported. This include: @@ -87,10 +89,13 @@ Several subscript related features all supported. This include: - Subscript ranges and subranges definitions. - Basic subscript mapping where the subscript range is mapping to a full range (e.g. new_dim: A, B, C -> dim, dim_other), mapping to a partial range is not supported yet (e.g. new_dim: A, B, C -> dim: E, F, G). - Subscript copy (e.g. new_dim <-> dim). -- \:EXCEPT: operator. +- \:EXCEPT: operator with any number of arguments. - Subscript usage as a variable (e.g. my_var[dim] = another var * dim). - Subscript vectorial opperations (e.g. SUM(my var[dim, dim!])). +Lookups +^^^^^^^ +Vensim Lookups expressions are supported they can be given hardcoded, using `GET LOOKUPS` function or using `WITH LOOKUPS` function. Data ^^^^ @@ -105,6 +110,6 @@ Vensim macros are supported, The macro content between the keywords \:MACRO: and Planed New Functions and Features --------------------------------- - ALLOCATE BY PRIORITY -- VECTOR SELECT - GET TIME VALUE - SHIFT IF TRUE +- VECTOR SELECT diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst index 314dcbe5..d070c949 100644 --- a/docs/structure/xmile_translation.rst +++ b/docs/structure/xmile_translation.rst @@ -86,6 +86,12 @@ Several subscript related features all supported. This include: - Basic subscript operations with different ranges. - Subscript ranges and subranges definitions. +Grafic functions +^^^^^^^^^^^^^^^^ +Xmile grafic functions (gf), also known as lookups, are supported. They can be given hardcoded or inline. + +.. warning:: + Interpolation methods 'extrapolate' and 'discrete' are implemented but not tested. 
Full integration models with this methods are required. Supported in Vensim but not in Xmile ------------------------------------ diff --git a/docs/tables/functions.tab b/docs/tables/functions.tab index bd891f63..2377741f 100644 --- a/docs/tables/functions.tab +++ b/docs/tables/functions.tab @@ -26,10 +26,13 @@ VMAX VMAX(A) "CallStructure('vmax', (A,))" pysd.functions.vmax(A) SUM SUM(A) "CallStructure('sum', (A,))" pysd.functions.sum(A) PROD PROD(A) "CallStructure('prod', (A,))" pysd.functions.prod(A) -PULSE PULSE(start, width) "CallStructure('train', (start, width))" pysd.functions.pulse -PULSE TRAIN PULSE TRAIN(start, width, tbetween, end) "CallStructure('pulse_train', (start, width, tbetween, end))" pysd.functions.pulse_train -RAMP RAMP(slope, start time, end time) "CallStructure('step', (slope, start time, end time))" pysd.functions.ramp -STEP STEP(height, step time) "CallStructure('step', (height, step time))" pysd.functions.step +PULSE PULSE(start, width) "CallStructure('pulse', (start, width))" pysd.functions.pulse(start, width=width) + pulse pulse(magnitude, start) "CallStructure('Xpulse', (start, magnitude))" pysd.functions.pulse(start, magnitude=magnitude) Not tested for Xmile! + pulse pulse(magnitude, start, interval) "CallStructure('Xpulse_train', (start, interval, magnitude))" pysd.functions.pulse(start, repeat_time=interval, magnitude=magnitude) Not tested for Xmile! +PULSE TRAIN PULSE TRAIN(start, width, tbetween, end) "CallStructure('pulse_train', (start, tbetween, width, end))" pysd.functions.pulse(start, repeat_time=tbetween, width=width, end=end) +RAMP RAMP(slope, start_time, end_time) ramp ramp(slope, start_time, end_time) "CallStructure('ramp', (slope, start_time, end_time))" pysd.functions.ramp(time, slope, start_time, end_time) Not tested for Xmile! + ramp ramp(slope, start_time) "CallStructure('ramp', (slope, start_time))" pysd.functions.ramp(time, slope, start_time) Not tested for Xmile! +STEP STEP(height, step_time) step step(height, step_time) "CallStructure('step', (height, step_time))" pysd.functions.step(time, height, step_time) Not tested for Xmile! GAME GAME(A) GameStructure(A) A INITIAL INITIAL(value) init init(value) InitialStructure(value) pysd.statefuls.Initial SAMPLE IF TRUE "SAMPLE IF TRUE(condition, input, initial_value)" "SampleIfTrueStructure(condition, input, initial_value)" pysd.statefuls.SampleIfTrue(...) 
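As a sketch of what the rows above translate to in the generated Python code (argument names as in the table, simplified)::

    # Vensim PULSE(start, width)
    pulse(__data['time'], start, width=width)
    # Vensim PULSE TRAIN(start, width, tbetween, end)
    pulse(__data['time'], start, repeat_time=tbetween, width=width, end=end)
    # Xmile pulse(magnitude, start, interval)
    pulse(__data['time'], start, repeat_time=interval, magnitude=magnitude)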
diff --git a/pysd/building/python/python_functions.py b/pysd/building/python/python_functions.py index 2c25295e..fcbb2d44 100644 --- a/pysd/building/python/python_functions.py +++ b/pysd/building/python/python_functions.py @@ -51,14 +51,24 @@ "modulo(%(0)s, %(1)s)", ("functions", "modulo")), "pulse": ( - "pulse(__data['time'], %(0)s, %(1)s)", + "pulse(__data['time'], %(0)s, width=%(1)s)", + ("functions", "pulse")), + "Xpulse": ( + "pulse(__data['time'], %(0)s, magnitude=%(1)s)", ("functions", "pulse")), "pulse_train": ( - "pulse_train(__data['time'], %(0)s, %(1)s, %(2)s, %(3)s)", - ("functions", "pulse_train")), + "pulse(__data['time'], %(0)s, repeat_time=%(1)s, width=%(2)s, "\ + "end=%(3)s)", + ("functions", "pulse")), + "Xpulse_train": ( + "pulse(__data['time'], %(0)s, repeat_time=%(1)s, magnitude=%(2)s)", + ("functions", "pulse")), "quantum": ( "quantum(%(0)s, %(1)s)", ("functions", "quantum")), + "Xramp": ( + "ramp(__data['time'], %(0)s, %(1)s)", + ("functions", "ramp")), "ramp": ( "ramp(__data['time'], %(0)s, %(1)s, %(2)s)", ("functions", "ramp")), diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index e8a2884e..c6207ebc 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -5,6 +5,7 @@ makes it easy for the model elements to call. """ +from timeit import repeat import warnings import numpy as np @@ -13,20 +14,21 @@ small_vensim = 1e-6 # What is considered zero according to Vensim Help -def ramp(time, slope, start, finish=0): +def ramp(time, slope, start, finish=None): """ Implements vensim's and xmile's RAMP function. Parameters ---------- time: function - The current time of modelling. + Function that returns the current time. slope: float The slope of the ramp starting at zero at time start. start: float Time at which the ramp begins. - finish: float - Optional. Time at which the ramp ends. + finish: float or None (oprional) + Time at which the ramp ends. If None the ramp will never end. + Default is None. Returns ------- @@ -39,7 +41,7 @@ def ramp(time, slope, start, finish=0): if t < start: return 0 else: - if finish <= 0: + if finish is None: return slope * (t - start) elif t > finish: return slope * (finish - start) @@ -53,6 +55,8 @@ def step(time, value, tstep): Parameters ---------- + time: function + Function that returns the current time. value: float The height of the step. tstep: float @@ -69,37 +73,10 @@ def step(time, value, tstep): return value if time() >= tstep else 0 -def pulse(time, start, duration): - """ - Implements vensim's PULSE function. - - Parameters - ---------- - time: function - Function that returns the current time. - start: float - Starting time of the pulse. - duration: float - Duration of the pulse. - - Returns - ------- - float: - - In range [-inf, start): - returns 0 - - In range [start, start + duration): - returns 1 - - In range [start + duration, +inf]: - returns 0 - - """ - t = time() - return 1 if start <= t < start + duration else 0 - - -def pulse_train(time, start, duration, repeat_time, end): +def pulse(time, start, repeat_time=0, width=None, magnitude=None, end=None): """ - Implements vensim's PULSE TRAIN function. + Implements Vensim's PULSE and PULSE TRAIN functions and Xmile's PULSE + function. Parameters ---------- @@ -107,78 +84,42 @@ def pulse_train(time, start, duration, repeat_time, end): Function that returns the current time. start: float Starting time of the pulse. - duration: float - Duration of the pulse. - repeat_time: float - Time interval of the pulse repetition. 
- end: float - Final time of the pulse. + repeat_time: float (optional) + Time interval of the pulse repetition. If 0 it will return a + single pulse. Default is 0. + width: float or None (optional) + Duration of the pulse. If None only one-time_step pulse will be + generated. Default is None. + magnitude: float or None (optional) + The magnitude of the pulse. If None it will return 1 when the + pulse happens, similar to magnitude=time_step(). Default is None. + end: float or None (optional) + Final time of the pulse. If None there is no final time. + Default is None. Returns ------- float: - In range [-inf, start): returns 0 - - In range [start + n*repeat_time, start + n*repeat_time + duration): - returns 1 - - In range [start + n*repeat_time + duration, + - In range [start + n*repeat_time, start + n*repeat_time + width): + returns magnitude/time_step or 1 + - In range [start + n*repeat_time + width, start + (n+1)*repeat_time): returns 0 """ t = time() - if start <= t < end: - return 1 if (t - start) % repeat_time < duration else 0 + width = .5*time.time_step() if width is None else width + out = magnitude/time.time_step() if magnitude is not None else 1 + if repeat_time == 0: + return out if start - small_vensim <= t < start + width else 0 + elif start <= t and (end is None or t < end): + return out if (t - start + small_vensim) % repeat_time < width else 0 else: return 0 -def pulse_magnitude(time, magnitude, start, repeat_time=0): - """ - Implements xmile's PULSE function. Generate a one-DT wide pulse - at the given time. - - Parameters - ---------- - time: function - Function that returns the current time. - magnitude: - Magnitude of the pulse. - start: float - Starting time of the pulse. - repeat_time: float (optional) - Time interval of the pulse repetition. Default is 0, only one - pulse will be generated. - - Notes - ----- - PULSE(time(), 20, 12, 5) generates a pulse value of 20/DT at - time 12, 17, 22, etc. - - Returns - ------- - float: - - In rage [-inf, start): - returns 0 - - In range [start + n*repeat_time, start + n*repeat_time + dt): - returns magnitude/dt - - In rage [start + n*repeat_time + dt, start + (n+1)*repeat_time): - returns 0 - - """ - t = time() - if repeat_time <= small_vensim: - if abs(t - start) < time.time_step(): - return magnitude * time.time_step() - else: - return 0 - else: - if abs((t - start) % repeat_time) < time.time_step(): - return magnitude * time.time_step() - else: - return 0 - - def if_then_else(condition, val_if_true, val_if_false): """ Implements Vensim's IF THEN ELSE function. 
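A few illustrative calls of the merged helper, mirroring the behaviour exercised in the new tests below::

    from pysd.py_backend.functions import pulse, ramp

    pulse(lambda: 1, 1, width=3)                          # single pulse -> 1
    pulse(lambda: 6, 1, repeat_time=5, width=3, end=12)   # pulse train -> 1
    pulse(lambda: 15, 1, repeat_time=5, width=3, end=13)  # after end time -> 0
    ramp(lambda: 24, .5, 10)                              # no finish time -> 7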
diff --git a/pysd/translation/vensim/vensim_structures.py b/pysd/translation/vensim/vensim_structures.py index 0f955ebe..84e1f5a8 100644 --- a/pysd/translation/vensim/vensim_structures.py +++ b/pysd/translation/vensim/vensim_structures.py @@ -38,10 +38,12 @@ "forecast": lambda x, y, z: ae.ForecastStructure(x, y, z, 0), "sample_if_true": ae.SampleIfTrueStructure, "lookup": ae.LookupsStructure, - "data": ae.DataStructure + "data": ae.DataStructure, + "pulse_train": lambda start, width, interval, end: ae.CallStructure( + ae.ReferenceStructure("pulse_train"), + (start, interval, width, end)) } - operators = { "logic_ops": [":AND:", ":OR:"], "not_ops": [":NOT:"], diff --git a/pysd/translation/xmile/xmile_structures.py b/pysd/translation/xmile/xmile_structures.py index 6d2211f5..01fe23e0 100644 --- a/pysd/translation/xmile/xmile_structures.py +++ b/pysd/translation/xmile/xmile_structures.py @@ -52,6 +52,19 @@ }, "if_then_else": lambda x, y, z: ae.CallStructure( ae.ReferenceStructure("if_then_else"), (x, y, z)), + "ramp": { + 2: lambda x, y: ae.CallStructure( + ae.ReferenceStructure("Xramp"), (x, y)), + 3: lambda x, y, z: ae.CallStructure( + ae.ReferenceStructure("ramp"), (x, y, z)) + }, + "pulse": { + 2: lambda magnitude, start: ae.CallStructure( + ae.ReferenceStructure("Xpulse"), (start, magnitude)), + 3: lambda magnitude, start, interval: ae.CallStructure( + ae.ReferenceStructure("Xpulse_train"), (start, interval, magnitude) + ) + }, "negative": lambda x: ae.ArithmeticStructure(["negative"], (x,)), "int": lambda x: ae.CallStructure( ae.ReferenceStructure("integer"), (x,)) diff --git a/tests/pytest_pysd/pytest_functions.py b/tests/pytest_pysd/pytest_functions.py new file mode 100644 index 00000000..5316267e --- /dev/null +++ b/tests/pytest_pysd/pytest_functions.py @@ -0,0 +1,352 @@ +import pytest +import numpy as np +import xarray as xr + +from pysd.py_backend.functions import\ + ramp, step, pulse, xidz, zidz, if_then_else, sum, prod, vmin, vmax,\ + invert_matrix + + +class TestInputFunctions(): + def test_ramp(self): + assert ramp(lambda: 14, .5, 10, 18) == 2 + + assert ramp(lambda: 4, .5, 10, 18) == 0 + + assert ramp(lambda: 24, .5, 10, 18) == 4 + + assert ramp(lambda: 24, .5, 10) == 7 + + assert ramp(lambda: 50, .5, 10) == 20 + + def test_step(self): + assert step(lambda: 5, 1, 10) == 0 + + assert step(lambda: 15, 1, 10) == 1 + + assert step(lambda: 10, 1, 10) == 1 + + def test_pulse(self): + assert pulse(lambda: 0, 1, width=3) == 0 + + assert pulse(lambda: 1, 1, width=3) == 1 + + assert pulse(lambda: 2, 1, width=3) == 1 + + assert pulse(lambda: 4, 1, width=3) == 0 + + assert pulse(lambda: 5, 1, width=3) == 0 + + def test_pulse_chain(self): + # before train starts + assert pulse(lambda: 0, 1, repeat_time=5, width=3, end=12) == 0 + # on train start + assert pulse(lambda: 1, 1, repeat_time=5, width=3, end=12) == 1 + # within first pulse + assert pulse(lambda: 2, 1, repeat_time=5, width=3, end=12) == 1 + # end of first pulse + assert pulse(lambda: 4, 1, repeat_time=5, width=3, end=12) == 0 + # after first pulse before second + assert pulse(lambda: 5, 1, repeat_time=5, width=3, end=12) == 0 + # on start of second pulse + assert pulse(lambda: 6, 1, repeat_time=5, width=3, end=12) == 1 + # within second pulse + assert pulse(lambda: 7, 1, repeat_time=5, width=3, end=12) == 1 + # after second pulse + assert pulse(lambda: 10, 1, repeat_time=5, width=3, end=12) == 0 + # on third pulse + assert pulse(lambda: 11, 1, repeat_time=5, width=3, end=12) == 1 + # on train end + assert pulse(lambda: 12, 1, 
repeat_time=5, width=3, end=12) == 0 + # after train + assert pulse(lambda: 15, 1, repeat_time=5, width=3, end=13) == 0 + + def test_pulse_magnitude(self): + from pysd.py_backend.statefuls import Time + + # Pulse function with repeat time + # before first impulse + t = Time() + t.set_control_vars(initial_time=0, time_step=1) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 0 + # first impulse + t.update(2) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 10 + # after first impulse and before second + t.update(4) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 0 + # second impulse + t.update(7) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 10 + # after second and before third impulse + t.update(9) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 0 + # third impulse + t.update(12) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 10 + # after third impulse + t.update(14) + assert pulse(t, 2, repeat_time=5, magnitude=10) == 0 + + t = Time() + t.set_control_vars(initial_time=0, time_step=0.2) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 0 + # first impulse + t.update(2) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 0 + # after first impulse and before second + t.update(3) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 25 + # second impulse + t.update(7) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 0 + # after second and before third impulse + t.update(8) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 25 + # third impulse + t.update(12) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 0 + # after third impulse + t.update(13) + assert pulse(t, 3, repeat_time=5, magnitude=5) == 25 + + # Pulse function without repeat time + # before first impulse + t = Time() + t.set_control_vars(initial_time=0, time_step=1) + assert pulse(t, 2, magnitude=10) == 0 + # first impulse + t.update(2) + assert pulse(t, 2, magnitude=10) == 10 + # after first impulse and before second + t.update(4) + assert pulse(t, 2, magnitude=10) == 0 + # second impulse + t.update(7) + assert pulse(t, 2, magnitude=10) == 0 + # after second and before third impulse + t.update(9) + assert pulse(t, 2, magnitude=10) == 0 + + t = Time() + t.set_control_vars(initial_time=0, time_step=0.1) + assert pulse(t, 4, magnitude=10) == 0 + # first impulse + t.update(2) + assert pulse(t, 4, magnitude=10) == 0 + # after first impulse and before second + t.update(4) + assert pulse(t, 4, magnitude=10) == 100 + # second impulse + t.update(7) + assert pulse(t, 4, magnitude=10) == 0 + # after second and before third impulse + t.update(9) + assert pulse(t, 4, magnitude=10) == 0 + + def test_numeric_error(self): + from pysd.py_backend.statefuls import Time + time = Time() + time.set_control_vars(initial_time=0, time_step=0.1, final_time=10) + err = 4e-16 + + # upper numeric error + time.update(3 + err) + assert 3 != time(), "there is no numeric error included" + + assert pulse(time, 3) == 1 + assert pulse(time, 1, repeat_time=2) == 1 + + # lower numeric error + time.update(3 - err) + assert 3 != time(), "there is no numeric error included" + + assert pulse(time, 3) == 1 + assert pulse(time, 1, repeat_time=2) == 1 + + def test_xidz(self): + assert xidz(1, -0.00000001, 5) == 5 + assert xidz(1, 0, 5) == 5 + assert xidz(1, 8, 5) == 0.125 + + def test_zidz(self): + assert zidz(1, -0.00000001) == 0 + assert zidz(1, 0) == 0 + assert zidz(1, 8) == 0.125 + + +class TestLogicFunctions(): + def test_if_then_else_basic(self): + assert if_then_else(True, lambda: 1, lambda: 0) == 1 + assert if_then_else(False, 
lambda: 1, lambda: 0) == 0 + + # Ensure lazzy evaluation + assert if_then_else(True, lambda: 1, lambda: 1/0) == 1 + assert if_then_else(False, lambda: 1/0, lambda: 0) == 0 + + with pytest.raises(ZeroDivisionError): + if_then_else(True, lambda: 1/0, lambda: 0) + with pytest.raises(ZeroDivisionError): + if_then_else(False, lambda: 1, lambda: 1/0) + + def test_if_then_else_with_subscripted(self): + # this test only test the lazzy evaluation and basics + # subscripted_if_then_else test all the possibilities + coords = {'dim1': [0, 1], 'dim2': [0, 1]} + dims = list(coords) + + xr_true = xr.DataArray([[True, True], [True, True]], coords, dims) + xr_false = xr.DataArray([[False, False], [False, False]], coords, dims) + xr_mixed = xr.DataArray([[True, False], [False, True]], coords, dims) + + out_mixed = xr.DataArray([[1, 0], [0, 1]], coords, dims) + + assert if_then_else(xr_true, lambda: 1, lambda: 0) == 1 + assert if_then_else(xr_false, lambda: 1, lambda: 0) == 0 + assert if_then_else(xr_mixed, lambda: 1, lambda: 0).equals(out_mixed) + + # Ensure lazzy evaluation + assert if_then_else(xr_true, lambda: 1, lambda: 1/0) == 1 + assert if_then_else(xr_false, lambda: 1/0, lambda: 0) == 0 + + with pytest.raises(ZeroDivisionError): + if_then_else(xr_true, lambda: 1/0, lambda: 0) + with pytest.raises(ZeroDivisionError): + if_then_else(xr_false, lambda: 1, lambda: 1/0) + with pytest.raises(ZeroDivisionError): + if_then_else(xr_mixed, lambda: 1/0, lambda: 0) + with pytest.raises(ZeroDivisionError): + if_then_else(xr_mixed, lambda: 1, lambda: 1/0) + + +class TestFunctions(): + + def test_sum(self): + """ + Test for sum function + """ + coords = {'d1': [9, 1], 'd2': [2, 4]} + coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} + dims = ['d1', 'd2'] + + data = xr.DataArray([[1, 2], [3, 4]], coords, dims) + + assert sum(data, dim=['d1']).equals( + xr.DataArray([4, 6], coords_d2, ['d2'])) + assert sum(data, dim=['d2']).equals( + xr.DataArray([3, 7], coords_d1, ['d1'])) + assert sum(data, dim=['d1', 'd2']) == 10 + assert sum(data) == 10 + + def test_prod(self): + """ + Test for sum function + """ + coords = {'d1': [9, 1], 'd2': [2, 4]} + coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} + dims = ['d1', 'd2'] + + data = xr.DataArray([[1, 2], [3, 4]], coords, dims) + + assert prod(data, dim=['d1']).equals( + xr.DataArray([3, 8], coords_d2, ['d2'])) + assert prod(data, dim=['d2']).equals( + xr.DataArray([2, 12], coords_d1, ['d1'])) + assert prod(data, dim=['d1', 'd2']) == 24 + assert prod(data) == 24 + + def test_vmin(self): + """ + Test for vmin function + """ + coords = {'d1': [9, 1], 'd2': [2, 4]} + coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} + dims = ['d1', 'd2'] + + data = xr.DataArray([[1, 2], [3, 4]], coords, dims) + + assert vmin(data, dim=['d1']).equals( + xr.DataArray([1, 2], coords_d2, ['d2'])) + assert vmin(data, dim=['d2']).equals( + xr.DataArray([1, 3], coords_d1, ['d1'])) + assert vmin(data, dim=['d1', 'd2']) == 1 + assert vmin(data) == 1 + + def test_vmax(self): + """ + Test for vmax function + """ + coords = {'d1': [9, 1], 'd2': [2, 4]} + coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} + dims = ['d1', 'd2'] + + data = xr.DataArray([[1, 2], [3, 4]], coords, dims) + + assert vmax(data, dim=['d1']).equals( + xr.DataArray([3, 4], coords_d2, ['d2'])) + assert vmax(data, dim=['d2']).equals( + xr.DataArray([2, 4], coords_d1, ['d1'])) + assert vmax(data, dim=['d1', 'd2']) == 4 + assert vmax(data) == 4 + + def test_invert_matrix(self): + """ + Test for invert_matrix function + """ + 
coords1 = {'d1': ['a', 'b'], 'd2': ['a', 'b']} + coords2 = {'d0': ['1', '2'], 'd1': ['a', 'b'], 'd2': ['a', 'b']} + coords3 = {'d0': ['1', '2'], + 'd1': ['a', 'b', 'c'], + 'd2': ['a', 'b', 'c']} + + data1 = xr.DataArray([[1, 2], [3, 4]], coords1, ['d1', 'd2']) + data2 = xr.DataArray([[[1, 2], [3, 4]], [[-1, 2], [5, 4]]], + coords2, + ['d0', 'd1', 'd2']) + data3 = xr.DataArray([[[1, 2, 3], [3, 7, 2], [3, 4, 6]], + [[-1, 2, 3], [4, 7, 3], [5, 4, 6]]], + coords3, + ['d0', 'd1', 'd2']) + + for data in [data1, data2, data3]: + datai = invert_matrix(data) + assert data.dims == datai.dims + + if len(data.shape) == 2: + # two dimensions xarrays + assert ( + abs(np.dot(data, datai) - np.dot(datai, data)) + < 1e-14 + ).all() + assert ( + abs(np.dot(data, datai) - np.identity(data.shape[-1])) + < 1e-14 + ).all() + else: + # three dimensions xarrays + for i in range(data.shape[0]): + assert ( + abs(np.dot(data[i], datai[i]) + - np.dot(datai[i], data[i])) + < 1e-14 + ).all() + assert ( + abs(np.dot(data[i], datai[i]) + - np.identity(data.shape[-1])) + < 1e-14 + ).all() + + def test_incomplete(self): + from pysd.py_backend.functions import incomplete + from warnings import catch_warnings + + with catch_warnings(record=True) as w: + incomplete() + assert len(w) == 1 + assert 'Call to undefined function' in str(w[-1].message) + + def test_not_implemented_function(self): + from pysd.py_backend.functions import not_implemented_function + + with pytest.raises(NotImplementedError): + not_implemented_function("NIF") diff --git a/tests/unit_test_functions.py b/tests/unit_test_functions.py deleted file mode 100644 index 97a30e73..00000000 --- a/tests/unit_test_functions.py +++ /dev/null @@ -1,322 +0,0 @@ -import unittest - -import numpy as np -import xarray as xr - - -class TestInputFunctions(unittest.TestCase): - def test_ramp(self): - from pysd.py_backend.functions import ramp - - self.assertEqual(ramp(lambda: 14, .5, 10, 18), 2) - - self.assertEqual(ramp(lambda: 4, .5, 10, 18), 0) - - self.assertEqual(ramp(lambda: 24, .5, 10, 18), 4) - - self.assertEqual(ramp(lambda: 24, .5, 10, -1), 7) - - def test_step(self): - from pysd.py_backend.functions import step - - self.assertEqual(step(lambda: 5, 1, 10), 0) - - self.assertEqual(step(lambda: 15, 1, 10), 1) - - self.assertEqual(step(lambda: 10, 1, 10), 1) - - def test_pulse(self): - from pysd.py_backend.functions import pulse - - self.assertEqual(pulse(lambda: 0, 1, 3), 0) - - self.assertEqual(pulse(lambda: 1, 1, 3), 1) - - self.assertEqual(pulse(lambda: 2, 1, 3), 1) - - self.assertEqual(pulse(lambda: 4, 1, 3), 0) - - self.assertEqual(pulse(lambda: 5, 1, 3), 0) - - def test_pulse_chain(self): - from pysd.py_backend.functions import pulse_train - # before train starts - self.assertEqual(pulse_train(lambda: 0, 1, 3, 5, 12), 0) - # on train start - self.assertEqual(pulse_train(lambda: 1, 1, 3, 5, 12), 1) - # within first pulse - self.assertEqual(pulse_train(lambda: 2, 1, 3, 5, 12), 1) - # end of first pulse - self.assertEqual(pulse_train(lambda: 4, 1, 3, 5, 12), 0) - # after first pulse before second - self.assertEqual(pulse_train(lambda: 5, 1, 3, 5, 12), 0) - # on start of second pulse - self.assertEqual(pulse_train(lambda: 6, 1, 3, 5, 12), 1) - # within second pulse - self.assertEqual(pulse_train(lambda: 7, 1, 3, 5, 12), 1) - # after second pulse - self.assertEqual(pulse_train(lambda: 10, 1, 3, 5, 12), 0) - # on third pulse - self.assertEqual(pulse_train(lambda: 11, 1, 3, 5, 12), 1) - # on train end - self.assertEqual(pulse_train(lambda: 12, 1, 3, 5, 12), 0) - # 
after train - self.assertEqual(pulse_train(lambda: 15, 1, 3, 5, 13), 0) - - def test_pulse_magnitude(self): - from pysd.py_backend.functions import pulse_magnitude - from pysd.py_backend.statefuls import Time - - # Pulse function with repeat time - # before first impulse - t = Time() - t.set_control_vars(initial_time=0, time_step=1) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 0) - # first impulse - t.update(2) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 10) - # after first impulse and before second - t.update(4) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 0) - # second impulse - t.update(7) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 10) - # after second and before third impulse - t.update(9) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 0) - # third impulse - t.update(12) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 10) - # after third impulse - t.update(14) - self.assertEqual(pulse_magnitude(t, 10, 2, 5), 0) - - # Pulse function without repeat time - # before first impulse - t = Time() - t.set_control_vars(initial_time=0, time_step=1) - self.assertEqual(pulse_magnitude(t, 10, 2), 0) - # first impulse - t.update(2) - self.assertEqual(pulse_magnitude(t, 10, 2), 10) - # after first impulse and before second - t.update(4) - self.assertEqual(pulse_magnitude(t, 10, 2), 0) - # second impulse - t.update(7) - self.assertEqual(pulse_magnitude(t, 10, 2), 0) - # after second and before third impulse - t.update(9) - self.assertEqual(pulse_magnitude(t, 10, 2), 0) - - def test_xidz(self): - from pysd.py_backend.functions import xidz - self.assertEqual(xidz(1, -0.00000001, 5), 5) - self.assertEqual(xidz(1, 0, 5), 5) - self.assertEqual(xidz(1, 8, 5), 0.125) - - def test_zidz(self): - from pysd.py_backend.functions import zidz - self.assertEqual(zidz(1, -0.00000001), 0) - self.assertEqual(zidz(1, 0), 0) - self.assertEqual(zidz(1, 8), 0.125) - - -class TestLogicFunctions(unittest.TestCase): - def test_if_then_else_basic(self): - from pysd.py_backend.functions import if_then_else - self.assertEqual(if_then_else(True, lambda: 1, lambda: 0), 1) - self.assertEqual(if_then_else(False, lambda: 1, lambda: 0), 0) - - # Ensure lazzy evaluation - self.assertEqual(if_then_else(True, lambda: 1, lambda: 1/0), 1) - self.assertEqual(if_then_else(False, lambda: 1/0, lambda: 0), 0) - - with self.assertRaises(ZeroDivisionError): - if_then_else(True, lambda: 1/0, lambda: 0) - with self.assertRaises(ZeroDivisionError): - if_then_else(False, lambda: 1, lambda: 1/0) - - def test_if_then_else_with_subscripted(self): - # this test only test the lazzy evaluation and basics - # subscripted_if_then_else test all the possibilities - - from pysd.py_backend.functions import if_then_else - - coords = {'dim1': [0, 1], 'dim2': [0, 1]} - dims = list(coords) - - xr_true = xr.DataArray([[True, True], [True, True]], coords, dims) - xr_false = xr.DataArray([[False, False], [False, False]], coords, dims) - xr_mixed = xr.DataArray([[True, False], [False, True]], coords, dims) - - out_mixed = xr.DataArray([[1, 0], [0, 1]], coords, dims) - - self.assertEqual(if_then_else(xr_true, lambda: 1, lambda: 0), 1) - self.assertEqual(if_then_else(xr_false, lambda: 1, lambda: 0), 0) - self.assertTrue( - if_then_else(xr_mixed, lambda: 1, lambda: 0).equals(out_mixed)) - - # Ensure lazzy evaluation - self.assertEqual(if_then_else(xr_true, lambda: 1, lambda: 1/0), 1) - self.assertEqual(if_then_else(xr_false, lambda: 1/0, lambda: 0), 0) - - with self.assertRaises(ZeroDivisionError): - if_then_else(xr_true, lambda: 1/0, lambda: 0) - with 
self.assertRaises(ZeroDivisionError): - if_then_else(xr_false, lambda: 1, lambda: 1/0) - with self.assertRaises(ZeroDivisionError): - if_then_else(xr_mixed, lambda: 1/0, lambda: 0) - with self.assertRaises(ZeroDivisionError): - if_then_else(xr_mixed, lambda: 1, lambda: 1/0) - - -class TestFunctions(unittest.TestCase): - - def test_sum(self): - """ - Test for sum function - """ - from pysd.py_backend.functions import sum - - coords = {'d1': [9, 1], 'd2': [2, 4]} - coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} - dims = ['d1', 'd2'] - - data = xr.DataArray([[1, 2], [3, 4]], coords, dims) - - self.assertTrue(sum( - data, - dim=['d1']).equals(xr.DataArray([4, 6], coords_d2, ['d2']))) - self.assertTrue(sum( - data, - dim=['d2']).equals(xr.DataArray([3, 7], coords_d1, ['d1']))) - self.assertEqual(sum(data, dim=['d1', 'd2']), 10) - self.assertEqual(sum(data), 10) - - def test_prod(self): - """ - Test for sum function - """ - from pysd.py_backend.functions import prod - - coords = {'d1': [9, 1], 'd2': [2, 4]} - coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} - dims = ['d1', 'd2'] - - data = xr.DataArray([[1, 2], [3, 4]], coords, dims) - - self.assertTrue(prod( - data, - dim=['d1']).equals(xr.DataArray([3, 8], coords_d2, ['d2']))) - self.assertTrue(prod( - data, - dim=['d2']).equals(xr.DataArray([2, 12], coords_d1, ['d1']))) - self.assertEqual(prod(data, dim=['d1', 'd2']), 24) - self.assertEqual(prod(data), 24) - - def test_vmin(self): - """ - Test for vmin function - """ - from pysd.py_backend.functions import vmin - - coords = {'d1': [9, 1], 'd2': [2, 4]} - coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} - dims = ['d1', 'd2'] - - data = xr.DataArray([[1, 2], [3, 4]], coords, dims) - - self.assertTrue(vmin( - data, - dim=['d1']).equals(xr.DataArray([1, 2], coords_d2, ['d2']))) - self.assertTrue(vmin( - data, - dim=['d2']).equals(xr.DataArray([1, 3], coords_d1, ['d1']))) - self.assertEqual(vmin(data, dim=['d1', 'd2']), 1) - self.assertEqual(vmin(data), 1) - - def test_vmax(self): - """ - Test for vmax function - """ - from pysd.py_backend.functions import vmax - - coords = {'d1': [9, 1], 'd2': [2, 4]} - coords_d1, coords_d2 = {'d1': [9, 1]}, {'d2': [2, 4]} - dims = ['d1', 'd2'] - - data = xr.DataArray([[1, 2], [3, 4]], coords, dims) - - self.assertTrue(vmax( - data, - dim=['d1']).equals(xr.DataArray([3, 4], coords_d2, ['d2']))) - self.assertTrue(vmax( - data, - dim=['d2']).equals(xr.DataArray([2, 4], coords_d1, ['d1']))) - self.assertEqual(vmax(data, dim=['d1', 'd2']), 4) - self.assertEqual(vmax(data), 4) - - def test_invert_matrix(self): - """ - Test for invert_matrix function - """ - from pysd.py_backend.functions import invert_matrix - - coords1 = {'d1': ['a', 'b'], 'd2': ['a', 'b']} - coords2 = {'d0': ['1', '2'], 'd1': ['a', 'b'], 'd2': ['a', 'b']} - coords3 = {'d0': ['1', '2'], - 'd1': ['a', 'b', 'c'], - 'd2': ['a', 'b', 'c']} - - data1 = xr.DataArray([[1, 2], [3, 4]], coords1, ['d1', 'd2']) - data2 = xr.DataArray([[[1, 2], [3, 4]], [[-1, 2], [5, 4]]], - coords2, - ['d0', 'd1', 'd2']) - data3 = xr.DataArray([[[1, 2, 3], [3, 7, 2], [3, 4, 6]], - [[-1, 2, 3], [4, 7, 3], [5, 4, 6]]], - coords3, - ['d0', 'd1', 'd2']) - - for data in [data1, data2, data3]: - datai = invert_matrix(data) - self.assertEqual(data.dims, datai.dims) - - if len(data.shape) == 2: - # two dimensions xarrays - self.assertTrue(( - abs(np.dot(data, datai) - np.dot(datai, data)) - < 1e-14 - ).all()) - self.assertTrue(( - abs(np.dot(data, datai) - np.identity(data.shape[-1])) - < 1e-14 - ).all()) - else: - # 
three dimensions xarrays - for i in range(data.shape[0]): - self.assertTrue(( - abs(np.dot(data[i], datai[i]) - - np.dot(datai[i], data[i])) - < 1e-14 - ).all()) - self.assertTrue(( - abs(np.dot(data[i], datai[i]) - - np.identity(data.shape[-1])) - < 1e-14 - ).all()) - - def test_incomplete(self): - from pysd.py_backend.functions import incomplete - from warnings import catch_warnings - - with catch_warnings(record=True) as w: - incomplete() - self.assertEqual(len(w), 1) - self.assertTrue('Call to undefined function' in str(w[-1].message)) - - def test_not_implemented_function(self): - from pysd.py_backend.functions import not_implemented_function - - with self.assertRaises(NotImplementedError): - not_implemented_function("NIF") From 812167899db38b00131864011bec65c8e330c4b9 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 11 May 2022 17:05:57 +0200 Subject: [PATCH 61/96] Update version --- pysd/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pysd/_version.py b/pysd/_version.py index 4cb28cbd..528787cf 100644 --- a/pysd/_version.py +++ b/pysd/_version.py @@ -1 +1 @@ -__version__ = "3.0.0-dev" +__version__ = "3.0.0" From 1a4309d9e86da68a1fccb5f6d44ac345552874c2 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 10:16:41 +0200 Subject: [PATCH 62/96] Correct spelling --- docs/structure/abstract_model.rst | 2 +- docs/structure/python_builder.rst | 2 +- docs/structure/structure_index.rst | 2 +- docs/structure/vensim_translation.rst | 2 +- docs/structure/xmile_translation.rst | 8 ++-- .../python/python_expressions_builder.py | 42 +++++-------------- pysd/py_backend/functions.py | 2 - 7 files changed, 19 insertions(+), 41 deletions(-) diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index dcebecf5..2d495023 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -1,6 +1,6 @@ Abstract Model Representation ============================= -The Abstract Model representatin allows a separation of concern between +The Abstract Model representation allows a separation of concern between translation and the building. The translation will be called anything that happens between the source code and the Abstract Model representation. While the building will be everything that happens between the Abstract Model and the diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst index b8fb5d7e..b6734c27 100644 --- a/docs/structure/python_builder.rst +++ b/docs/structure/python_builder.rst @@ -5,7 +5,7 @@ The Python builder allows to build models that can be run with the PySD Model cl The use of a one-to-one dictionary in translation means that the breadth of functionality is inherently limited. In the case where no direct Python equivalent is available, PySD provides a library of functions such as `pulse`, `step`, etc. that are specific to dynamic model behavior. -In addition to translating individual commands between Vensim/XMILE and Python, PySD reworks component identifiers to be Python-safe by replacing spaces with underscores. The translator allows source identifiers to make use of alphanumeric characters, spaces, or the symbol. +In addition to translating individual commands between Vensim/XMILE and Python, PySD reworks component identifiers to be Python-safe by replacing spaces with underscores. The translator allows source identifiers to make use of alphanumeric characters, spaces, or special characteres. 
In order to make that possible a namespace is created, which links the original name of the variable with the Python-safe name. The namespace is also available in the PySD model class to allow users working with both original names and Python-safe names. Main builders diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index d8c957bc..3594df79 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -49,7 +49,7 @@ For loading a translated model with Python see :doc:`basic usage <../basic_usage The Python builder constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. -The model class also contains a function for each of the model components, representing the essential model equations. The each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. +The model class also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subscripts type information and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index 1211e916..d31eedbb 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -8,7 +8,7 @@ The translation workflow ------------------------- The following translation workflow allows splitting the Vensim file while parsing each part of it in order to make it possible to build an :py:class:`AbstractModel` type object. The workflow could be summarized as follows: -1. Vensim file: Splits the file content from the skecth and allows splitting the model in sections (main section, macro section) +1. Vensim file: Splits the file content from the sketch and allows splitting the model in sections (main section, macro section) 2. Vensim section: Full set of varibles and definitions that can be integrated. Allows splitting the model expressions. 3. Vensim element: A definition in the mdl file which could be a subscript (sub)range definition or a variable definition. It includes units and comments. Definitions for the same variable are grouped after in the same :py:class:`AbstractElement` object. Allows parsing its left hand side (LHS) to get the name of the subscript (sub)range or variable and it is returned as a specific type of component depending on the used assing operator (=, ==, :=, (), :) 4. Vensim component: The classified object for a variable definition, it depends on the opperator used to define the variable. Its right hand side (RHS) can be parsed to get the Abstract Syntax Tree (AST) of the expression.
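[Editor's note] The workflow above is what the Vensim translation modules introduced in this patch series implement. A minimal sketch of how it might be driven from Python follows; the module path (pysd/translation/vensim/vensin_file.py, as the file is named in these patches) and the class and method names (VensimFile, parse, get_abstract_model) are assumptions inferred from the added files and tests, not a confirmed public API:

    # Hypothetical sketch of the Vensim translation workflow described above.
    # Module, class and method names are assumed from this patch series.
    from pysd.translation.vensim.vensin_file import VensimFile

    vensim_file = VensimFile("teacup.mdl")              # 1. split the sketch from the model equations and sections
    vensim_file.parse()                                 # 2-4. parse sections, elements and components into ASTs
    abstract_model = vensim_file.get_abstract_model()   # build the AbstractModel consumed by the Python builder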
diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst index d070c949..d5ad6209 100644 --- a/docs/structure/xmile_translation.rst +++ b/docs/structure/xmile_translation.rst @@ -5,7 +5,7 @@ PySD allows parsing a Xmile file and translates the result to an :py:class:`Abst .. warning:: - Currently no Xmile users are working in the development of PySD, this is causing a gap between the Xmile and Vensim developments. Stella users are encouraged to take part in the development of PySD by invcludying new test models and adding support for new functions and features. + Currently no Xmile users are working in the development of PySD, this is causing a gap between the Xmile and Vensim developments. Stella users are encouraged to take part in the development of PySD by invcludying new `test models `_ and adding support for new functions and features. The translation workflow @@ -86,9 +86,9 @@ Several subscript related features all supported. This include: - Basic subscript operations with different ranges. - Subscript ranges and subranges definitions. -Grafic functions -^^^^^^^^^^^^^^^^ -Xmile grafic functions (gf), also known as lookups, are supported. They can be given hardcoded or inline. +Graphical functions +^^^^^^^^^^^^^^^^^^^ +Xmile graphical functions (gf), also known as lookups, are supported. They can be given hardcoded or inline. .. warning:: Interpolation methods 'extrapolate' and 'discrete' are implemented but not tested. Full integration models with this methods are required. diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index c0fd3197..bd83fd66 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -1880,41 +1880,21 @@ def merge_dependencies(*dependencies: dict, inplace: bool = False) -> dict: current.update(new) elif new: # regular element - _merge_dependencies(current, new) + current_set, new_set = set(current), set(new) + for dep in current_set.intersection(new_set): + # if dependency is in both sum the number of calls + if dep.startswith("__"): + # if it is special (__lookup__, __external__) continue + continue + else: + current[dep] += new[dep] + for dep in new_set.difference(current_set): + # if dependency is only in new copy it + current[dep] = new[dep] return current -def _merge_dependencies(current: dict, new: dict) -> None: - """ - Merge two dependencies dicts of an element. - - Parameters - ---------- - current: dict - Current dependencies of the element. It will be mutated. - - new: dict - New dependencies to add. - - Returns - ------- - None - - """ - current_set, new_set = set(current), set(new) - for dep in current_set.intersection(new_set): - # if dependency is in both sum the number of calls - if dep.startswith("__"): - # if it is special (__lookup__, __external__) continue - continue - else: - current[dep] += new[dep] - for dep in new_set.difference(current_set): - # if dependency is only in new copy it - current[dep] = new[dep] - - def visit_loc(current_subs: dict, original_subs: dict, keep_shape: bool = False) -> tuple: """ diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index c6207ebc..a97a1c22 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -4,8 +4,6 @@ what is present in the function call. We provide them in a structure that makes it easy for the model elements to call. 
""" - -from timeit import repeat import warnings import numpy as np From 39c99ddc056121d106e7eb68d54f3540062ad884 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 10:57:15 +0200 Subject: [PATCH 63/96] Remove variables in documentation when selecting a submodel --- pysd/py_backend/statefuls.py | 7 ++++++- tests/pytest_pysd/pytest_select_submodel.py | 8 ++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index 72981fa4..f28ca445 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1796,11 +1796,16 @@ def select_submodel(self, vars=[], modules=[], exogenous_components={}): all_vars = all_deps.copy() all_vars.update(c_vars) - # clean dependendies and namespace dictionaries + # clean dependendies and namespace dictionaries, and remove + # the rows from the documentation for real_name, py_name in self._namespace.copy().items(): if py_name not in all_vars: del self._namespace[real_name] del self._dependencies[py_name] + self._doc.drop( + self._doc.index[self._doc["Real Name"] == real_name], + inplace=True + ) for py_name in self._dependencies.copy().keys(): if py_name.startswith("_") and py_name not in s_deps: diff --git a/tests/pytest_pysd/pytest_select_submodel.py b/tests/pytest_pysd/pytest_select_submodel.py index f796e626..c4759aea 100644 --- a/tests/pytest_pysd/pytest_select_submodel.py +++ b/tests/pytest_pysd/pytest_select_submodel.py @@ -129,10 +129,14 @@ def test_select_submodel(self, model, variables, modules, assert "_integ_other_stock" in model._dependencies assert "other_stock" in model._dependencies assert "other stock" in model._namespace + assert "other stock" in model._doc["Real Name"].to_list() + assert "other_stock" in model._doc["Py Name"].to_list() assert "_integ_stock" in model._stateful_elements assert "_integ_stock" in model._dependencies assert "stock" in model._dependencies assert "Stock" in model._namespace + assert "Stock" in model._doc["Real Name"].to_list() + assert "stock" in model._doc["Py Name"].to_list() # select submodel with pytest.warns(UserWarning) as record: @@ -147,10 +151,14 @@ def test_select_submodel(self, model, variables, modules, assert "_integ_other_stock" not in model._dependencies assert "other_stock" not in model._dependencies assert "other stock" not in model._namespace + assert "other stock" not in model._doc["Real Name"].to_list() + assert "other_stock" not in model._doc["Py Name"].to_list() assert "_integ_stock" in model._stateful_elements assert "_integ_stock" in model._dependencies assert "stock" in model._dependencies assert "Stock" in model._namespace + assert "Stock" in model._doc["Real Name"].to_list() + assert "stock" in model._doc["Py Name"].to_list() if not dep_vars: # totally independent submodels can run without producing From e9cbc77e6ac92bef9ce77bb9034d78dda681c31d Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 11:21:24 +0200 Subject: [PATCH 64/96] Move model and macro to a separate file --- pysd/building/python/imports.py | 7 +- .../python/python_expressions_builder.py | 2 +- pysd/py_backend/model.py | 1707 +++++++++++++++++ pysd/py_backend/statefuls.py | 1699 +--------------- pysd/pysd.py | 2 +- tests/pytest_pysd/pytest_functions.py | 4 +- tests/unit_test_statefuls.py | 2 +- 7 files changed, 1718 insertions(+), 1705 deletions(-) create mode 100644 pysd/py_backend/model.py diff --git a/pysd/building/python/imports.py b/pysd/building/python/imports.py index 
f807de22..92706918 100644 --- a/pysd/building/python/imports.py +++ b/pysd/building/python/imports.py @@ -8,14 +8,15 @@ class ImportsManager(): _external_libs = {"numpy": "np", "xarray": "xr"} _external_submodules = ["scipy"] _internal_libs = [ - "functions", "statefuls", "external", "data", "lookups", "utils" + "functions", "statefuls", "external", "data", "lookups", "utils", + "model" ] def __init__(self): self._numpy, self._xarray = False, False self._functions, self._statefuls, self._external, self._data,\ - self._lookups, self._utils, self._scipy =\ - set(), set(), set(), set(), set(), set(), set() + self._lookups, self._utils, self._scipy, self._model =\ + set(), set(), set(), set(), set(), set(), set(), set() def add(self, module: str, function: Union[str, None] = None) -> None: """ diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/building/python/python_expressions_builder.py index bd83fd66..c8a61978 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/building/python/python_expressions_builder.py @@ -541,7 +541,7 @@ def build_macro_call(self, arguments: dict) -> BuildAST: The built object. """ - self.section.imports.add("statefuls", "Macro") + self.section.imports.add("model", "Macro") # Get macro from macrospace macro = self.section.macrospace[self.macro_name] diff --git a/pysd/py_backend/model.py b/pysd/py_backend/model.py new file mode 100644 index 00000000..31f16d2c --- /dev/null +++ b/pysd/py_backend/model.py @@ -0,0 +1,1707 @@ +""" +Macro and Model classes are the main classes for loading and interacting +with a PySD model. Model class allows loading and running a PySD model. +Several methods and properties are inherited from Macro class, which +allows integrating a model or a Macro expression (set of functions in +a separate file). +""" +import warnings +import inspect +import pickle +from typing import Union + +import numpy as np +import xarray as xr +import pandas as pd + +from . import utils +from .statefuls import DynamicStateful, Stateful +from .external import External, Excels +from .cache import Cache, constant_cache +from .data import TabData +from .lookups import HardcodedLookups +from .components import Components, Time + +from pysd._version import __version__ + + +class Macro(DynamicStateful): + """ + The Macro class implements a stateful representation of the system, + and contains the majority of methods for accessing and modifying + components. + + When the instance in question also serves as the root model object + (as opposed to a macro or submodel within another model) it will have + added methods to facilitate execution. + + The Macro object will be created with components drawn from a + translated python model file. + + Parameters + ---------- + py_model_file: str or pathlib.Path + Filename of a model or macro which has already been converted + into a python format. + params: dict or None (optional) + Dictionary of the macro parameters. Default is None. + return_func: str or None (optional) + The name of the function to return from the macro. Default is None. + time: components.Time or None (optional) + Time object for integration. If None a new time object will + be generated (for models), if passed the time object will be + used (for macros). Default is None. + time_initialization: callable or None + Time to set at the beginning of the Macro. Default is None. + data_files: dict or list or str or None + The dictionary with keys the name of file and variables to + load the data from there.
Or the list of names or name of the + file to search the data in. Only works for TabData type object + and it is neccessary to provide it. Default is None. + py_name: str or None + The name of the Macro object. Default is None. + + """ + def __init__(self, py_model_file, params=None, return_func=None, + time=None, time_initialization=None, data_files=None, + py_name=None): + super().__init__() + self.time = time + self.time_initialization = time_initialization + self.cache = Cache() + self.py_name = py_name + self.external_loaded = False + self.lookups_loaded = False + self.components = Components(str(py_model_file), self.set_components) + + if __version__.split(".")[0]\ + != self.get_pysd_compiler_version().split(".")[0]: + raise ImportError( + "\n\nNot able to import the model. " + + "The model was translated with a " + + "not compatible version of PySD:" + + "\n\tPySD " + self.get_pysd_compiler_version() + + "\n\nThe current version of PySd is:" + + "\n\tPySD " + __version__ + "\n\n" + + "Please translate again the model with the function" + + " read_vensim or read_xmile.") + + self._namespace = self.components._components.component.namespace + self._dependencies =\ + self.components._components.component.dependencies.copy() + self._subscript_dict = getattr( + self.components._components, "_subscript_dict", {}) + self._modules = getattr( + self.components._components, "_modules", {}) + + self._doc = self._build_doc() + + if params is not None: + # add params to namespace + self._namespace.update(self.components._components._params) + # create new components with the params + self.set_components(params, new=True) + # update dependencies + for param in params: + self._dependencies[ + self._namespace[param]] = {"time"} + + # Get the collections of stateful elements and external elements + self._stateful_elements = { + name: getattr(self.components, name) + for name in dir(self.components) + if isinstance(getattr(self.components, name), Stateful) + } + self._dynamicstateful_elements = [ + getattr(self.components, name) for name in dir(self.components) + if isinstance(getattr(self.components, name), DynamicStateful) + ] + self._external_elements = [ + getattr(self.components, name) for name in dir(self.components) + if isinstance(getattr(self.components, name), External) + ] + self._macro_elements = [ + getattr(self.components, name) for name in dir(self.components) + if isinstance(getattr(self.components, name), Macro) + ] + + self._data_elements = [ + getattr(self.components, name) for name in dir(self.components) + if isinstance(getattr(self.components, name), TabData) + ] + + self._lookup_elements = [ + getattr(self.components, name) for name in dir(self.components) + if isinstance(getattr(self.components, name), HardcodedLookups) + ] + + if data_files: + self._get_data(data_files) + + self._assign_cache_type() + self._get_initialize_order() + + if return_func is not None: + self.return_func = getattr(self.components, return_func) + else: + self.return_func = lambda: 0 + + self.py_model_file = str(py_model_file) + + def __call__(self): + return self.return_func() + + @property + def doc(self) -> pd.DataFrame: + """ + The documentation of the model. + """ + return self._doc.copy() + + @property + def namespace(self) -> dict: + """ + The namespace dictionary of the model. + """ + return self._namespace.copy() + + @property + def dependencies(self) -> dict: + """ + The dependencies dictionary of the model. 
+ """ + return self._dependencies.copy() + + @property + def subscripts(self) -> dict: + """ + The subscripts dictionary of the model. + """ + return self._subscript_dict.copy() + + @property + def modules(self) -> Union[dict, None]: + """ + The dictionary of modules of the model. If the model is not + split by modules it returns None. + """ + return self._modules.copy() or None + + def clean_caches(self): + """ + Clean the cache of the object and the macro objects that it + contains + """ + self.cache.clean() + # if nested macros + [macro.clean_caches() for macro in self._macro_elements] + + def _get_data(self, data_files): + if isinstance(data_files, dict): + for data_file, vars in data_files.items(): + for var in vars: + found = False + for element in self._data_elements: + if var in [element.py_name, element.real_name]: + element.load_data(data_file) + found = True + break + if not found: + raise ValueError( + f"'{var}' not found as model data variable") + + else: + for element in self._data_elements: + element.load_data(data_files) + + def _get_initialize_order(self): + """ + Get the initialization order of the stateful elements + and their full dependencies. + """ + # get the full set of dependencies to initialize a stateful object + # including all levels + self.stateful_initial_dependencies = { + ext: set() + for ext in self._dependencies + if (ext.startswith("_") and not ext.startswith("_active_initial_")) + } + for element in self.stateful_initial_dependencies: + self._get_full_dependencies( + element, self.stateful_initial_dependencies[element], + "initial") + + # get the full dependencies of stateful objects taking into account + # only other objects + current_deps = { + element: [ + dep for dep in deps + if dep in self.stateful_initial_dependencies + ] for element, deps in self.stateful_initial_dependencies.items() + } + + # get initialization order of the stateful elements + self.initialize_order = [] + delete = True + while delete: + delete = [] + for element in current_deps: + if not current_deps[element]: + # if stateful element has no deps on others + # add to the queue to initialize + self.initialize_order.append(element) + delete.append(element) + for element2 in current_deps: + # remove dependency on the initialized element + if element in current_deps[element2]: + current_deps[element2].remove(element) + # delete visited elements + for element in delete: + del current_deps[element] + + if current_deps: + # if current_deps is not an empty set there is a circular + # reference between stateful objects + raise ValueError( + 'Circular initialization...\n' + + 'Not able to initialize the following objects:\n\t' + + '\n\t'.join(current_deps)) + + def _get_full_dependencies(self, element, dep_set, stateful_deps): + """ + Get all dependencies of an element, i.e., also get the dependencies + of the dependencies. When finding a stateful element, only dependencies + for initialization are considered. + + Parameters + ---------- + element: str + Element to get the full dependencies. + dep_set: set + Set to include the dependencies of the element. + stateful_deps: "initial" or "step" + The type of dependencies to take in the case of stateful objects.
+ + Returns + ------- + None + + """ + deps = self._dependencies[element] + if element.startswith("_"): + deps = deps[stateful_deps] + for dep in deps: + if dep not in dep_set and not dep.startswith("__")\ + and dep != "time": + dep_set.add(dep) + self._get_full_dependencies(dep, dep_set, stateful_deps) + + def _add_constant_cache(self): + self.constant_funcs = set() + for element, cache_type in self.cache_type.items(): + if cache_type == "run": + self.components._set_component( + element, + constant_cache(getattr(self.components, element)) + ) + self.constant_funcs.add(element) + + def _remove_constant_cache(self): + for element in self.constant_funcs: + self.components._set_component( + element, + getattr(self.components, element).function) + self.constant_funcs = set() + + def _assign_cache_type(self): + """ + Assigns the cache type to all the elements from the namespace. + """ + self.cache_type = {"time": None} + + for element in self._namespace.values(): + if element not in self.cache_type\ + and element in self._dependencies: + self._assign_cache(element) + + for element, cache_type in self.cache_type.items(): + if cache_type is not None: + if element not in self.cache.cached_funcs\ + and self._count_calls(element) > 1: + self.components._set_component( + element, + self.cache(getattr(self.components, element))) + self.cache.cached_funcs.add(element) + + def _count_calls(self, element): + n_calls = 0 + for subelement in self._dependencies: + if subelement.startswith("_") and\ + element in self._dependencies[subelement]["step"]: + if element in\ + self._dependencies[subelement]["initial"]: + n_calls +=\ + 2*self._dependencies[subelement]["step"][element] + else: + n_calls +=\ + self._dependencies[subelement]["step"][element] + elif (not subelement.startswith("_") and + element in self._dependencies[subelement]): + n_calls +=\ + self._dependencies[subelement][element] + + return n_calls + + def _assign_cache(self, element): + """ + Assigns the cache type to the given element and its dependencies if + needed. + + Parameters + ---------- + element: str + Element name. + + Returns + ------- + None + + """ + if not self._dependencies[element]: + self.cache_type[element] = "run" + elif "__lookup__" in self._dependencies[element]: + self.cache_type[element] = None + elif self._isdynamic(self._dependencies[element]): + self.cache_type[element] = "step" + else: + self.cache_type[element] = "run" + for subelement in self._dependencies[element]: + if subelement.startswith("_initial_")\ + or subelement.startswith("__"): + continue + if subelement not in self.cache_type: + self._assign_cache(subelement) + if self.cache_type[subelement] == "step": + self.cache_type[element] = "step" + break + + def _isdynamic(self, dependencies): + """ + + Parameters + ---------- + dependencies: iterable + List of dependencies. + + Returns + ------- + isdynamic: bool + True if 'time' or a dynamic stateful objects is in dependencies. + + """ + if "time" in dependencies: + return True + for dep in dependencies: + if dep.startswith("_") and not dep.startswith("_initial_")\ + and not dep.startswith("__"): + return True + return False + + def get_pysd_compiler_version(self): + """ + Returns the version of pysd complier that used for generating + this model + """ + return self.components.__pysd_version__ + + def initialize(self): + """ + This function initializes the external objects and stateful objects + in the given order. 
+ """ + # Initialize time + if self.time is None: + self.time = self.time_initialization() + + # Reset time to the initial one + self.time.reset() + self.cache.clean() + + self.components._init_outer_references({ + 'scope': self, + 'time': self.time + }) + + if not self.lookups_loaded: + # Initialize HardcodedLookups elements + for element in self._lookup_elements: + element.initialize() + + self.lookups_loaded = True + + if not self.external_loaded: + # Initialize external elements + for element in self._external_elements: + element.initialize() + + # Remove Excel data from memory + Excels.clean() + + self.external_loaded = True + + # Initialize stateful objects + for element_name in self.initialize_order: + self._stateful_elements[element_name].initialize() + + def ddt(self): + return np.array([component.ddt() for component + in self._dynamicstateful_elements], dtype=object) + + @property + def state(self): + return np.array([component.state for component + in self._dynamicstateful_elements], dtype=object) + + @state.setter + def state(self, new_value): + [component.update(val) for component, val + in zip(self._dynamicstateful_elements, new_value)] + + def export(self, file_name): + """ + Export stateful values to pickle file. + + Parameters + ---------- + file_name: str + Name of the file to export the values. + + """ + warnings.warn( + "\nCompatibility of exported states could be broken between" + " different versions of PySD or xarray, current versions:\n" + f"\tPySD {__version__}\n\txarray {xr.__version__}\n" + ) + stateful_elements = { + name: element.export() + for name, element in self._stateful_elements.items() + } + + with open(file_name, 'wb') as file: + pickle.dump( + (self.time(), + stateful_elements, + {'pysd': __version__, 'xarray': xr.__version__} + ), file) + + def import_pickle(self, file_name): + """ + Import stateful values from pickle file. + + Parameters + ---------- + file_name: str + Name of the file to import the values from. + + """ + with open(file_name, 'rb') as file: + time, stateful_dict, metadata = pickle.load(file) + + if __version__ != metadata['pysd']\ + or xr.__version__ != metadata['xarray']: # pragma: no cover + warnings.warn( + "\nCompatibility of exported states could be broken between" + " different versions of PySD or xarray. Current versions:\n" + f"\tPySD {__version__}\n\txarray {xr.__version__}\n" + "Loaded versions:\n" + f"\tPySD {metadata['pysd']}\n\txarray {metadata['xarray']}\n" + ) + + self.set_stateful(stateful_dict) + self.time.set_control_vars(initial_time=time) + + def get_args(self, param): + """ + Returns the arguments of a model element. + + Parameters + ---------- + param: str or func + The model element name or function. + + Returns + ------- + args: list + List of arguments of the function. + + Examples + -------- + >>> model.get_args('birth_rate') + >>> model.get_args('Birth Rate') + + """ + if isinstance(param, str): + func_name = utils.get_key_and_value_by_insensitive_key_or_value( + param, + self._namespace)[1] or param + + func = getattr(self.components, func_name) + else: + func = param + + if hasattr(func, 'args'): + # cached functions + return func.args + else: + # regular functions + args = inspect.getfullargspec(func)[0] + if 'self' in args: + args.remove('self') + return args + + def get_coords(self, param): + """ + Returns the coordinates and dims of a model element. + + Parameters + ---------- + param: str or func + The model element name or function. 
+ + Returns + ------- + (coords, dims) or None: (dict, list) or None + The coords and the dimensions of the element if it has. + Otherwise, returns None. + + Examples + -------- + >>> model.get_coords('birth_rate') + >>> model.get_coords('Birth Rate') + + """ + if isinstance(param, str): + func_name = utils.get_key_and_value_by_insensitive_key_or_value( + param, + self._namespace)[1] or param + + func = getattr(self.components, func_name) + + else: + func = param + + if hasattr(func, "subscripts"): + dims = func.subscripts + if not dims: + return None + coords = {dim: self.components._subscript_dict[dim] + for dim in dims} + return coords, dims + elif hasattr(func, "state") and isinstance(func.state, xr.DataArray): + value = func() + else: + return None + + dims = list(value.dims) + coords = {coord: list(value.coords[coord].values) + for coord in value.coords} + return coords, dims + + def __getitem__(self, param): + """ + Returns the current value of a model component. + + Parameters + ---------- + param: str or func + The model element name. + + Returns + ------- + value: float or xarray.DataArray + The value of the model component. + + Examples + -------- + >>> model['birth_rate'] + >>> model['Birth Rate'] + + Note + ---- + It will crash if the model component takes arguments. + + """ + func_name = utils.get_key_and_value_by_insensitive_key_or_value( + param, + self._namespace)[1] or param + + if self.get_args(getattr(self.components, func_name)): + raise ValueError( + "Trying to get the current value of a lookup " + "to get all the values with the series data use " + "model.get_series_data(param)\n\n") + + return getattr(self.components, func_name)() + + def get_series_data(self, param): + """ + Returns the original values of a model lookup/data component. + + Parameters + ---------- + param: str + The model lookup/data element name. + + Returns + ------- + value: xarray.DataArray + Array with the value of the interpolating series + in the first dimension. + + Examples + -------- + >>> model['room_temperature'] + >>> model['Room temperature'] + + """ + func_name = utils.get_key_and_value_by_insensitive_key_or_value( + param, + self._namespace)[1] or param + + if func_name.startswith("_ext_"): + return getattr(self.components, func_name).data + elif "__data__" in self._dependencies[func_name]: + return getattr( + self.components, + self._dependencies[func_name]["__data__"] + ).data + elif "__lookup__" in self._dependencies[func_name]: + return getattr( + self.components, + self._dependencies[func_name]["__lookup__"] + ).data + else: + raise ValueError( + "Trying to get the values of a constant variable. " + "'model.get_series_data' only works lookups/data objects.\n\n") + + def set_components(self, params, new=False): + """ Set the value of exogenous model elements. + Element values can be passed as keyword=value pairs in the + function call. Values can be numeric type or pandas Series. + Series will be interpolated by integrator. + + Examples + -------- + >>> model.set_components({'birth_rate': 10}) + >>> model.set_components({'Birth Rate': 10}) + + >>> br = pandas.Series(index=range(30), values=np.sin(range(30)) + >>> model.set_components({'birth_rate': br}) + + + """ + # TODO: allow the params argument to take a pandas dataframe, where + # column names are variable names. However some variables may be + # constant or have no values for some index. This should be processed. 
+ # TODO: make this compatible with loading outputs from other files + + for key, value in params.items(): + func_name = utils.get_key_and_value_by_insensitive_key_or_value( + key, + self._namespace)[1] + + if isinstance(value, np.ndarray) or isinstance(value, list): + raise TypeError( + 'When setting ' + key + '\n' + 'Setting subscripted must be done using a xarray.DataArray' + ' with the correct dimensions or a constant value ' + '(https://pysd.readthedocs.io/en/master/basic_usage.html)') + + if func_name is None: + raise NameError( + "\n'%s' is not recognized as a model component." + % key) + + if new: + func = None + dims = None + else: + func = getattr(self.components, func_name) + _, dims = self.get_coords(func) or (None, None) + + # if the variable is a lookup or a data we perform the change in + # the object they call + func_type = getattr(func, "type", None) + if func_type in ["Lookup", "Data"]: + # getting the object from original dependencies + obj = self._dependencies[func_name][f"__{func_type.lower()}__"] + getattr( + self.components, + obj + ).set_values(value) + + # Update dependencies + if func_type == "Data": + if isinstance(value, pd.Series): + self._dependencies[func_name] = { + "time": 1, "__data__": obj + } + else: + self._dependencies[func_name] = {"__data__": obj} + + continue + + if isinstance(value, pd.Series): + new_function, deps = self._timeseries_component( + value, dims) + self._dependencies[func_name] = deps + elif callable(value): + new_function = value + # Using step cache adding time as dependency + # TODO it would be better if we can parse the content + # of the function to get all the dependencies + self._dependencies[func_name] = {"time": 1} + + else: + new_function = self._constant_component(value, dims) + self._dependencies[func_name] = {} + + # this won't handle other statefuls... + if '_integ_' + func_name in dir(self.components): + warnings.warn("Replacing the equation of stock" + + "{} with params".format(key), + stacklevel=2) + + new_function.__name__ = func_name + if dims: + new_function.dims = dims + self.components._set_component(func_name, new_function) + if func_name in self.cache.cached_funcs: + self.cache.cached_funcs.remove(func_name) + + def _timeseries_component(self, series, dims): + """ Internal function for creating a timeseries model element """ + # this is only called if the set_component function recognizes a + # pandas series + # TODO: raise a warning if extrapolating from the end of the series. 
+ # TODO: data type variables should be creted using a Data object + # lookup type variables should be created using a Lookup object + + if isinstance(series.values[0], xr.DataArray): + # the interpolation will be time dependent + return lambda: utils.rearrange(xr.concat( + series.values, + series.index).interp(concat_dim=self.time()).reset_coords( + 'concat_dim', drop=True), + dims, self._subscript_dict), {'time': 1} + + elif dims: + # the interpolation will be time dependent + return lambda: utils.rearrange( + np.interp(self.time(), series.index, series.values), + dims, self._subscript_dict), {'time': 1} + + else: + # the interpolation will be time dependent + return lambda:\ + np.interp(self.time(), series.index, series.values),\ + {'time': 1} + + def _constant_component(self, value, dims): + """ Internal function for creating a constant model element """ + if dims: + return lambda: utils.rearrange( + value, dims, self._subscript_dict) + + else: + return lambda: value + + def set_initial_value(self, t, initial_value): + """ Set the system initial value. + + Parameters + ---------- + t : numeric + The system time + + initial_value : dict + A (possibly partial) dictionary of the system initial values. + The keys to this dictionary may be either pysafe names or + original model file names + + """ + self.time.set_control_vars(initial_time=t) + stateful_name = "_NONE" + modified_statefuls = set() + + for key, value in initial_value.items(): + component_name =\ + utils.get_key_and_value_by_insensitive_key_or_value( + key, self._namespace)[1] + if component_name is not None: + if self._dependencies[component_name]: + deps = list(self._dependencies[component_name]) + if len(deps) == 1 and deps[0] in self.initialize_order: + stateful_name = deps[0] + else: + component_name = key + stateful_name = key + + try: + _, dims = self.get_coords(component_name) + except TypeError: + dims = None + + if isinstance(value, xr.DataArray)\ + and not set(value.dims).issubset(set(dims)): + raise ValueError( + f"\nInvalid dimensions for {component_name}." + f"It should be a subset of {dims}, " + f"but passed value has {list(value.dims)}") + + if isinstance(value, np.ndarray) or isinstance(value, list): + raise TypeError( + 'When setting ' + key + '\n' + 'Setting subscripted must be done using a xarray.DataArray' + ' with the correct dimensions or a constant value ' + '(https://pysd.readthedocs.io/en/master/basic_usage.html)') + + # Try to update stateful component + try: + element = getattr(self.components, stateful_name) + if dims: + value = utils.rearrange( + value, dims, + self._subscript_dict) + element.initialize(value) + modified_statefuls.add(stateful_name) + except NameError: + # Try to override component + raise ValueError( + f"\nUnrecognized stateful '{component_name}'. If you want" + " to set a value of a regular component. 
Use params={" + f"'{component_name}': {value}" + "} instead.") + + self.clean_caches() + + # get the elements to initialize + elements_to_initialize =\ + self._get_elements_to_initialize(modified_statefuls) + + # Initialize remaining stateful objects + for element_name in self.initialize_order: + if element_name in elements_to_initialize: + self._stateful_elements[element_name].initialize() + + def _get_elements_to_initialize(self, modified_statefuls): + elements_to_initialize = set() + for stateful, deps in self.stateful_initial_dependencies.items(): + if stateful in modified_statefuls: + # if elements initial conditions have been modified + # we should not modify it + continue + for modified_sateteful in modified_statefuls: + if modified_sateteful in deps: + # if element has dependencies on a modified element + # we should re-initialize it + elements_to_initialize.add(stateful) + continue + + return elements_to_initialize + + def set_stateful(self, stateful_dict): + """ + Set stateful values. + + Parameters + ---------- + stateful_dict: dict + Dictionary of the stateful elements and the attributes to change. + + """ + for element, attrs in stateful_dict.items(): + for attr, value in attrs.items(): + setattr(getattr(self.components, element), attr, value) + + def _build_doc(self): + """ + Formats a table of documentation strings to help users remember + variable names, and understand how they are translated into + python safe names. + + Returns + ------- + docs_df: pandas dataframe + Dataframe with columns for the model components: + - Real names + - Python safe identifiers (as used in model.components) + - Units string + - Documentation strings from the original model file + """ + collector = [] + for name, pyname in self._namespace.items(): + element = getattr(self.components, pyname) + collector.append({ + 'Real Name': name, + 'Py Name': pyname, + 'Subscripts': element.subscripts, + 'Units': element.units, + 'Limits': element.limits, + 'Type': element.type, + 'Subtype': element.subtype, + 'Comment': element.__doc__.strip().strip("\n").strip() + if element.__doc__ else None + }) + + return pd.DataFrame( + collector + ).sort_values(by="Real Name").reset_index(drop=True) + + def __str__(self): + """ Return model source files """ + + # JT: Might be helpful to return not only the source file, but + # also how the instance differs from that source file. This + # would give a more accurate view of the current model. + string = 'Translated Model File: ' + self.py_model_file + if hasattr(self, 'mdl_file'): + string += '\n Original Model File: ' + self.mdl_file + + return string + + +class Model(Macro): + """ + The Model class implements a stateful representation of the system. + It inherits methods from the Macro class to integrate the model and + access and modify model components. It also contains the main + methods for running the model. + + The Model object will be created with components drawn from a + translated python model file. + + Parameters + ---------- + py_model_file: str or pathlib.Path + Filename of a model which has already been converted into a + python format. + data_files: dict or list or str or None + The dictionary with keys the name of file and variables to + load the data from there. Or the list of names or name of the + file to search the data in. Only works for TabData type object + and it is neccessary to provide it. Default is None. + initialize: bool + If False, the model will not be initialize when it is loaded. + Default is True. 
+ missing_values : str ("warning", "error", "ignore", "keep") (optional) + What to do with missing values. If "warning" (default) + shows a warning message and interpolates the values. + If "raise" raises an error. If "ignore" interpolates + the values without showing anything. If "keep" it will keep + the missing values, this option may cause the integration to + fail, but it may be used to check the quality of the data. + + """ + def __init__(self, py_model_file, data_files, initialize, missing_values): + """ Sets up the python objects """ + super().__init__(py_model_file, None, None, Time(), + data_files=data_files) + self.time.stage = 'Load' + self.time.set_control_vars(**self.components._control_vars) + self.data_files = data_files + self.missing_values = missing_values + if initialize: + self.initialize() + + def initialize(self): + """ Initializes the simulation model """ + self.time.stage = 'Initialization' + External.missing = self.missing_values + super().initialize() + + def run(self, params=None, return_columns=None, return_timestamps=None, + initial_condition='original', final_time=None, time_step=None, + saveper=None, reload=False, progress=False, flatten_output=False, + cache_output=True): + """ + Simulate the model's behavior over time. + Return a pandas dataframe with timestamps as rows, + model elements as columns. + + Parameters + ---------- + params: dict (optional) + Keys are strings of model component names. + Values are numeric or pandas Series. + Numeric values represent constants over the model integration. + Timeseries will be interpolated to give time-varying input. + + return_timestamps: list, numeric, ndarray (1D) (optional) + Timestamps in model execution at which to return state information. + Defaults to model-file specified timesteps. + + return_columns: list, 'step' or None (optional) + List of string model component names, returned dataframe + will have corresponding columns. If 'step' only variables with + cache step will be returned. If None, variables with cache step + and run will be returned. Default is None. + + initial_condition: str or (float, dict) (optional) + The starting time, and the state of the system (the values of + all the stocks) at that starting time. 'original' or 'o'uses + model-file specified initial condition. 'current' or 'c' uses + the state of the model after the previous execution. Other str + objects, loads initial conditions from the pickle file with the + given name.(float, dict) tuple lets the user specify a starting + time (float) and (possibly partial) dictionary of initial values + for stock (stateful) objects. Default is 'original'. + + final_time: float or None + Final time of the simulation. If float, the given value will be + used to compute the return_timestamps (if not given) and as a + final time. If None the last value of return_timestamps will be + used as a final time. Default is None. + + time_step: float or None + Time step of the simulation. If float, the given value will be + used to compute the return_timestamps (if not given) and + euler time series. If None the default value from components + will be used. Default is None. + + saveper: float or None + Saving step of the simulation. If float, the given value will be + used to compute the return_timestamps (if not given). If None + the default value from components will be used. Default is None. + + reload : bool (optional) + If True, reloads the model from the translated model file + before making changes. Default is False. 
+ + progress : bool (optional) + If True, a progressbar will be shown during integration. + Default is False. + + flatten_output: bool (optional) + If True, once the output dataframe has been formatted will + split the xarrays in new columns following vensim's naming + to make a totally flat output. Default is False. + + cache_output: bool (optional) + If True, the number of calls of outputs variables will be increased + in 1. This helps caching output variables if they are called only + once. For performance reasons, if time step = saveper it is + recommended to activate this feature, if time step << saveper + it is recommended to deactivate it. Default is True. + + Examples + -------- + >>> model.run(params={'exogenous_constant': 42}) + >>> model.run(params={'exogenous_variable': timeseries_input}) + >>> model.run(return_timestamps=[1, 2, 3, 4, 10]) + >>> model.run(return_timestamps=10) + >>> model.run(return_timestamps=np.linspace(1, 10, 20)) + + See Also + -------- + pysd.set_components : handles setting model parameters + pysd.set_initial_condition : handles setting initial conditions + + """ + if reload: + self.reload() + + self.progress = progress + + self.time.add_return_timestamps(return_timestamps) + if self.time.return_timestamps is not None and not final_time: + # if not final time given the model will end in the list + # return timestamp (the list is reversed for popping) + if self.time.return_timestamps: + final_time = self.time.return_timestamps[0] + else: + final_time = self.time.next_return + + self.time.set_control_vars( + final_time=final_time, time_step=time_step, saveper=saveper) + + if params: + self.set_components(params) + + # update cache types after setting params + self._assign_cache_type() + + self.set_initial_condition(initial_condition) + + if return_columns is None or isinstance(return_columns, str): + return_columns = self._default_return_columns(return_columns) + + capture_elements, return_addresses = utils.get_return_elements( + return_columns, self._namespace) + + # create a dictionary splitting run cached and others + capture_elements = self._split_capture_elements(capture_elements) + + # include outputs in cache if needed + self._dependencies["OUTPUTS"] = { + element: 1 for element in capture_elements["step"] + } + if cache_output: + self._assign_cache_type() + self._add_constant_cache() + + # Run the model + self.time.stage = 'Run' + # need to clean cache to remove the values from active_initial + self.clean_caches() + + res = self._integrate(capture_elements['step']) + + del self._dependencies["OUTPUTS"] + + self._add_run_elements(res, capture_elements['run']) + self._remove_constant_cache() + + return_df = utils.make_flat_df(res, return_addresses, flatten_output) + + return return_df + + def select_submodel(self, vars=[], modules=[], exogenous_components={}): + """ + Select a submodel from the original model. After selecting a submodel + only the necessary stateful objects for integrating this submodel will + be computed. + + Parameters + ---------- + vars: set or list of strings (optional) + Variables to include in the new submodel. + It can be an empty list if the submodel is only selected by + module names. Default is an empty list. + + modules: set or list of strings (optional) + Modules to include in the new submodel. + It can be an empty list if the submodel is only selected by + variable names. Default is an empty list. Can select a full + module or a submodule by passing the path without the .py, e.g.: + "view_1/submodule1". 
+ + exogenous_components: dictionary of parameters (optional) + Exogenous value to fix to the model variables that are needed + to run the selected submodel. The exogenous_components should + be passed as a dictionary in the same way it is done for + set_components method. By default it is an empty dict and + the needed exogenous components will be set to a numpy.nan value. + + Returns + ------- + None + + Notes + ----- + modules can be only passed when the model has been split in + different files during translation. + + Examples + -------- + >>> model.select_submodel( + ... vars=["Room Temperature", "Teacup temperature"]) + UserWarning: Selecting submodel, to run the full model again use model.reload() + + >>> model.select_submodel( + ... modules=["view_1", "view_2/subview_1"]) + UserWarning: Selecting submodel, to run the full model again use model.reload() + UserWarning: Exogenous components for the following variables are necessary but not given: + initial_value_stock1, stock3 + + >>> model.select_submodel( + ... vars=["stock3"], + ... modules=["view_1", "view_2/subview_1"]) + UserWarning: Selecting submodel, to run the full model again use model.reload() + UserWarning: Exogenous components for the following variables are necessary but not given: + initial_value_stock1, initial_value_stock3 + Please, set them before running the model using set_components method... + + >>> model.select_submodel( + ... vars=["stock3"], + ... modules=["view_1", "view_2/subview_1"], + ... exogenous_components={ + ... "initial_value_stock1": 3, + ... "initial_value_stock3": 5}) + UserWarning: Selecting submodel, to run the full model again use model.reload() + + """ + c_vars, d_vars, s_deps = self._get_dependencies(vars, modules) + warnings.warn( + "Selecting submodel, " + "to run the full model again use model.reload()") + + # get set of all dependencies and all variables to select + all_deps = d_vars["initial"].copy() + all_deps.update(d_vars["step"]) + all_deps.update(d_vars["lookup"]) + + all_vars = all_deps.copy() + all_vars.update(c_vars) + + # clean dependendies and namespace dictionaries, and remove + # the rows from the documentation + for real_name, py_name in self._namespace.copy().items(): + if py_name not in all_vars: + del self._namespace[real_name] + del self._dependencies[py_name] + self._doc.drop( + self._doc.index[self._doc["Real Name"] == real_name], + inplace=True + ) + + for py_name in self._dependencies.copy().keys(): + if py_name.startswith("_") and py_name not in s_deps: + del self._dependencies[py_name] + + # remove active initial from s_deps as they are "fake" objects + # in dependencies + s_deps = { + dep for dep in s_deps if not dep.startswith("_active_initial") + } + + # reassing the dictionary and lists of needed stateful objects + self._stateful_elements = { + name: getattr(self.components, name) + for name in s_deps + if isinstance(getattr(self.components, name), Stateful) + } + self._dynamicstateful_elements = [ + getattr(self.components, name) for name in s_deps + if isinstance(getattr(self.components, name), DynamicStateful) + ] + self._macro_elements = [ + getattr(self.components, name) for name in s_deps + if isinstance(getattr(self.components, name), Macro) + ] + + # keeping only needed external objects + ext_deps = set() + for values in self._dependencies.values(): + if "__external__" in values: + ext_deps.add(values["__external__"]) + self._external_elements = [ + getattr(self.components, name) for name in ext_deps + if isinstance(getattr(self.components, name), 
External) + ] + + # set all exogenous values to np.nan by default + new_components = {element: np.nan for element in all_deps} + # update exogenous values with the user input + [new_components.update( + { + utils.get_key_and_value_by_insensitive_key_or_value( + key, + self._namespace)[1]: value + }) for key, value in exogenous_components.items()] + + self.set_components(new_components) + + # show a warning message if exogenous values are needed for a + # dependency + new_components = [ + key for key, value in new_components.items() if value is np.nan] + if new_components: + warnings.warn( + "Exogenous components for the following variables are " + f"necessary but not given:\n\t{', '.join(new_components)}" + "\n\n Please, set them before running the model using " + "set_components method...") + + # re-assign the cache_type and initialization order + self._assign_cache_type() + self._get_initialize_order() + + def get_dependencies(self, vars=[], modules=[]): + """ + Get the dependencies of a set of variables or modules. + + Parameters + ---------- + vars: set or list of strings (optional) + Variables to get the dependencies from. + It can be an empty list if the dependencies are computed only + using modules. Default is an empty list. + modules: set or list of strings (optional) + Modules to get the dependencies from. + It can be an empty list if the dependencies are computed only + using variables. Default is an empty list. Can select a full + module or a submodule by passing the path without the .py, e.g.: + "view_1/submodule1". + + Returns + ------- + dependencies: set + Set of dependencies nedded to run vars. + + Notes + ----- + modules can be only passed when the model has been split in + different files during translation. + + Examples + -------- + >>> model.get_dependencies( + ... vars=["Room Temperature", "Teacup temperature"]) + Selected variables (total 1): + room_temperature, teacup_temperature + Stateful objects integrated with the selected variables (total 1): + _integ_teacup_temperature + + >>> model.get_dependencies( + ... modules=["view_1", "view_2/subview_1"]) + Selected variables (total 4): + var1, var2, stock1, delay1 + Dependencies for initialization only (total 1): + initial_value_stock1 + Dependencies that may change over time (total 2): + stock3 + Stateful objects integrated with the selected variables (total 1): + _integ_stock1, _delay_fixed_delay1 + + >>> model.get_dependencies( + ... vars=["stock3"], + ... modules=["view_1", "view_2/subview_1"]) + Selected variables (total 4): + var1, var2, stock1, stock3, delay1 + Dependencies for initialization only (total 1): + initial_value_stock1, initial_value_stock3 + Stateful objects integrated with the selected variables (total 1): + _integ_stock1, _integ_stock3, _delay_fixed_delay1 + + """ + c_vars, d_vars, s_deps = self._get_dependencies(vars, modules) + + text = utils.print_objects_format(c_vars, "Selected variables") + + if d_vars["initial"]: + text += utils.print_objects_format( + d_vars["initial"], + "\nDependencies for initialization only") + if d_vars["step"]: + text += utils.print_objects_format( + d_vars["step"], + "\nDependencies that may change over time") + if d_vars["lookup"]: + text += utils.print_objects_format( + d_vars["lookup"], + "\nLookup table dependencies") + + text += utils.print_objects_format( + s_deps, + "\nStateful objects integrated with the selected variables") + + print(text) + + def _get_dependencies(self, vars=[], modules=[]): + """ + Get the dependencies of a set of variables or modules. 
+ + Parameters + ---------- + vars: set or list of strings (optional) + Variables to get the dependencies from. + It can be an empty list if the dependencies are computed only + using modules. Default is an empty list. + modules: set or list of strings (optional) + Modules to get the dependencies from. + It can be an empty list if the dependencies are computed only + using variables. Default is an empty list. Can select a full + module or a submodule by passing the path without the .py, e.g.: + "view_1/submodule1". + + Returns + ------- + c_vars: set + Set of all selected model variables. + d_deps: dict of sets + Dictionary of dependencies nedded to run vars and modules. + s_deps: set + Set of stateful objects to update when integrating selected + model variables. + + """ + def check_dep(dependencies, initial=False): + for dep in dependencies: + if dep in c_vars or dep.startswith("__"): + pass + elif dep.startswith("_"): + s_deps.add(dep) + dep = self._dependencies[dep] + check_dep(dep["initial"], True) + check_dep(dep["step"]) + else: + if initial and dep not in d_deps["step"]\ + and dep not in d_deps["lookup"]: + d_deps["initial"].add(dep) + else: + if dep in d_deps["initial"]: + d_deps["initial"].remove(dep) + if self.get_args(dep): + d_deps["lookup"].add(dep) + else: + d_deps["step"].add(dep) + + d_deps = {"initial": set(), "step": set(), "lookup": set()} + s_deps = set() + c_vars = {"time", "time_step", "initial_time", "final_time", "saveper"} + for var in vars: + py_name = utils.get_key_and_value_by_insensitive_key_or_value( + var, + self._namespace)[1] + c_vars.add(py_name) + for module in modules: + c_vars.update(self.get_vars_in_module(module)) + + for var in c_vars: + if var == "time": + continue + check_dep(self._dependencies[var]) + + return c_vars, d_deps, s_deps + + def get_vars_in_module(self, module): + """ + Return the name of python vars in a module. + + Parameters + ---------- + module: str + Name of the module to search in. + + Returns + ------- + vars: set + Set of varible names in the given module. + + """ + if self._modules: + module_content = self._modules.copy() + else: + raise ValueError( + "Trying to get a module from a non-modularized model") + + try: + # get the module or the submodule content + for submodule in module.split("/"): + module_content = module_content[submodule] + module_content = [module_content] + except KeyError: + raise NameError( + f"Module or submodule '{submodule}' not found...\n") + + vars, new_content = set(), [] + + while module_content: + # find the vars in the module or the submodule + for content in module_content: + if isinstance(content, list): + vars.update(content) + else: + [new_content.append(value) for value in content.values()] + + module_content, new_content = new_content, [] + + return vars + + def reload(self): + """ + Reloads the model from the translated model file, so that all the + parameters are back to their original value. + """ + self.__init__(self.py_model_file, data_files=self.data_files, + initialize=True, + missing_values=self.missing_values) + + def _default_return_columns(self, which): + """ + Return a list of the model elements tha change on time that + does not include lookup other functions that take parameters + or run-cached functions. + + Parameters + ---------- + which: str or None + If it is 'step' only cache step elements will be returned. + Else cache 'step' and 'run' elements will be returned. + Default is None. 
+ + Returns + ------- + return_columns: list + List of columns to return + + """ + if which == 'step': + types = ['step'] + else: + types = ['step', 'run'] + + return_columns = [] + + for key, pykey in self._namespace.items(): + if pykey in self.cache_type and self.cache_type[pykey] in types\ + and not self.get_args(pykey): + + return_columns.append(key) + + return return_columns + + def _split_capture_elements(self, capture_elements): + """ + Splits the capture elements list between those with run cache + and others. + + Parameters + ---------- + capture_elements: list + Captured elements list + + Returns + ------- + capture_dict: dict + Dictionary of sets with keywords step and run. + + """ + capture_dict = {'step': set(), 'run': set(), None: set()} + [capture_dict[self.cache_type[element]].add(element) + for element in capture_elements] + return capture_dict + + def set_initial_condition(self, initial_condition): + """ Set the initial conditions of the integration. + + Parameters + ---------- + initial_condition : str or (float, dict) + The starting time, and the state of the system (the values of + all the stocks) at that starting time. 'original' or 'o'uses + model-file specified initial condition. 'current' or 'c' uses + the state of the model after the previous execution. Other str + objects, loads initial conditions from the pickle file with the + given name.(float, dict) tuple lets the user specify a starting + time (float) and (possibly partial) dictionary of initial values + for stock (stateful) objects. + + Examples + -------- + >>> model.set_initial_condition('original') + >>> model.set_initial_condition('current') + >>> model.set_initial_condition('exported_pickle.pic') + >>> model.set_initial_condition((10, {'teacup_temperature': 50})) + + See Also + -------- + model.set_initial_value() + + """ + + if isinstance(initial_condition, tuple): + self.initialize() + self.set_initial_value(*initial_condition) + elif isinstance(initial_condition, str): + if initial_condition.lower() in ["original", "o"]: + self.time.set_control_vars( + initial_time=self.components._control_vars["initial_time"]) + self.initialize() + elif initial_condition.lower() in ["current", "c"]: + pass + else: + self.import_pickle(initial_condition) + else: + raise TypeError( + "Invalid initial conditions. " + + "Check documentation for valid entries or use " + + "'help(model.set_initial_condition)'.") + + def _euler_step(self, dt): + """ + Performs a single step in the euler integration, + updating stateful components + + Parameters + ---------- + dt : float + This is the amount to increase time by this step + + """ + self.state = self.state + self.ddt() * dt + + def _integrate(self, capture_elements): + """ + Performs euler integration. + + Parameters + ---------- + capture_elements: set + Which model elements to capture - uses pysafe names. + + Returns + ------- + outputs: pandas.DataFrame + Output capture_elements data. 
+ + """ + # necessary to have always a non-xaray object for appending objects + # to the DataFrame time will always be a model element and not saved + # TODO: find a better way of saving outputs + capture_elements.add("time") + outputs = pd.DataFrame(columns=capture_elements) + + if self.progress: + # initialize progress bar + progressbar = utils.ProgressBar( + int((self.time.final_time()-self.time())/self.time.time_step()) + ) + else: + # when None is used the update will do nothing + progressbar = utils.ProgressBar(None) + + while self.time.in_bounds(): + if self.time.in_return(): + outputs.at[self.time.round()] = [ + getattr(self.components, key)() + for key in capture_elements] + self._euler_step(self.time.time_step()) + self.time.update(self.time()+self.time.time_step()) + self.clean_caches() + progressbar.update() + + # need to add one more time step, because we run only the state + # updates in the previous loop and thus may be one short. + if self.time.in_return(): + outputs.at[self.time.round()] = [getattr(self.components, key)() + for key in capture_elements] + + progressbar.finish() + + # delete time column as it was created only for avoiding errors + # of appending data. See previous TODO. + del outputs["time"] + return outputs + + def _add_run_elements(self, df, capture_elements): + """ + Adds constant elements to a dataframe. + + Parameters + ---------- + df: pandas.DataFrame + Dataframe to add elements. + + capture_elements: list + List of constant elements + + Returns + ------- + None + + """ + nt = len(df.index.values) + for element in capture_elements: + df[element] = [getattr(self.components, element)()] * nt diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py index f28ca445..41aac9f8 100644 --- a/pysd/py_backend/statefuls.py +++ b/pysd/py_backend/statefuls.py @@ -1,26 +1,15 @@ """ The stateful objects are used and updated each time step with an update -method. This include from basic Integ class objects until the Model -class objects. +method. This include Integs, Delays, Forecasts, Smooths, and Trends, +between others. The Macro class and Model class are also Stateful class +child. But defined in the file model.py. """ -import inspect -import pickle import warnings -from typing import Union import numpy as np -import pandas as pd import xarray as xr -from . import utils from .functions import zidz, if_then_else -from .external import External, Excels -from .cache import Cache, constant_cache -from .data import TabData -from .lookups import HardcodedLookups -from .components import Components, Time - -from pysd._version import __version__ small_vensim = 1e-6 # What is considered zero according to Vensim Help @@ -575,1685 +564,3 @@ def initialize(self, init_val=None): def export(self): return {'state': self.state} - - -class Macro(DynamicStateful): - """ - The Macro class implements a stateful representation of the system, - and contains the majority of methods for accessing and modifying - components. - - When the instance in question also serves as the root model object - (as opposed to a macro or submodel within another model) it will have - added methods to facilitate execution. - - The Macro object will be created with components drawn from a - translated python model file. - - Parameters - ---------- - py_model_file: str or pathlib.Path - Filename of a model or macro which has already been converted - into a python format. - params: dict or None (optional) - Dictionary of the macro parameters. Default is None. 
- return_func: str or None (optional) - The name of the function to return from the macro. Default is None. - time: components.Time or None (optional) - Time object for integration. If None a new time object will - be generated (for models), if passed the time object will be - used (for macros). Default is None. - time_initialization: callable or None - Time to set at the begginning of the Macro. Default is None. - data_files: dict or list or str or None - The dictionary with keys the name of file and variables to - load the data from there. Or the list of names or name of the - file to search the data in. Only works for TabData type object - and it is neccessary to provide it. Default is None. - py_name: str or None - The name of the Macro object. Default is None. - - """ - def __init__(self, py_model_file, params=None, return_func=None, - time=None, time_initialization=None, data_files=None, - py_name=None): - super().__init__() - self.time = time - self.time_initialization = time_initialization - self.cache = Cache() - self.py_name = py_name - self.external_loaded = False - self.lookups_loaded = False - self.components = Components(str(py_model_file), self.set_components) - - if __version__.split(".")[0]\ - != self.get_pysd_compiler_version().split(".")[0]: - raise ImportError( - "\n\nNot able to import the model. " - + "The model was translated with a " - + "not compatible version of PySD:" - + "\n\tPySD " + self.get_pysd_compiler_version() - + "\n\nThe current version of PySd is:" - + "\n\tPySD " + __version__ + "\n\n" - + "Please translate again the model with the function" - + " read_vensim or read_xmile.") - - self._namespace = self.components._components.component.namespace - self._dependencies =\ - self.components._components.component.dependencies.copy() - self._subscript_dict = getattr( - self.components._components, "_subscript_dict", {}) - self._modules = getattr( - self.components._components, "_modules", {}) - - self._doc = self._build_doc() - - if params is not None: - # add params to namespace - self._namespace.update(self.components._components._params) - # create new components with the params - self.set_components(params, new=True) - # update dependencies - for param in params: - self._dependencies[ - self._namespace[param]] = {"time"} - - # Get the collections of stateful elements and external elements - self._stateful_elements = { - name: getattr(self.components, name) - for name in dir(self.components) - if isinstance(getattr(self.components, name), Stateful) - } - self._dynamicstateful_elements = [ - getattr(self.components, name) for name in dir(self.components) - if isinstance(getattr(self.components, name), DynamicStateful) - ] - self._external_elements = [ - getattr(self.components, name) for name in dir(self.components) - if isinstance(getattr(self.components, name), External) - ] - self._macro_elements = [ - getattr(self.components, name) for name in dir(self.components) - if isinstance(getattr(self.components, name), Macro) - ] - - self._data_elements = [ - getattr(self.components, name) for name in dir(self.components) - if isinstance(getattr(self.components, name), TabData) - ] - - self._lookup_elements = [ - getattr(self.components, name) for name in dir(self.components) - if isinstance(getattr(self.components, name), HardcodedLookups) - ] - - if data_files: - self._get_data(data_files) - - self._assign_cache_type() - self._get_initialize_order() - - if return_func is not None: - self.return_func = getattr(self.components, return_func) - else: - 
self.return_func = lambda: 0 - - self.py_model_file = str(py_model_file) - - def __call__(self): - return self.return_func() - - @property - def doc(self) -> pd.DataFrame: - """ - The documentation of the model. - """ - return self._doc.copy() - - @property - def namespace(self) -> dict: - """ - The namespace dictionary of the model. - """ - return self._namespace.copy() - - @property - def dependencies(self) -> dict: - """ - The dependencies dictionary of the model. - """ - return self._dependencies.copy() - - @property - def subscripts(self) -> dict: - """ - The subscripts dictionary of the model. - """ - return self._subscript_dict.copy() - - @property - def modules(self) -> Union[dict, None]: - """ - The dictionary of modules of the model. If the model is not - split by modules it returns None. - """ - return self._modules.copy() or None - - def clean_caches(self): - """ - Clean the cahce of the object and the macros objects that it - contains - """ - self.cache.clean() - # if nested macros - [macro.clean_caches() for macro in self._macro_elements] - - def _get_data(self, data_files): - if isinstance(data_files, dict): - for data_file, vars in data_files.items(): - for var in vars: - found = False - for element in self._data_elements: - if var in [element.py_name, element.real_name]: - element.load_data(data_file) - found = True - break - if not found: - raise ValueError( - f"'{var}' not found as model data variable") - - else: - for element in self._data_elements: - element.load_data(data_files) - - def _get_initialize_order(self): - """ - Get the initialization order of the stateful elements - and their the full dependencies. - """ - # get the full set of dependencies to initialize an stateful object - # includying all levels - self.stateful_initial_dependencies = { - ext: set() - for ext in self._dependencies - if (ext.startswith("_") and not ext.startswith("_active_initial_")) - } - for element in self.stateful_initial_dependencies: - self._get_full_dependencies( - element, self.stateful_initial_dependencies[element], - "initial") - - # get the full dependencies of stateful objects taking into account - # only other objects - current_deps = { - element: [ - dep for dep in deps - if dep in self.stateful_initial_dependencies - ] for element, deps in self.stateful_initial_dependencies.items() - } - - # get initialization order of the stateful elements - self.initialize_order = [] - delete = True - while delete: - delete = [] - for element in current_deps: - if not current_deps[element]: - # if stateful element has no deps on others - # add to the queue to initialize - self.initialize_order.append(element) - delete.append(element) - for element2 in current_deps: - # remove dependency on the initialized element - if element in current_deps[element2]: - current_deps[element2].remove(element) - # delete visited elements - for element in delete: - del current_deps[element] - - if current_deps: - # if current_deps is not an empty set there is a circular - # reference between stateful objects - raise ValueError( - 'Circular initialization...\n' - + 'Not able to initialize the following objects:\n\t' - + '\n\t'.join(current_deps)) - - def _get_full_dependencies(self, element, dep_set, stateful_deps): - """ - Get all dependencies of an element, i.e., also get the dependencies - of the dependencies. When finding an stateful element only dependencies - for initialization are considered. - - Parameters - ---------- - element: str - Element to get the full dependencies. 
- dep_set: set - Set to include the dependencies of the element. - stateful_deps: "initial" or "step" - The type of dependencies to take in the case of stateful objects. - - Returns - ------- - None - - """ - deps = self._dependencies[element] - if element.startswith("_"): - deps = deps[stateful_deps] - for dep in deps: - if dep not in dep_set and not dep.startswith("__")\ - and dep != "time": - dep_set.add(dep) - self._get_full_dependencies(dep, dep_set, stateful_deps) - - def _add_constant_cache(self): - self.constant_funcs = set() - for element, cache_type in self.cache_type.items(): - if cache_type == "run": - self.components._set_component( - element, - constant_cache(getattr(self.components, element)) - ) - self.constant_funcs.add(element) - - def _remove_constant_cache(self): - for element in self.constant_funcs: - self.components._set_component( - element, - getattr(self.components, element).function) - self.constant_funcs = set() - - def _assign_cache_type(self): - """ - Assigns the cache type to all the elements from the namespace. - """ - self.cache_type = {"time": None} - - for element in self._namespace.values(): - if element not in self.cache_type\ - and element in self._dependencies: - self._assign_cache(element) - - for element, cache_type in self.cache_type.items(): - if cache_type is not None: - if element not in self.cache.cached_funcs\ - and self._count_calls(element) > 1: - self.components._set_component( - element, - self.cache(getattr(self.components, element))) - self.cache.cached_funcs.add(element) - - def _count_calls(self, element): - n_calls = 0 - for subelement in self._dependencies: - if subelement.startswith("_") and\ - element in self._dependencies[subelement]["step"]: - if element in\ - self._dependencies[subelement]["initial"]: - n_calls +=\ - 2*self._dependencies[subelement]["step"][element] - else: - n_calls +=\ - self._dependencies[subelement]["step"][element] - elif (not subelement.startswith("_") and - element in self._dependencies[subelement]): - n_calls +=\ - self._dependencies[subelement][element] - - return n_calls - - def _assign_cache(self, element): - """ - Assigns the cache type to the given element and its dependencies if - needed. - - Parameters - ---------- - element: str - Element name. - - Returns - ------- - None - - """ - if not self._dependencies[element]: - self.cache_type[element] = "run" - elif "__lookup__" in self._dependencies[element]: - self.cache_type[element] = None - elif self._isdynamic(self._dependencies[element]): - self.cache_type[element] = "step" - else: - self.cache_type[element] = "run" - for subelement in self._dependencies[element]: - if subelement.startswith("_initial_")\ - or subelement.startswith("__"): - continue - if subelement not in self.cache_type: - self._assign_cache(subelement) - if self.cache_type[subelement] == "step": - self.cache_type[element] = "step" - break - - def _isdynamic(self, dependencies): - """ - - Parameters - ---------- - dependencies: iterable - List of dependencies. - - Returns - ------- - isdynamic: bool - True if 'time' or a dynamic stateful objects is in dependencies. 
- - """ - if "time" in dependencies: - return True - for dep in dependencies: - if dep.startswith("_") and not dep.startswith("_initial_")\ - and not dep.startswith("__"): - return True - return False - - def get_pysd_compiler_version(self): - """ - Returns the version of pysd complier that used for generating - this model - """ - return self.components.__pysd_version__ - - def initialize(self): - """ - This function initializes the external objects and stateful objects - in the given order. - """ - # Initialize time - if self.time is None: - self.time = self.time_initialization() - - # Reset time to the initial one - self.time.reset() - self.cache.clean() - - self.components._init_outer_references({ - 'scope': self, - 'time': self.time - }) - - if not self.lookups_loaded: - # Initialize HardcodedLookups elements - for element in self._lookup_elements: - element.initialize() - - self.lookups_loaded = True - - if not self.external_loaded: - # Initialize external elements - for element in self._external_elements: - element.initialize() - - # Remove Excel data from memory - Excels.clean() - - self.external_loaded = True - - # Initialize stateful objects - for element_name in self.initialize_order: - self._stateful_elements[element_name].initialize() - - def ddt(self): - return np.array([component.ddt() for component - in self._dynamicstateful_elements], dtype=object) - - @property - def state(self): - return np.array([component.state for component - in self._dynamicstateful_elements], dtype=object) - - @state.setter - def state(self, new_value): - [component.update(val) for component, val - in zip(self._dynamicstateful_elements, new_value)] - - def export(self, file_name): - """ - Export stateful values to pickle file. - - Parameters - ---------- - file_name: str - Name of the file to export the values. - - """ - warnings.warn( - "\nCompatibility of exported states could be broken between" - " different versions of PySD or xarray, current versions:\n" - f"\tPySD {__version__}\n\txarray {xr.__version__}\n" - ) - stateful_elements = { - name: element.export() - for name, element in self._stateful_elements.items() - } - - with open(file_name, 'wb') as file: - pickle.dump( - (self.time(), - stateful_elements, - {'pysd': __version__, 'xarray': xr.__version__} - ), file) - - def import_pickle(self, file_name): - """ - Import stateful values from pickle file. - - Parameters - ---------- - file_name: str - Name of the file to import the values from. - - """ - with open(file_name, 'rb') as file: - time, stateful_dict, metadata = pickle.load(file) - - if __version__ != metadata['pysd']\ - or xr.__version__ != metadata['xarray']: # pragma: no cover - warnings.warn( - "\nCompatibility of exported states could be broken between" - " different versions of PySD or xarray. Current versions:\n" - f"\tPySD {__version__}\n\txarray {xr.__version__}\n" - "Loaded versions:\n" - f"\tPySD {metadata['pysd']}\n\txarray {metadata['xarray']}\n" - ) - - self.set_stateful(stateful_dict) - self.time.set_control_vars(initial_time=time) - - def get_args(self, param): - """ - Returns the arguments of a model element. - - Parameters - ---------- - param: str or func - The model element name or function. - - Returns - ------- - args: list - List of arguments of the function. 
- - Examples - -------- - >>> model.get_args('birth_rate') - >>> model.get_args('Birth Rate') - - """ - if isinstance(param, str): - func_name = utils.get_key_and_value_by_insensitive_key_or_value( - param, - self._namespace)[1] or param - - func = getattr(self.components, func_name) - else: - func = param - - if hasattr(func, 'args'): - # cached functions - return func.args - else: - # regular functions - args = inspect.getfullargspec(func)[0] - if 'self' in args: - args.remove('self') - return args - - def get_coords(self, param): - """ - Returns the coordinates and dims of a model element. - - Parameters - ---------- - param: str or func - The model element name or function. - - Returns - ------- - (coords, dims) or None: (dict, list) or None - The coords and the dimensions of the element if it has. - Otherwise, returns None. - - Examples - -------- - >>> model.get_coords('birth_rate') - >>> model.get_coords('Birth Rate') - - """ - if isinstance(param, str): - func_name = utils.get_key_and_value_by_insensitive_key_or_value( - param, - self._namespace)[1] or param - - func = getattr(self.components, func_name) - - else: - func = param - - if hasattr(func, "subscripts"): - dims = func.subscripts - if not dims: - return None - coords = {dim: self.components._subscript_dict[dim] - for dim in dims} - return coords, dims - elif hasattr(func, "state") and isinstance(func.state, xr.DataArray): - value = func() - else: - return None - - dims = list(value.dims) - coords = {coord: list(value.coords[coord].values) - for coord in value.coords} - return coords, dims - - def __getitem__(self, param): - """ - Returns the current value of a model component. - - Parameters - ---------- - param: str or func - The model element name. - - Returns - ------- - value: float or xarray.DataArray - The value of the model component. - - Examples - -------- - >>> model['birth_rate'] - >>> model['Birth Rate'] - - Note - ---- - It will crash if the model component takes arguments. - - """ - func_name = utils.get_key_and_value_by_insensitive_key_or_value( - param, - self._namespace)[1] or param - - if self.get_args(getattr(self.components, func_name)): - raise ValueError( - "Trying to get the current value of a lookup " - "to get all the values with the series data use " - "model.get_series_data(param)\n\n") - - return getattr(self.components, func_name)() - - def get_series_data(self, param): - """ - Returns the original values of a model lookup/data component. - - Parameters - ---------- - param: str - The model lookup/data element name. - - Returns - ------- - value: xarray.DataArray - Array with the value of the interpolating series - in the first dimension. - - Examples - -------- - >>> model['room_temperature'] - >>> model['Room temperature'] - - """ - func_name = utils.get_key_and_value_by_insensitive_key_or_value( - param, - self._namespace)[1] or param - - if func_name.startswith("_ext_"): - return getattr(self.components, func_name).data - elif "__data__" in self._dependencies[func_name]: - return getattr( - self.components, - self._dependencies[func_name]["__data__"] - ).data - elif "__lookup__" in self._dependencies[func_name]: - return getattr( - self.components, - self._dependencies[func_name]["__lookup__"] - ).data - else: - raise ValueError( - "Trying to get the values of a constant variable. " - "'model.get_series_data' only works lookups/data objects.\n\n") - - def set_components(self, params, new=False): - """ Set the value of exogenous model elements. 
- Element values can be passed as keyword=value pairs in the - function call. Values can be numeric type or pandas Series. - Series will be interpolated by integrator. - - Examples - -------- - >>> model.set_components({'birth_rate': 10}) - >>> model.set_components({'Birth Rate': 10}) - - >>> br = pandas.Series(index=range(30), values=np.sin(range(30)) - >>> model.set_components({'birth_rate': br}) - - - """ - # TODO: allow the params argument to take a pandas dataframe, where - # column names are variable names. However some variables may be - # constant or have no values for some index. This should be processed. - # TODO: make this compatible with loading outputs from other files - - for key, value in params.items(): - func_name = utils.get_key_and_value_by_insensitive_key_or_value( - key, - self._namespace)[1] - - if isinstance(value, np.ndarray) or isinstance(value, list): - raise TypeError( - 'When setting ' + key + '\n' - 'Setting subscripted must be done using a xarray.DataArray' - ' with the correct dimensions or a constant value ' - '(https://pysd.readthedocs.io/en/master/basic_usage.html)') - - if func_name is None: - raise NameError( - "\n'%s' is not recognized as a model component." - % key) - - if new: - func = None - dims = None - else: - func = getattr(self.components, func_name) - _, dims = self.get_coords(func) or (None, None) - - # if the variable is a lookup or a data we perform the change in - # the object they call - func_type = getattr(func, "type", None) - if func_type in ["Lookup", "Data"]: - # getting the object from original dependencies - obj = self._dependencies[func_name][f"__{func_type.lower()}__"] - getattr( - self.components, - obj - ).set_values(value) - - # Update dependencies - if func_type == "Data": - if isinstance(value, pd.Series): - self._dependencies[func_name] = { - "time": 1, "__data__": obj - } - else: - self._dependencies[func_name] = {"__data__": obj} - - continue - - if isinstance(value, pd.Series): - new_function, deps = self._timeseries_component( - value, dims) - self._dependencies[func_name] = deps - elif callable(value): - new_function = value - # Using step cache adding time as dependency - # TODO it would be better if we can parse the content - # of the function to get all the dependencies - self._dependencies[func_name] = {"time": 1} - - else: - new_function = self._constant_component(value, dims) - self._dependencies[func_name] = {} - - # this won't handle other statefuls... - if '_integ_' + func_name in dir(self.components): - warnings.warn("Replacing the equation of stock" - + "{} with params".format(key), - stacklevel=2) - - new_function.__name__ = func_name - if dims: - new_function.dims = dims - self.components._set_component(func_name, new_function) - if func_name in self.cache.cached_funcs: - self.cache.cached_funcs.remove(func_name) - - def _timeseries_component(self, series, dims): - """ Internal function for creating a timeseries model element """ - # this is only called if the set_component function recognizes a - # pandas series - # TODO: raise a warning if extrapolating from the end of the series. 
- # TODO: data type variables should be creted using a Data object - # lookup type variables should be created using a Lookup object - - if isinstance(series.values[0], xr.DataArray): - # the interpolation will be time dependent - return lambda: utils.rearrange(xr.concat( - series.values, - series.index).interp(concat_dim=self.time()).reset_coords( - 'concat_dim', drop=True), - dims, self._subscript_dict), {'time': 1} - - elif dims: - # the interpolation will be time dependent - return lambda: utils.rearrange( - np.interp(self.time(), series.index, series.values), - dims, self._subscript_dict), {'time': 1} - - else: - # the interpolation will be time dependent - return lambda:\ - np.interp(self.time(), series.index, series.values),\ - {'time': 1} - - def _constant_component(self, value, dims): - """ Internal function for creating a constant model element """ - if dims: - return lambda: utils.rearrange( - value, dims, self._subscript_dict) - - else: - return lambda: value - - def set_initial_value(self, t, initial_value): - """ Set the system initial value. - - Parameters - ---------- - t : numeric - The system time - - initial_value : dict - A (possibly partial) dictionary of the system initial values. - The keys to this dictionary may be either pysafe names or - original model file names - - """ - self.time.set_control_vars(initial_time=t) - stateful_name = "_NONE" - modified_statefuls = set() - - for key, value in initial_value.items(): - component_name =\ - utils.get_key_and_value_by_insensitive_key_or_value( - key, self._namespace)[1] - if component_name is not None: - if self._dependencies[component_name]: - deps = list(self._dependencies[component_name]) - if len(deps) == 1 and deps[0] in self.initialize_order: - stateful_name = deps[0] - else: - component_name = key - stateful_name = key - - try: - _, dims = self.get_coords(component_name) - except TypeError: - dims = None - - if isinstance(value, xr.DataArray)\ - and not set(value.dims).issubset(set(dims)): - raise ValueError( - f"\nInvalid dimensions for {component_name}." - f"It should be a subset of {dims}, " - f"but passed value has {list(value.dims)}") - - if isinstance(value, np.ndarray) or isinstance(value, list): - raise TypeError( - 'When setting ' + key + '\n' - 'Setting subscripted must be done using a xarray.DataArray' - ' with the correct dimensions or a constant value ' - '(https://pysd.readthedocs.io/en/master/basic_usage.html)') - - # Try to update stateful component - try: - element = getattr(self.components, stateful_name) - if dims: - value = utils.rearrange( - value, dims, - self._subscript_dict) - element.initialize(value) - modified_statefuls.add(stateful_name) - except NameError: - # Try to override component - raise ValueError( - f"\nUnrecognized stateful '{component_name}'. If you want" - " to set a value of a regular component. 
Use params={" - f"'{component_name}': {value}" + "} instead.") - - self.clean_caches() - - # get the elements to initialize - elements_to_initialize =\ - self._get_elements_to_initialize(modified_statefuls) - - # Initialize remaining stateful objects - for element_name in self.initialize_order: - if element_name in elements_to_initialize: - self._stateful_elements[element_name].initialize() - - def _get_elements_to_initialize(self, modified_statefuls): - elements_to_initialize = set() - for stateful, deps in self.stateful_initial_dependencies.items(): - if stateful in modified_statefuls: - # if elements initial conditions have been modified - # we should not modify it - continue - for modified_sateteful in modified_statefuls: - if modified_sateteful in deps: - # if element has dependencies on a modified element - # we should re-initialize it - elements_to_initialize.add(stateful) - continue - - return elements_to_initialize - - def set_stateful(self, stateful_dict): - """ - Set stateful values. - - Parameters - ---------- - stateful_dict: dict - Dictionary of the stateful elements and the attributes to change. - - """ - for element, attrs in stateful_dict.items(): - for attr, value in attrs.items(): - setattr(getattr(self.components, element), attr, value) - - def _build_doc(self): - """ - Formats a table of documentation strings to help users remember - variable names, and understand how they are translated into - python safe names. - - Returns - ------- - docs_df: pandas dataframe - Dataframe with columns for the model components: - - Real names - - Python safe identifiers (as used in model.components) - - Units string - - Documentation strings from the original model file - """ - collector = [] - for name, pyname in self._namespace.items(): - element = getattr(self.components, pyname) - collector.append({ - 'Real Name': name, - 'Py Name': pyname, - 'Subscripts': element.subscripts, - 'Units': element.units, - 'Limits': element.limits, - 'Type': element.type, - 'Subtype': element.subtype, - 'Comment': element.__doc__.strip().strip("\n").strip() - if element.__doc__ else None - }) - - return pd.DataFrame( - collector - ).sort_values(by="Real Name").reset_index(drop=True) - - def __str__(self): - """ Return model source files """ - - # JT: Might be helpful to return not only the source file, but - # also how the instance differs from that source file. This - # would give a more accurate view of the current model. - string = 'Translated Model File: ' + self.py_model_file - if hasattr(self, 'mdl_file'): - string += '\n Original Model File: ' + self.mdl_file - - return string - - -class Model(Macro): - """ - The Model class implements a stateful representation of the system. - It inherits methods from the Macro class to integrate the model and - access and modify model components. It also contains the main - methods for running the model. - - The Model object will be created with components drawn from a - translated python model file. - - Parameters - ---------- - py_model_file: str or pathlib.Path - Filename of a model which has already been converted into a - python format. - data_files: dict or list or str or None - The dictionary with keys the name of file and variables to - load the data from there. Or the list of names or name of the - file to search the data in. Only works for TabData type object - and it is neccessary to provide it. Default is None. - initialize: bool - If False, the model will not be initialize when it is loaded. - Default is True. 
- missing_values : str ("warning", "error", "ignore", "keep") (optional) - What to do with missing values. If "warning" (default) - shows a warning message and interpolates the values. - If "raise" raises an error. If "ignore" interpolates - the values without showing anything. If "keep" it will keep - the missing values, this option may cause the integration to - fail, but it may be used to check the quality of the data. - - """ - def __init__(self, py_model_file, data_files, initialize, missing_values): - """ Sets up the python objects """ - super().__init__(py_model_file, None, None, Time(), - data_files=data_files) - self.time.stage = 'Load' - self.time.set_control_vars(**self.components._control_vars) - self.data_files = data_files - self.missing_values = missing_values - if initialize: - self.initialize() - - def initialize(self): - """ Initializes the simulation model """ - self.time.stage = 'Initialization' - External.missing = self.missing_values - super().initialize() - - def run(self, params=None, return_columns=None, return_timestamps=None, - initial_condition='original', final_time=None, time_step=None, - saveper=None, reload=False, progress=False, flatten_output=False, - cache_output=True): - """ - Simulate the model's behavior over time. - Return a pandas dataframe with timestamps as rows, - model elements as columns. - - Parameters - ---------- - params: dict (optional) - Keys are strings of model component names. - Values are numeric or pandas Series. - Numeric values represent constants over the model integration. - Timeseries will be interpolated to give time-varying input. - - return_timestamps: list, numeric, ndarray (1D) (optional) - Timestamps in model execution at which to return state information. - Defaults to model-file specified timesteps. - - return_columns: list, 'step' or None (optional) - List of string model component names, returned dataframe - will have corresponding columns. If 'step' only variables with - cache step will be returned. If None, variables with cache step - and run will be returned. Default is None. - - initial_condition: str or (float, dict) (optional) - The starting time, and the state of the system (the values of - all the stocks) at that starting time. 'original' or 'o'uses - model-file specified initial condition. 'current' or 'c' uses - the state of the model after the previous execution. Other str - objects, loads initial conditions from the pickle file with the - given name.(float, dict) tuple lets the user specify a starting - time (float) and (possibly partial) dictionary of initial values - for stock (stateful) objects. Default is 'original'. - - final_time: float or None - Final time of the simulation. If float, the given value will be - used to compute the return_timestamps (if not given) and as a - final time. If None the last value of return_timestamps will be - used as a final time. Default is None. - - time_step: float or None - Time step of the simulation. If float, the given value will be - used to compute the return_timestamps (if not given) and - euler time series. If None the default value from components - will be used. Default is None. - - saveper: float or None - Saving step of the simulation. If float, the given value will be - used to compute the return_timestamps (if not given). If None - the default value from components will be used. Default is None. - - reload : bool (optional) - If True, reloads the model from the translated model file - before making changes. Default is False. 
- - progress : bool (optional) - If True, a progressbar will be shown during integration. - Default is False. - - flatten_output: bool (optional) - If True, once the output dataframe has been formatted will - split the xarrays in new columns following vensim's naming - to make a totally flat output. Default is False. - - cache_output: bool (optional) - If True, the number of calls of outputs variables will be increased - in 1. This helps caching output variables if they are called only - once. For performance reasons, if time step = saveper it is - recommended to activate this feature, if time step << saveper - it is recommended to deactivate it. Default is True. - - Examples - -------- - >>> model.run(params={'exogenous_constant': 42}) - >>> model.run(params={'exogenous_variable': timeseries_input}) - >>> model.run(return_timestamps=[1, 2, 3, 4, 10]) - >>> model.run(return_timestamps=10) - >>> model.run(return_timestamps=np.linspace(1, 10, 20)) - - See Also - -------- - pysd.set_components : handles setting model parameters - pysd.set_initial_condition : handles setting initial conditions - - """ - if reload: - self.reload() - - self.progress = progress - - self.time.add_return_timestamps(return_timestamps) - if self.time.return_timestamps is not None and not final_time: - # if not final time given the model will end in the list - # return timestamp (the list is reversed for popping) - if self.time.return_timestamps: - final_time = self.time.return_timestamps[0] - else: - final_time = self.time.next_return - - self.time.set_control_vars( - final_time=final_time, time_step=time_step, saveper=saveper) - - if params: - self.set_components(params) - - # update cache types after setting params - self._assign_cache_type() - - self.set_initial_condition(initial_condition) - - if return_columns is None or isinstance(return_columns, str): - return_columns = self._default_return_columns(return_columns) - - capture_elements, return_addresses = utils.get_return_elements( - return_columns, self._namespace) - - # create a dictionary splitting run cached and others - capture_elements = self._split_capture_elements(capture_elements) - - # include outputs in cache if needed - self._dependencies["OUTPUTS"] = { - element: 1 for element in capture_elements["step"] - } - if cache_output: - self._assign_cache_type() - self._add_constant_cache() - - # Run the model - self.time.stage = 'Run' - # need to clean cache to remove the values from active_initial - self.clean_caches() - - res = self._integrate(capture_elements['step']) - - del self._dependencies["OUTPUTS"] - - self._add_run_elements(res, capture_elements['run']) - self._remove_constant_cache() - - return_df = utils.make_flat_df(res, return_addresses, flatten_output) - - return return_df - - def select_submodel(self, vars=[], modules=[], exogenous_components={}): - """ - Select a submodel from the original model. After selecting a submodel - only the necessary stateful objects for integrating this submodel will - be computed. - - Parameters - ---------- - vars: set or list of strings (optional) - Variables to include in the new submodel. - It can be an empty list if the submodel is only selected by - module names. Default is an empty list. - - modules: set or list of strings (optional) - Modules to include in the new submodel. - It can be an empty list if the submodel is only selected by - variable names. Default is an empty list. Can select a full - module or a submodule by passing the path without the .py, e.g.: - "view_1/submodule1". 
- - exogenous_components: dictionary of parameters (optional) - Exogenous value to fix to the model variables that are needed - to run the selected submodel. The exogenous_components should - be passed as a dictionary in the same way it is done for - set_components method. By default it is an empty dict and - the needed exogenous components will be set to a numpy.nan value. - - Returns - ------- - None - - Notes - ----- - modules can be only passed when the model has been split in - different files during translation. - - Examples - -------- - >>> model.select_submodel( - ... vars=["Room Temperature", "Teacup temperature"]) - UserWarning: Selecting submodel, to run the full model again use model.reload() - - >>> model.select_submodel( - ... modules=["view_1", "view_2/subview_1"]) - UserWarning: Selecting submodel, to run the full model again use model.reload() - UserWarning: Exogenous components for the following variables are necessary but not given: - initial_value_stock1, stock3 - - >>> model.select_submodel( - ... vars=["stock3"], - ... modules=["view_1", "view_2/subview_1"]) - UserWarning: Selecting submodel, to run the full model again use model.reload() - UserWarning: Exogenous components for the following variables are necessary but not given: - initial_value_stock1, initial_value_stock3 - Please, set them before running the model using set_components method... - - >>> model.select_submodel( - ... vars=["stock3"], - ... modules=["view_1", "view_2/subview_1"], - ... exogenous_components={ - ... "initial_value_stock1": 3, - ... "initial_value_stock3": 5}) - UserWarning: Selecting submodel, to run the full model again use model.reload() - - """ - c_vars, d_vars, s_deps = self._get_dependencies(vars, modules) - warnings.warn( - "Selecting submodel, " - "to run the full model again use model.reload()") - - # get set of all dependencies and all variables to select - all_deps = d_vars["initial"].copy() - all_deps.update(d_vars["step"]) - all_deps.update(d_vars["lookup"]) - - all_vars = all_deps.copy() - all_vars.update(c_vars) - - # clean dependendies and namespace dictionaries, and remove - # the rows from the documentation - for real_name, py_name in self._namespace.copy().items(): - if py_name not in all_vars: - del self._namespace[real_name] - del self._dependencies[py_name] - self._doc.drop( - self._doc.index[self._doc["Real Name"] == real_name], - inplace=True - ) - - for py_name in self._dependencies.copy().keys(): - if py_name.startswith("_") and py_name not in s_deps: - del self._dependencies[py_name] - - # remove active initial from s_deps as they are "fake" objects - # in dependencies - s_deps = { - dep for dep in s_deps if not dep.startswith("_active_initial") - } - - # reassing the dictionary and lists of needed stateful objects - self._stateful_elements = { - name: getattr(self.components, name) - for name in s_deps - if isinstance(getattr(self.components, name), Stateful) - } - self._dynamicstateful_elements = [ - getattr(self.components, name) for name in s_deps - if isinstance(getattr(self.components, name), DynamicStateful) - ] - self._macro_elements = [ - getattr(self.components, name) for name in s_deps - if isinstance(getattr(self.components, name), Macro) - ] - - # keeping only needed external objects - ext_deps = set() - for values in self._dependencies.values(): - if "__external__" in values: - ext_deps.add(values["__external__"]) - self._external_elements = [ - getattr(self.components, name) for name in ext_deps - if isinstance(getattr(self.components, name), 
External) - ] - - # set all exogenous values to np.nan by default - new_components = {element: np.nan for element in all_deps} - # update exogenous values with the user input - [new_components.update( - { - utils.get_key_and_value_by_insensitive_key_or_value( - key, - self._namespace)[1]: value - }) for key, value in exogenous_components.items()] - - self.set_components(new_components) - - # show a warning message if exogenous values are needed for a - # dependency - new_components = [ - key for key, value in new_components.items() if value is np.nan] - if new_components: - warnings.warn( - "Exogenous components for the following variables are " - f"necessary but not given:\n\t{', '.join(new_components)}" - "\n\n Please, set them before running the model using " - "set_components method...") - - # re-assign the cache_type and initialization order - self._assign_cache_type() - self._get_initialize_order() - - def get_dependencies(self, vars=[], modules=[]): - """ - Get the dependencies of a set of variables or modules. - - Parameters - ---------- - vars: set or list of strings (optional) - Variables to get the dependencies from. - It can be an empty list if the dependencies are computed only - using modules. Default is an empty list. - modules: set or list of strings (optional) - Modules to get the dependencies from. - It can be an empty list if the dependencies are computed only - using variables. Default is an empty list. Can select a full - module or a submodule by passing the path without the .py, e.g.: - "view_1/submodule1". - - Returns - ------- - dependencies: set - Set of dependencies nedded to run vars. - - Notes - ----- - modules can be only passed when the model has been split in - different files during translation. - - Examples - -------- - >>> model.get_dependencies( - ... vars=["Room Temperature", "Teacup temperature"]) - Selected variables (total 1): - room_temperature, teacup_temperature - Stateful objects integrated with the selected variables (total 1): - _integ_teacup_temperature - - >>> model.get_dependencies( - ... modules=["view_1", "view_2/subview_1"]) - Selected variables (total 4): - var1, var2, stock1, delay1 - Dependencies for initialization only (total 1): - initial_value_stock1 - Dependencies that may change over time (total 2): - stock3 - Stateful objects integrated with the selected variables (total 1): - _integ_stock1, _delay_fixed_delay1 - - >>> model.get_dependencies( - ... vars=["stock3"], - ... modules=["view_1", "view_2/subview_1"]) - Selected variables (total 4): - var1, var2, stock1, stock3, delay1 - Dependencies for initialization only (total 1): - initial_value_stock1, initial_value_stock3 - Stateful objects integrated with the selected variables (total 1): - _integ_stock1, _integ_stock3, _delay_fixed_delay1 - - """ - c_vars, d_vars, s_deps = self._get_dependencies(vars, modules) - - text = utils.print_objects_format(c_vars, "Selected variables") - - if d_vars["initial"]: - text += utils.print_objects_format( - d_vars["initial"], - "\nDependencies for initialization only") - if d_vars["step"]: - text += utils.print_objects_format( - d_vars["step"], - "\nDependencies that may change over time") - if d_vars["lookup"]: - text += utils.print_objects_format( - d_vars["lookup"], - "\nLookup table dependencies") - - text += utils.print_objects_format( - s_deps, - "\nStateful objects integrated with the selected variables") - - print(text) - - def _get_dependencies(self, vars=[], modules=[]): - """ - Get the dependencies of a set of variables or modules. 
- - Parameters - ---------- - vars: set or list of strings (optional) - Variables to get the dependencies from. - It can be an empty list if the dependencies are computed only - using modules. Default is an empty list. - modules: set or list of strings (optional) - Modules to get the dependencies from. - It can be an empty list if the dependencies are computed only - using variables. Default is an empty list. Can select a full - module or a submodule by passing the path without the .py, e.g.: - "view_1/submodule1". - - Returns - ------- - c_vars: set - Set of all selected model variables. - d_deps: dict of sets - Dictionary of dependencies nedded to run vars and modules. - s_deps: set - Set of stateful objects to update when integrating selected - model variables. - - """ - def check_dep(dependencies, initial=False): - for dep in dependencies: - if dep in c_vars or dep.startswith("__"): - pass - elif dep.startswith("_"): - s_deps.add(dep) - dep = self._dependencies[dep] - check_dep(dep["initial"], True) - check_dep(dep["step"]) - else: - if initial and dep not in d_deps["step"]\ - and dep not in d_deps["lookup"]: - d_deps["initial"].add(dep) - else: - if dep in d_deps["initial"]: - d_deps["initial"].remove(dep) - if self.get_args(dep): - d_deps["lookup"].add(dep) - else: - d_deps["step"].add(dep) - - d_deps = {"initial": set(), "step": set(), "lookup": set()} - s_deps = set() - c_vars = {"time", "time_step", "initial_time", "final_time", "saveper"} - for var in vars: - py_name = utils.get_key_and_value_by_insensitive_key_or_value( - var, - self._namespace)[1] - c_vars.add(py_name) - for module in modules: - c_vars.update(self.get_vars_in_module(module)) - - for var in c_vars: - if var == "time": - continue - check_dep(self._dependencies[var]) - - return c_vars, d_deps, s_deps - - def get_vars_in_module(self, module): - """ - Return the name of python vars in a module. - - Parameters - ---------- - module: str - Name of the module to search in. - - Returns - ------- - vars: set - Set of varible names in the given module. - - """ - if self._modules: - module_content = self._modules.copy() - else: - raise ValueError( - "Trying to get a module from a non-modularized model") - - try: - # get the module or the submodule content - for submodule in module.split("/"): - module_content = module_content[submodule] - module_content = [module_content] - except KeyError: - raise NameError( - f"Module or submodule '{submodule}' not found...\n") - - vars, new_content = set(), [] - - while module_content: - # find the vars in the module or the submodule - for content in module_content: - if isinstance(content, list): - vars.update(content) - else: - [new_content.append(value) for value in content.values()] - - module_content, new_content = new_content, [] - - return vars - - def reload(self): - """ - Reloads the model from the translated model file, so that all the - parameters are back to their original value. - """ - self.__init__(self.py_model_file, data_files=self.data_files, - initialize=True, - missing_values=self.missing_values) - - def _default_return_columns(self, which): - """ - Return a list of the model elements tha change on time that - does not include lookup other functions that take parameters - or run-cached functions. - - Parameters - ---------- - which: str or None - If it is 'step' only cache step elements will be returned. - Else cache 'step' and 'run' elements will be returned. - Default is None. 
- - Returns - ------- - return_columns: list - List of columns to return - - """ - if which == 'step': - types = ['step'] - else: - types = ['step', 'run'] - - return_columns = [] - - for key, pykey in self._namespace.items(): - if pykey in self.cache_type and self.cache_type[pykey] in types\ - and not self.get_args(pykey): - - return_columns.append(key) - - return return_columns - - def _split_capture_elements(self, capture_elements): - """ - Splits the capture elements list between those with run cache - and others. - - Parameters - ---------- - capture_elements: list - Captured elements list - - Returns - ------- - capture_dict: dict - Dictionary of sets with keywords step and run. - - """ - capture_dict = {'step': set(), 'run': set(), None: set()} - [capture_dict[self.cache_type[element]].add(element) - for element in capture_elements] - return capture_dict - - def set_initial_condition(self, initial_condition): - """ Set the initial conditions of the integration. - - Parameters - ---------- - initial_condition : str or (float, dict) - The starting time, and the state of the system (the values of - all the stocks) at that starting time. 'original' or 'o'uses - model-file specified initial condition. 'current' or 'c' uses - the state of the model after the previous execution. Other str - objects, loads initial conditions from the pickle file with the - given name.(float, dict) tuple lets the user specify a starting - time (float) and (possibly partial) dictionary of initial values - for stock (stateful) objects. - - Examples - -------- - >>> model.set_initial_condition('original') - >>> model.set_initial_condition('current') - >>> model.set_initial_condition('exported_pickle.pic') - >>> model.set_initial_condition((10, {'teacup_temperature': 50})) - - See Also - -------- - model.set_initial_value() - - """ - - if isinstance(initial_condition, tuple): - self.initialize() - self.set_initial_value(*initial_condition) - elif isinstance(initial_condition, str): - if initial_condition.lower() in ["original", "o"]: - self.time.set_control_vars( - initial_time=self.components._control_vars["initial_time"]) - self.initialize() - elif initial_condition.lower() in ["current", "c"]: - pass - else: - self.import_pickle(initial_condition) - else: - raise TypeError( - "Invalid initial conditions. " - + "Check documentation for valid entries or use " - + "'help(model.set_initial_condition)'.") - - def _euler_step(self, dt): - """ - Performs a single step in the euler integration, - updating stateful components - - Parameters - ---------- - dt : float - This is the amount to increase time by this step - - """ - self.state = self.state + self.ddt() * dt - - def _integrate(self, capture_elements): - """ - Performs euler integration. - - Parameters - ---------- - capture_elements: set - Which model elements to capture - uses pysafe names. - - Returns - ------- - outputs: pandas.DataFrame - Output capture_elements data. 
- - """ - # necessary to have always a non-xaray object for appending objects - # to the DataFrame time will always be a model element and not saved - # TODO: find a better way of saving outputs - capture_elements.add("time") - outputs = pd.DataFrame(columns=capture_elements) - - if self.progress: - # initialize progress bar - progressbar = utils.ProgressBar( - int((self.time.final_time()-self.time())/self.time.time_step()) - ) - else: - # when None is used the update will do nothing - progressbar = utils.ProgressBar(None) - - while self.time.in_bounds(): - if self.time.in_return(): - outputs.at[self.time.round()] = [ - getattr(self.components, key)() - for key in capture_elements] - self._euler_step(self.time.time_step()) - self.time.update(self.time()+self.time.time_step()) - self.clean_caches() - progressbar.update() - - # need to add one more time step, because we run only the state - # updates in the previous loop and thus may be one short. - if self.time.in_return(): - outputs.at[self.time.round()] = [getattr(self.components, key)() - for key in capture_elements] - - progressbar.finish() - - # delete time column as it was created only for avoiding errors - # of appending data. See previous TODO. - del outputs["time"] - return outputs - - def _add_run_elements(self, df, capture_elements): - """ - Adds constant elements to a dataframe. - - Parameters - ---------- - df: pandas.DataFrame - Dataframe to add elements. - - capture_elements: list - List of constant elements - - Returns - ------- - None - - """ - nt = len(df.index.values) - for element in capture_elements: - df[element] = [getattr(self.components, element)()] * nt diff --git a/pysd/pysd.py b/pysd/pysd.py index 5b6b4d51..63283718 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -7,7 +7,7 @@ import sys -from pysd.py_backend.statefuls import Model +from pysd.py_backend.model import Model if sys.version_info[:2] < (3, 7): # pragma: no cover diff --git a/tests/pytest_pysd/pytest_functions.py b/tests/pytest_pysd/pytest_functions.py index 5316267e..c3ecb58f 100644 --- a/tests/pytest_pysd/pytest_functions.py +++ b/tests/pytest_pysd/pytest_functions.py @@ -2,6 +2,7 @@ import numpy as np import xarray as xr +from pysd.py_backend.components import Time from pysd.py_backend.functions import\ ramp, step, pulse, xidz, zidz, if_then_else, sum, prod, vmin, vmax,\ invert_matrix @@ -62,8 +63,6 @@ def test_pulse_chain(self): assert pulse(lambda: 15, 1, repeat_time=5, width=3, end=13) == 0 def test_pulse_magnitude(self): - from pysd.py_backend.statefuls import Time - # Pulse function with repeat time # before first impulse t = Time() @@ -145,7 +144,6 @@ def test_pulse_magnitude(self): assert pulse(t, 4, magnitude=10) == 0 def test_numeric_error(self): - from pysd.py_backend.statefuls import Time time = Time() time.set_control_vars(initial_time=0, time_step=0.1, final_time=10) err = 4e-16 diff --git a/tests/unit_test_statefuls.py b/tests/unit_test_statefuls.py index 5ae5c371..4c78907e 100644 --- a/tests/unit_test_statefuls.py +++ b/tests/unit_test_statefuls.py @@ -357,7 +357,7 @@ def test_not_initialized_object(self): class TestMacroMethods(unittest.TestCase): def test_get_elements_to_initialize(self): from pysd import read_vensim - from pysd.py_backend.statefuls import Macro + from pysd.py_backend.model import Macro test_model = _root.joinpath("test-models/samples/teacup/teacup.mdl") read_vensim(test_model) From ad54b030250ab16c76fd008d9b4249a8efc35060 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 11:28:23 +0200 
Subject: [PATCH 65/96] Update path to model in documentation --- docs/advanced_usage.rst | 4 ++-- docs/structure/model_class.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index abadc212..00188b68 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -116,11 +116,11 @@ Selecting and running a submodel -------------------------------- A submodel of a translated model can be selected in order to run only a part of the original model. This can be done through the :py:data:`.select_submodel()` method: -.. automethod:: pysd.py_backend.statefuls.Model.select_submodel +.. automethod:: pysd.py_backend.model.Model.select_submodel :noindex: In order to preview the needed exogenous variables the :py:data:`.get_dependencies()` method can be used: -.. automethod:: pysd.py_backend.statefuls.Model.get_dependencies +.. automethod:: pysd.py_backend.model.Model.get_dependencies :noindex: diff --git a/docs/structure/model_class.rst b/docs/structure/model_class.rst index 59c93a69..55f918d3 100644 --- a/docs/structure/model_class.rst +++ b/docs/structure/model_class.rst @@ -3,11 +3,11 @@ Python model class Model class ----------- -.. autoclass:: pysd.py_backend.statefuls.Model +.. autoclass:: pysd.py_backend.model.Model :members: Macro class ----------- -.. autoclass:: pysd.py_backend.statefuls.Macro +.. autoclass:: pysd.py_backend.model.Macro :members: From beed36178346fcde25d53fab6f006b35f00ec66b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Thu, 12 May 2022 10:42:16 +0200 Subject: [PATCH 66/96] revised project documentation --- docs/advanced_usage.rst | 30 +++++++-------- docs/basic_usage.rst | 60 +++++++++++++++--------------- docs/command_line_usage.rst | 59 +++++++++++++++-------------- docs/conf.py | 5 ++- docs/development/complement.rst | 10 +++-- docs/development/contributing.rst | 12 +++--- docs/development/pathway.rst | 2 +- docs/index.rst | 2 +- docs/installation.rst | 21 +++++------ docs/reporting_bugs.rst | 12 +++--- docs/structure/structure_index.rst | 22 +++++------ docs/tools.rst | 2 +- 12 files changed, 119 insertions(+), 118 deletions(-) diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index abadc212..da8b5552 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -1,7 +1,7 @@ Advanced Usage ============== -The power of PySD, and its motivation for existence, is its ability to tie in to other models and analysis packages in the Python environment. In this section we'll discuss how those connections happen. +The power of PySD, and its motivation for existence, is its ability to tie in to other models and analysis packages in the Python environment. In this section we discuss how those connections happen. Replacing model components with more complex objects @@ -16,11 +16,11 @@ However, when we made the room temperature something that varied with time, PySD def room_temperature(): return np.interp(t, series.index, series.values) -This drew on the internal state of the system, namely the time t, and the time-series data series that that we wanted to variable to represent. This process of substitution is available to the user, and we can replace functions ourselves, if we are careful. +This drew on the internal state of the system, namely the time t, and the time-series data series that we wanted the variable to represent. This process of substitution is available to the user, and we can replace functions ourselves, if we are careful. 
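A minimal sketch of this substitution, assuming the translated teacup model exposes a `room_temperature` component, could look like the following; the measurement series below is made up purely for illustration::

    import numpy as np
    import pandas as pd
    import pysd

    model = pysd.read_vensim('Teacup.mdl')

    # hypothetical measured room temperatures, indexed by model time
    series = pd.Series(index=[0, 10, 20, 30], data=[20., 22., 25., 26.])

    def room_temperature():
        # interpolate the measurements at the current simulation time
        return np.interp(model.time(), series.index, series.values)

    model.set_components({'room_temperature': room_temperature})
    stocks = model.run()
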
Because PySD assumes that all components in a model are represented as functions taking no arguments, any component that we wish to modify must be replaced with a function taking no arguments. As the state of the system and all auxiliary or flow methods are public, our replacement function can call these methods as part of its internal structure. -In our teacup example, suppose we didn't know the functional form for calculating the heat lost to the room, but instead had a lot of data of teacup temperatures and heat flow rates. We could use a regression model (here a support vector regression from Scikit-Learn) in place of the analytic function:: +In our teacup example, suppose we did not know the functional form for calculating the heat lost to the room, but instead had a lot of data of teacup temperatures and heat flow rates. We could use a regression model (here a support vector regression from Scikit-Learn) in place of the analytic function:: from sklearn.svm import SVR regression = SVR() @@ -35,7 +35,7 @@ Once the regression model is fit, we write a wrapper function for its predict me room_temp = model.components.room_temperature() return regression.predict([room_temp, tea_temp])[0] -In order to substitute this function directly for the heat_loss_to_room model component using the :py:func:`set_component()` method:: +To substitute this function directly for the heat_loss_to_room model component using the :py:func:`set_component()` method:: model.set_components({'heat_loss_to_room': new_heatflow_function}) @@ -50,7 +50,7 @@ If you want to replace a subscripted variable, you need to ensure that the outpu Splitting Vensim views in separate Python files (modules) --------------------------------------------------------- -In order to replicate the Vensim views in translated models, the user can set the `split_views` argument to True in the :py:func:`read_vensim` function:: +In order to replicate the Vensim views in the translated models, the user can set the `split_views` argument to True in the :py:func:`read_vensim` function:: read_vensim("many_views_model.mdl", split_views=True) @@ -65,12 +65,10 @@ In a Vensim model with three separate views (e.g. `view_1`, `view_2` and `view_3 | │ ├── view_1.py | │ ├── view_2.py | │ └── view_3.py -| ├── _namespace_many_views_model.json | ├── _subscripts_many_views_model.json -| ├── _dependencies_many_views_model.json | ├── many_views_model.py -| -| + + .. note :: Often, modelers wish to organise views further. To that end, a common practice is to include a particular character in the View name to indicate that what comes after it is the name of the subview. For instance, we could name one view as `ENERGY.Supply` and another one as `ENERGY.Demand`. @@ -78,7 +76,7 @@ In a Vensim model with three separate views (e.g. `view_1`, `view_2` and `view_3 read_vensim("many_views_model.mdl", split_views=True, subview_sep=["."]) -If macros are present, they will be self-contained in files named as the macro itself. The macro inner variables will be placed inside the module that corresponds with the view in which they were defined. +If macros are present, they will be self-contained in files named after the macro itself. The macro inner variables will be placed inside the module that corresponds with the view in which they were defined. Starting simulations from an end-state of another simulation @@ -90,7 +88,7 @@ The current state of a model can be saved in a pickle file using the :py:data:`. 
model1.run(final_time=50) model1.export("final_state.pic") -Then the exported data can be used in another session:: +then the exported data can be used in another session:: import pysd model2 = pysd.load("my_model.py") @@ -105,22 +103,22 @@ the new simulation will have initial time equal to 50 with the saved values from model1.run(final_time=50, return_timestamps=[]) .. note:: - The changes done with *params* arguments are not ported to the new model (*model2*) object that you initialize with *final_state.pic*. If you want to keep them, you need to call run with the same *params* values as in the original model (*model1*). + The changes made with the *params* arguments are not ported to the new model (*model2*) object that you initialize with *final_state.pic*. If you want to keep them, you need to call run with the same *params* values as in the original model (*model1*). .. warning:: - Exported data is saved and loaded using `pickle `_, this data can be incompatible with future versions of - *PySD* or *xarray*. In order to prevent data losses save always the source code. + Exported data is saved and loaded using `pickle `_. The data stored in the pickles may be incompatible with future versions of + *PySD* or *xarray*. In order to prevent data losses, always save the source code. Selecting and running a submodel -------------------------------- -A submodel of a translated model can be selected in order to run only a part of the original model. This can be done through the :py:data:`.select_submodel()` method: +A submodel of a translated model can be run as a standalone model. This can be done through the :py:data:`.select_submodel()` method: .. automethod:: pysd.py_backend.statefuls.Model.select_submodel :noindex: -In order to preview the needed exogenous variables the :py:data:`.get_dependencies()` method can be used: +In order to preview the needed exogenous variables, the :py:data:`.get_dependencies()` method can be used: .. automethod:: pysd.py_backend.statefuls.Model.get_dependencies :noindex: diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index bf8d9d3d..a5b9622e 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -3,13 +3,13 @@ Basic Usage Importing a model and getting started ------------------------------------- -To begin, we must first load the PySD module, and use it to import a supported model file:: +To begin, we must first load the PySD module, and use it to import a model file:: >>> import pysd >>> model = pysd.read_vensim('Teacup.mdl') -This code creates an instance of the PySD class loaded with an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling to room temperature. +This code creates an instance of the PySD Model class from an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling at room temperature. .. image:: images/Teacup.png :width: 350 px @@ -18,7 +18,7 @@ This code creates an instance of the PySD class loaded with an example model tha .. note:: The teacup model can be found in the `samples of the test-models repository `_. -To view a synopsis of the model equations and documentation, use the :py:func:`.doc` property of the model class. This will generate a listing of all the model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. 
Here is a sample from the teacup model:: +To view a synopsis of the model equations and documentation, use the :py:func:`.doc` property of the Model class. This will generate a listing of all model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model:: >>> model.doc @@ -35,13 +35,13 @@ To view a synopsis of the model equations and documentation, use the :py:func:`. .. note:: - You can also load an already translated model file, what will be faster as you will load a Python file:: + You can also load an already translated model file. This will be faster than loading an `mdl` model, as the translation is not required:: >>> import pysd >>> model = pysd.load('Teacup.py') .. note:: - The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage, you can check the full description in :doc:`Model loading ` or using :py:func:`help()` e.g.:: + The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage. You can check the full description in :doc:`Model loading ` or using :py:func:`help()` e.g.:: >>> import pysd >>> help(pysd.load) @@ -49,7 +49,7 @@ To view a synopsis of the model equations and documentation, use the :py:func:`. Running the Model ----------------- -The simplest way to simulate the model is to use the :py:func:`.run()` command with no options. This runs the model with the default parameters supplied by the model file, and returns a :py:class:`pandas.DataFrame` of the values of the model components at every timestamp:: +The simplest way to simulate the model is to use the :py:func:`.run()` command with no options. This runs the model with the default parameters supplied in the model file, and returns a :py:class:`pandas.DataFrame` of the values of the model components at every timestamp:: >>> stocks = model.run() >>> stocks @@ -69,7 +69,7 @@ The simplest way to simulate the model is to use the :py:func:`.run()` command w [241 rows x 8 columns] -Pandas gives us simple plotting capability, so we can see how the cup of tea behaves:: +Pandas proovides a simple plotting capability, that we can use to see how the temperature of the tea cup evolves over time:: >>> import matplotlib.pyplot as plt >>> stocks["Teacup Temperature"].plot() @@ -82,29 +82,29 @@ Pandas gives us simple plotting capability, so we can see how the cup of tea beh :width: 400 px :align: center -To show a progressbar during the model integration the progress flag can be passed to the :py:func:`.run()` command, progressbar package is needed:: +To show a progressbar during the model integration, the `progress` argument can be passed to the :py:func:`.run()` command. To do so, the progressbar package is required:: >>> stocks = model.run(progress=True) Running models with DATA type components ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Venim's regular DATA type components are given by an empty expression in the model equation. These values are read from a binary `.vdf` file. PySD allows running models with this kind of data definition using the data_files argument when calling :py:func:`.run()` command, e.g.:: +Venim allows to import DATA type data from binary `.vdf` files. Variables defined without an equation in the model, will attempt to read their values from the `.vdf`. 
PySD allows running models with this kind of data definition using the data_files argument when calling :py:func:`.run()` command, e.g.:: >>> stocks = model.run(data_files="input_data.tab") -Several files can be passed by using a list, then if the data information has not been found in the first file, the next one will be used until finding the data values:: +Several files can be passed by using a list. If the data information is not found in the first file, the next one will be used until finding the data values:: >>> stocks = model.run(data_files=["input_data.tab", "input_data2.tab", ..., "input_datan.tab"]) -If a variable is given in different files to choose the specific file a dictionary can be used:: +If a variables are defined in different files, to choose the specific file a dictionary can be used:: >>> stocks = model.run(data_files={"input_data.tab": ["data_var1", "data_var3"], "input_data2.tab": ["data_var2"]}) .. note:: - Only `tab` and `csv` files are supported, they should be given as a table, each variable one column (or row) and the time in the first column (or first row). The column (or row) names can be given using the original name or using python names. + Only `tab` and `csv` files are supported. They should be given as a table, with each variable in a column (or row) and the time in the first column (or first row). The column (or row) names can be given using the name of the variable in the original model or using python names. .. note:: - Subscripted variables must be given in the vensim format, one column (or row) per subscript combination. Example of column names for 2x2 variable: + Subscripted variables must be given in the Vensim format, one column (or row) per subscript combination. Example of column names for 2x2 variable: `subs var[A, C]` `subs var[B, C]` `subs var[A, D]` `subs var[B, D]` Outputting various run information @@ -129,7 +129,7 @@ The :py:func:`.run()` command has a few options that make it more useful. In man [241 rows x 2 columns] -If the measured data that we are comparing with our model comes in at irregular timestamps, we may want to sample the model at timestamps to match. The :py:func:`.run()` function gives us this ability with the return_timestamps keyword argument:: +If the measured data that we are comparing with our model comes in at irregular timestamps, we may want to sample the model at timestamps to match. The :py:func:`.run()` function provides this functionality with the return_timestamps keyword argument:: >>> model.run(return_timestamps=[0, 1, 3, 7, 9.5, 13, 21, 25, 30]) @@ -145,31 +145,31 @@ If the measured data that we are comparing with our model comes in at irregular 30.0 10 0.537400 70 75.374001 30 0 0.125 0.125 -Retrieving totally flat dataframe +Retrieving a flat DataFrame --------------------------------- -The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray` in the output :py:class:`pandas.DataFrame`. To get a totally flat dataframe, like Vensim outuput the `flatten=True` when calling the run function:: +The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray` in the output :py:class:`pandas.DataFrame`. To get a flat dataframe, set `flatten=True` when calling the :py:func:`run()` method:: >>> model.run(flatten=True) Setting parameter values ------------------------ -In many cases, we want to modify the parameters of the model to investigate its behavior under different assumptions. 
There are several ways to do this in PySD, but the :py:func:`run()` function gives us a convenient method in the params keyword argument. +In some situations we may want to modify the parameters of the model to investigate its behavior under different assumptions. There are several ways to do this in PySD, but the :py:func:`run()` method gives us a convenient method in the `params` keyword argument. -This argument expects a dictionary whose keys correspond to the components of the model. The associated values can either be a constant, or a Pandas series whose indices are timestamps and whose values are the values that the model component should take on at the corresponding time. For instance, in our model we can set the room temperature to a constant value:: +This argument expects a dictionary whose keys correspond to the components of the model. The associated values can either be constants, or :py:class:`pandas.Series` whose indices are timestamps and whose values are the values that the model component should take on at the corresponding time. For instance, in our model we may set the room temperature to a constant value:: >>> model.run(params={'Room Temperature': 20}) -Alternately, if we believe the room temperature is changing over the course of the simulation, we can give the run function a set of time-series values in the form of a :py:class:`pandas.Series`, and PySD will linearly interpolate between the given values in the course of its integration:: +Alternately, if we want the room temperature to vary over the course of the simulation, we can give the :py:func:`run()` method a set of time-series values in the form of a :py:class:`pandas.Series`, and PySD will linearly interpolate between the given values in the course of its integration:: >>> import pandas as pd >>> temp = pd.Series(index=range(30), data=range(20, 80, 2)) >>> model.run(params={'Room Temperature': temp}) -If the parameter value to change is a subscripted variable (vector, matrix...), there are three different options to set new value. Suposse we have ‘Subscripted var’ with dims :py:data:`['dim1', 'dim2']` and coordinates :py:data:`{'dim1': [1, 2], 'dim2': [1, 2]}`. A constant value can be used and all the values will be replaced:: +If the parameter value to change is a subscripted variable (vector, matrix...), there are three different options to set the new value. Suposse we have ‘Subscripted var’ with dims :py:data:`['dim1', 'dim2']` and coordinates :py:data:`{'dim1': [1, 2], 'dim2': [1, 2]}`. A constant value can be used and all the values will be replaced:: >>> model.run(params={'Subscripted var': 0}) -A partial :py:class:`xarray.DataArray` can be used, for example a new variable with ‘dim2’ but not ‘dim2’, the result will be repeated in the remaining dimensions:: +A partial :py:class:`xarray.DataArray` can be used. For example a new variable with ‘dim2’ but not ‘dim2’. In that case, the result will be repeated in the remaining dimensions:: >>> import xarray as xr >>> new_value = xr.DataArray([1, 5], {'dim2': [1, 2]}, ['dim2']) @@ -181,10 +181,10 @@ Same dimensions :py:class:`xarray.DataArray` can be used (recommended):: >>> new_value = xr.DataArray([[1, 5], [3, 4]], {'dim1': [1, 2], 'dim2': [1, 2]}, ['dim1', 'dim2']) >>> model.run(params={'Subscripted var': new_value}) -In the same way, a Pandas series can be used with constan values, partially defined :py:class:`xarray.DataArray` or same dimensions :py:class:`xarray.DataArray`. 
+In the same way, a :py:class:`pandas.Series` can be used with constant values, partially defined :py:class:`xarray.DataArray` or same dimensions :py:class:`xarray.DataArray`. .. note:: - That once parameters are set by the run command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:data:`set_components(params={})` method, which takes the same params dictionary as the run function. We might choose to do this in situations where we'll be running the model many times, and only want to spend time setting the parameters once. + Once parameters are set by the :py:func:`run()` command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:data:`set_components(params={})` method, which takes the same params dictionary as the :py:func:`run()` method. We might choose to do this in situations where we will be running the model many times, and only want to set the parameters once. .. note:: If you need to know the dimensions of a variable, you can check them by using :py:data:`.get_coords(variable__name)` function:: @@ -197,14 +197,14 @@ In the same way, a Pandas series can be used with constan values, partially defi ({'dim1': [1, 2], 'dim2': [1, 2]}, ['dim1', 'dim2']) - this will return the coords dictionary and the dimensions list if the variable is subscripted or ‘None’ if the variable is an scalar. + this will return the coords dictionary and the dimensions list, if the variable is subscripted, or ‘None’ if the variable is an scalar. .. note:: If you change the value of a lookup function by a constant, the constant value will be used always. If a :py:class:`pandas.Series` is given the index and values will be used for interpolation when the function is called in the model, keeping the arguments that are included in the model file. If you change the value of any other variable type by a constant, the constant value will be used always. If a :py:class:`pandas.Series` is given the index and values will be used for interpolation when the function is called in the model, using the time as argument. - If you need to know if a variable takes arguments, i.e., if it is a lookup variable, you can check it by using :py:func:`.get_args(variable__name)` function:: + If you need to know if a variable takes arguments, i.e., if it is a lookup variable, you can check it by using the :py:func:`.get_args(variable__name)` function:: >>> model.get_args('Room Temperature') @@ -216,13 +216,13 @@ In the same way, a Pandas series can be used with constan values, partially defi Setting simulation initial conditions ------------------------------------- -Finally, we can set the initial conditions of our model in several ways. So far, we've been using the default value for the initial_condition keyword argument, which is ‘original’. This value runs the model from the initial conditions that were specified originally by the model file. We can alternately specify a tuple containing the start time and a dictionary of values for the system's stocks. Here we start the model with the tea at just above freezing:: +Initial conditions for our model can be set in several ways. So far, we have used the default value for the `initial_condition` keyword argument, which is ‘original’. This value runs the model from the initial conditions that were specified originally in the model file. 
We can alternately specify a tuple containing the start time and a dictionary of values for the system's stocks. Here we start the model with the tea at just above freezing temperature:: >>> model.run(initial_condition=(0, {'Teacup Temperature': 33})) -The new value setted can be a *xarray.DataArray* as it is explained in the previous section. +The new value can be a *xarray.DataArray*, as explained in the previous section. -Additionally we can run the model forward from its current position, by passing the initial_condition argument the keyword ‘current’. After having run the model from time zero to thirty, we can ask the model to continue running forward for another chunk of time:: +Additionally, we can run the model forward from its current position, by passing initial_condition=‘current’. After having run the model from time zero to thirty, we can ask the model to continue running forward for another chunk of time:: >>> model.run(initial_condition='current', return_timestamps=range(31, 45)) @@ -234,10 +234,10 @@ There are times when we may choose to overwrite a stock with a constant value (i Querying current values ----------------------- -We can easily access the current value of a model component using curly brackets. For instance, to find the temperature of the teacup, we simply call:: +We can easily access the current value of a model component using curly brackets. For instance, to find the temperature of the tea cup, we simply call:: >>> model['Teacup Temperature'] -If you try to get the current values of a lookup variable the previous method will fail as lookup variables take arguments. However, it is possible to get the full series of a lookup or data object with :py:func:`.get_series_data` method:: +If you try to get the current values of a lookup variable, the previous method will fail, as lookup variables take arguments. However, it is possible to get the full series of a lookup or data object with :py:func:`.get_series_data` method:: >>> model.get_series_data('Growth lookup') diff --git a/docs/command_line_usage.rst b/docs/command_line_usage.rst index d7ff8966..15469411 100644 --- a/docs/command_line_usage.rst +++ b/docs/command_line_usage.rst @@ -4,14 +4,14 @@ Command Line Usage Basic command line usage ------------------------ -Most of the features available in :doc:`basic usage <../basic_usage>` are also available using command line. Running: +Most of the features available in :doc:`basic usage <../basic_usage>` are also available using the command line. Running: .. code-block:: text python -m pysd Teacup.mdl -will translate *Teacup.mdl* to *Teacup.py* and run it with the default values. The output will be saved in *Teacup_output_%Y_%m_%d-%H_%M_%S_%f.tab*. The command line accepts several arguments, this can be checked using the *-h/--help* argument: +will translate *Teacup.mdl* to *Teacup.py* and run it with the default values. The output will be saved in *Teacup_output_%Y_%m_%d-%H_%M_%S_%f.tab*. The command line interface accepts several arguments, this can be checked using the *-h/--help* argument: .. code-block:: text @@ -19,7 +19,7 @@ will translate *Teacup.mdl* to *Teacup.py* and run it with the default values. T Set output file ^^^^^^^^^^^^^^^ -In order to set the output file *-o/--output-file* argument can be used: +In order to set the output file path, the *-o/--output-file* argument can be used: .. code-block:: text @@ -29,13 +29,13 @@ In order to set the output file *-o/--output-file* argument can be used: The output file can be a *.csv* or *.tab*. .. 
note:: - If *-o/--output-file* is not given the output will be saved in a file - that starts with the model file name and has a time stamp to avoid + If *-o/--output-file* is not given, the output will be saved in a file + that starts with the model file name followed by a time stamp to avoid overwritting files. Activate progress bar ^^^^^^^^^^^^^^^^^^^^^ -The progress bar can be activated using *-p/--progress* command: +The progress bar can be activated using the *-p/--progress* argument: .. code-block:: text @@ -44,9 +44,9 @@ The progress bar can be activated using *-p/--progress* command: Translation options ------------------- -Only translate model file -^^^^^^^^^^^^^^^^^^^^^^^^^ -To only translate the model file, it does not run the model, *-t/--trasnlate* command is provided: +Only translate the model file +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +To translate the model file and not run the model, the *-t/--trasnlate* command is provided: .. code-block:: text @@ -54,24 +54,25 @@ To only translate the model file, it does not run the model, *-t/--trasnlate* co Splitting Vensim views in different files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In order to split the Vensim model views in different files as explained in :doc:`advanced usage <../advanced_usage>`: +In order to split the Vensim model views in different files, as explained in :doc:`advanced usage <../advanced_usage>`, use the *--split-views* argument: .. code-block:: text - python -m pysd --split-modules many_views_model.mdl + python -m pysd --split-views many_views_model.mdl + Outputting various run information ---------------------------------- -The output number of variables can be modified bu passing them as arguments separated by commas, using *-r/return_columns* argument: +The number of output variables can be modified by passing them as arguments separated by commas, using the *-r/return_columns* argument: .. code-block:: text python -m pysd -r 'Teacup Temperature, Room Temperature' Teacup.mdl -Note that the argument passed after *-r/return_columns* should be inside '' to be properly read. Moreover each variable name must be split with commas. +Note that the a single string must be passed after the *-r/return_columns* argument, containing the names of the variables separated by commas. -Sometimes, the variable names have special characteres, such as commas, which can happen when trying to return a variable with subscripts. -In this case whe can save a *.txt* file with one variable name per row and use it as an argument: +Sometimes, variable names have special characteres, such as commas, which can happen when trying to return a variable with subscripts. +In this case we can save a *.txt* file with one variable name per row and use it as an argument: .. code-block:: text @@ -89,15 +90,15 @@ In this case whe can save a *.txt* file with one variable name per row and use i where *N* is an integer. .. note:: - The time outputs can be also modified using the model control variables, explained in next section. + The time outputs can also be modified using the model control variables, explained in next section. Modify model variables ---------------------- Modify model control variables ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The model control variables such as the *initial time*. *final time*, *time step* and *saving step* can be easily -modified using the *-I/--initial_time*, *-F/--final-time*, *-T/--time-step* and *-S/--saveper* commands respectively. For example: +The values of the model control variables (i.e. *initial time*. 
*final time*, *time step* and *saving step*) can be +modified using the *-I/--initial_time*, *-F/--final-time*, *-T/--time-step* and *-S/--saveper* arguments, respectively. For example: .. code-block:: text @@ -106,32 +107,30 @@ modified using the *-I/--initial_time*, *-F/--final-time*, *-T/--time-step* and will set the initial time to 2005, the final time to 2010 and the time step to 1. .. note:: - If *-R/--return-timestamps* argument is used the *final time* and *saving step* will be ignored. + If the *-R/--return-timestamps* argument is used, the *final time* and *saving step* will be ignored. Modify model variables ^^^^^^^^^^^^^^^^^^^^^^ -In order to modify the values of model variables they can be passed after the model file: +To modify the values of model variables, their new values may be passed after the model file: .. code-block:: text python -m pysd Teacup.mdl 'Room Temperature'=5 -this will set *Room Temperature* variable to the constant value 5. A series can be also passed -to change a value of a value to a time dependent series or the interpolation values -of a lookup variable two lists of the same length must be given: +this will set *Room Temperature* variable to 5. A time series or a lookup can also be passed +as the new value of a variable as two lists of the same length: .. code-block:: text python -m pysd Teacup.mdl 'Temperature Lookup=[[1, 2, 3, 4], [10, 15, 17, 18]]' -The first list will be used for the *time* or *x* values and the second for the data. See setting parameter values in :doc:`basic usage <../basic_usage>` for more information. +The first list will be used for the *time* or *x* values, and the second for the data values. See setting parameter values in :doc:`basic usage <../basic_usage>` for further details. .. note:: - If a variable name or the right hand side are defined with whitespaces - it is needed to add '' define it, as has been done in the last example. + If a variable name or the right hand side are defined with white spaces, they must be enclosed in quotes, as in the previous example. Several variables can be changed at the same time, e.g.: @@ -141,7 +140,7 @@ Several variables can be changed at the same time, e.g.: Modify initial conditions of model variables ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Sometimes we do not want to change a variable value to a constant but change its initial value, for example change initial value of a stock object, this can be similarly done to the previos case but using ':' instead of '=': +Sometimes we do not want to change the actual value of a variable but we want to change its initial value instead. An example of this would be changing the initial value of a stock object. This can be done similarly to what was shown in the previos case, but using ':' instead of '=': .. code-block:: text @@ -149,12 +148,12 @@ Sometimes we do not want to change a variable value to a constant but change its this will set initial *Teacup Temperature* to 30. -Putting It All Together +Putting It all together ----------------------- -Several commands can be used together, first need to add optional arguments, those starting with '-', next the model file, and last the variable or variables to change, for example: +Several commands may be used together. The optional arguments and model arguments go first (those starting with '-' or '--'), then the model file path, and finally the variable or variables to change: .. 
code-block:: text python -m pysd -o my_output_file.csv --progress --final-time=2010 --time-step=1 Teacup.mdl 'Room Temperature'=5 temperature_lookup='[[1, 2, 3, 4], [10, 15, 17, 18]]' 'Teacup Temperature':30 -will save step 1 outputs until 2010 in *my_output_file.csv*, showing a progressbar during integration and settung foo to *5* and *temperature_lookup* to ((1, 10), (2, 15), (3, 17), (4, 18)) and initial *Teacup Temperature* to 30. \ No newline at end of file +will save step 1 outputs until 2010 in *my_output_file.csv*, showing a progressbar during integration and setting foo to *5*, *temperature_lookup* to ((1, 10), (2, 15), (3, 17), (4, 18)) and initial *Teacup Temperature* to 30. \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 9e2f05b7..de1013ea 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,10 +18,11 @@ import mock import sphinx_rtd_theme -from generate_tables import generate_tables +sys.path.insert(0, os.path.abspath('../')) + +from docs.generate_tables import generate_tables -sys.path.insert(0, os.path.abspath('../')) # Generate tables used for documentation generate_tables() diff --git a/docs/development/complement.rst b/docs/development/complement.rst index fd1253ac..1e356d2f 100644 --- a/docs/development/complement.rst +++ b/docs/development/complement.rst @@ -3,8 +3,12 @@ Complementary Projects The most valuable component for better integrating models with *basically anything else* is a standard language for communicating the structure of those models. That language is `XMILE `_. The draft specifications for this have been finalized and the standard should be approved in the next few months. -A python library for analyzing system dynamics models called the `Exploratory Modeling and Analysis (EMA) Workbench `_ is being developed by `Erik Pruyt `_ and `Jan Kwakkel `_ at TU Delft. This package implements a variety of analysis methods that are unique to dynamic models, and could work very tightly with PySD. +A Python library for analyzing system dynamics models called the `Exploratory Modeling and Analysis (EMA) Workbench `_ is being developed by `Erik Pruyt `_ and `Jan Kwakkel `_ at TU Delft. This package implements a variety of analysis methods that are unique to dynamic models, and could work very tightly with PySD. -An excellent javascript library called `sd.js `_ created by Bobby Powers at `SDlabs `_ exists as a standalone SD engine, and provides a beautiful front end. This front end could be rendered as an iPython widget to facilitate display of SD models. +An excellent JavaScript library called `sd.js `_ created by Bobby Powers at `SDlabs `_ exists as a standalone SD engine, and provides a beautiful front end. This front end could be rendered as an iPython widget to facilitate display of SD models. -The `Behavior Analysis and Testing Software(BATS) `_ delveloped by `Gönenç Yücel `_ includes a really neat method for categorizing behavior modes and exploring parameter space to determine the boundaries between them. \ No newline at end of file +The `Behavior Analysis and Testing Software(BATS) `_ delveloped by `Gönenç Yücel `_ includes a really neat method for categorizing behavior modes and exploring parameter space to determine the boundaries between them. + +The `SDQC library ` developed by Eneko Martin Martinez may be used to check the quality of the data imported by Vensim models from speadsheet files. 
+ +The `excels2vensim library `, also developed by Eneko Martin Martinez, aims to simplify the incorporation of equations from external data into Vensim. \ No newline at end of file diff --git a/docs/development/contributing.rst b/docs/development/contributing.rst index ba6ba04c..1adff9ce 100644 --- a/docs/development/contributing.rst +++ b/docs/development/contributing.rst @@ -6,10 +6,10 @@ If you are interested in helping to develop PySD, the :doc:`pathway` lists areas for contribution. To get started, you can fork the repository and make contributions to your own version. -When you're happy with your edits, submit a pull request to the main branch. +When you are happy with your edits, submit a pull request to the main branch. .. note:: - In order to open a pull request,the new features and changes should be througly tested. + In order to open a pull request, the new features and changes should be througly tested. To do so, unit tests of new features or translated functions should be added, please check the Development Tools section below. When opening a pull request all tests are run and the coverage and pep8 style are checked. Development Tools @@ -40,7 +40,7 @@ complementary tests in the corresponding `unit_test_*.py` file. Speed Tests ^^^^^^^^^^^ -The speed tests may be developed in the future. Any contribution is welcome. +Speed tests may be developed in the future. Any contribution is welcome. Profiler @@ -64,7 +64,7 @@ It doesn't need to be perfect, but we should aspire always to move in a positive PySD Design Philosophy ---------------------- -Understanding that a focussed project is both more robust and maintainable, PySD aspires to the +Understanding that a focussed project is both more robust and maintainable, PySD adheres to the following philosophy: @@ -73,9 +73,9 @@ following philosophy: * Anything that is not endemic to System Dynamics (such as plotting, integration, fitting, etc) should either be implemented using external tools, or omitted. * Stick to SD. Let other disciplines (ABM, Discrete Event Simulation, etc) create their own tools. - * Use external model creation tools + * Use external model creation tools. -* Use the core language of system dynamics. +* Use the core language of System Dynamics. * Limit implementation to the basic XMILE standard. * Resist the urge to include everything that shows up in all vendors' tools. diff --git a/docs/development/pathway.rst b/docs/development/pathway.rst index cedbe8db..b0151934 100644 --- a/docs/development/pathway.rst +++ b/docs/development/pathway.rst @@ -14,7 +14,7 @@ High Priority Medium Priority --------------- -* Improve model exexution speed using cython, theano, numba, or another package +* Improve model execution speed using cython, theano, numba, or another package Low Priority diff --git a/docs/index.rst b/docs/index.rst index 3d61e91f..ca4a1c14 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -34,7 +34,7 @@ PySD .. |DOI| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.5654824.svg :target: https://doi.org/10.5281/zenodo.5654824 -This project is a simple library for running System Dynamics models in python, with the purpose of +This project is a simple library for running System Dynamics models in Python, with the purpose of improving integration of Big Data and Machine Learning into the SD workflow. 
PySD translates :doc:`Vensim ` or diff --git a/docs/installation.rst b/docs/installation.rst index 729f5d93..c6175e67 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,8 +3,7 @@ Installation Installing via pip ------------------ -To install the PySD package from the Python package index into an established -Python environment, use the pip command: +To install the PySD package from the Python package index use the pip command: .. code-block:: bash @@ -12,7 +11,7 @@ Python environment, use the pip command: Installing with conda --------------------- -To install PySD with conda, using the conda-forge channel, into a conda environment, use the following command: +To install PySD with conda, using the conda-forge channel, use the following command: .. code-block:: bash @@ -20,15 +19,15 @@ To install PySD with conda, using the conda-forge channel, into a conda environm Installing from source ---------------------- -To install from the source, clone the project with git: +To install from source, clone the project with git: .. code-block:: bash git clone https://github.com/JamesPHoughton/pysd.git -Or download the latest version from the project webpage: https://github.com/JamesPHoughton/pysd +or download the latest version from the project repository: https://github.com/JamesPHoughton/pysd -In the source directory use the command +In the source directory use the command: .. code-block:: bash @@ -38,9 +37,9 @@ In the source directory use the command Required Dependencies --------------------- -PySD was originally built on python 2.7. Hoewever, the last version requires at least **python 3.7**. +PySD requires **python 3.7** or above. -PySD calls on the core Python data analytics stack, and a third party parsing library: +PySD builds on the core Python data analytics stack, and the Parsimonious parsing library: * Numpy * Scipy @@ -56,13 +55,13 @@ PySD calls on the core Python data analytics stack, and a third party parsing li * progressbar These modules should build automatically if you are installing via `pip`. If you are building from -the source code, or if pip fails to load them, they can be loaded with the same `pip` syntax as +source, or if pip fails to load them, they can be loaded with the same `pip` syntax as above. Optional Dependencies --------------------- -In order to plot results from the model as shown in :doc:`basic usage <../basic_usage>`: +In order to plot model outputs as shown in :doc:`basic usage <../basic_usage>`: * Matplotlib @@ -77,7 +76,7 @@ These Python libraries bring additional data analytics capabilities to the analy Additionally, the System Dynamics Translator utility developed by Robert Ward is useful for translating models from other system dynamics formats into the XMILE standard, to be read by PySD. -These modules can be installed using pip with syntax similar to the above. +These modules can be installed using pip with a syntax similar to the above. Additional Resources diff --git a/docs/reporting_bugs.rst b/docs/reporting_bugs.rst index 6a24fb16..71833f81 100644 --- a/docs/reporting_bugs.rst +++ b/docs/reporting_bugs.rst @@ -1,21 +1,21 @@ Reporting bugs ============== -Before reporting any bug, please make sure that you are using the latest version of PySD, you can get your version by running `python -m pysd -v` on the command line. +Before reporting any bug, please make sure that you are using the latest version of PySD. You can get the version number by running `python -m pysd -v` on the command line. 
-All the bugs must be reported in the project's `issue tracker on github `_. +All bugs must be reported in the project's `issue tracker on github `_. Bugs during translation ----------------------- 1. Check the line where it happened and try to identify if it is due to a missing function or feature or for any other reason. -2. See if there is any opened issue with the same of similar bug. If it is you can add there your specific problem. -3. If there is no similar issue, open a new one. Try to use a descriptive title such us `Missing subscripts support for Xmile models`, avoid titles like `Error when parsing Xmile model`. Provide the given error information, and if possible, a small model reproducing the same error. +2. See if there is any open issue with the same or a similar bug. If there is, you can add your specific problem there. +3. If not previously reported, open a new issue. Try to use a descriptive title such us `Missing subscripts support for Xmile models`, avoid titles like `Error while parsing Xmile model`. Provide the given error information and, if possible, a small model reproducing the same error. Bugs during runtime ------------------- -1. Check if similar bug has been detected on the issue tracker. If not open a new issue with a descriptive title. +1. Check if a similar bug has been reported on the issue tracker. If that is not the case, open a new issue with a descriptive title. 2. Provide the error information and all the relevant lines you used to execute the model. -3. If possible provide a small model reproducing the bug. +3. If possible, provide a small model reproducing the bug. diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 3594df79..11c0dd71 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -1,20 +1,20 @@ -Structure of the PySD module -============================ +Structure of the PySD library +============================= -PySD provides a set of translators that allow to build an original model into an abstract model representation (AMR), also called Abstract Model. This representation is based on a series of Python classes that allow to have a version of the model independent of the source language which classifies its elements depending on their type and expresses the mathematical formulations in an abstract syntax tree. This representation can be used by a builder, which allows to write the final functional language in another programming language. See the example of the complete process in the figure below. +PySD provides translators that allow to build an original model into an Abstract Model Representation (AMR), or :doc:`Abstract Model ` for short. This representation allows to gather all the model equations and behavior into a number of Python data classes. Therefore, the AMR is Python code, hence independent of the source language used to write the model. The AMR is then passed to a builder, which converts it to source code of a programming language of our choice. See the example of the complete process in the figure below. .. image:: ../images/abstract_model.png :width: 700 px :align: center -Currently, PYSD can translate Vensim models (mdl format) or models in Xmile format (exported from Vensim, Stella or other software). The only builder available at the moment builds the models in Python. +Currently, PySD can translate Vensim models (mdl format) or models in Xmile format (exported from Vensim, Stella or other software) into an AMR. 
The only builder available at the moment builds the models in Python. -For models translated into Python, all the necessary functions and classes are incorporated in this library so that they can be executed. The Model class is the main class that allows loading and running a model, as well as modifying the values of its parameters, among many other possibilities. +For models translated into Python, all the necessary functions and classes to run it are incorporated in PySD. The :py:class:`Model` class is the main class that allows loading and running a model, as well as modifying the values of its parameters, among many other possibilities. Translation ----------- -The internal functions of the model translation components and relevant objects can be seen in the following documents: +The internals of the translation process may be found in the following documents: .. toctree:: :maxdepth: 2 @@ -25,13 +25,13 @@ The internal functions of the model translation components and relevant objects -The PySD module is capable of importing models from a Vensim model file (\*.mdl) or an XMILE format xml file. Translation makes use of a Parsing Expression Grammar parser, using the third party Python library Parsimonious to construct an abstract syntax tree based upon the full model file (in the case of Vensim) or individual expressions (in the case of XMILE). The translators then crawl the tree, using a set of classes to define a pseudo model representation called :doc:`Abstract Model `. +PySD can import models in Vensim's \*.mdl file format and in XMILE format (\*xml file). Parsimonious is the Parsing Expression Grammar (PEG) parser library used in PySD to parse the original models and construct an abstract syntax tree. The translators then crawl the tree, using a set of classes to define a pseudo model representation called :doc:`Abstract Model `. Building the model ------------------ -The builders allow you to build the final model in the desired language. To do so, they use a series of classes that subtract the information from the abstract model and convert it into the desired code. Currently there is only one builder to build the models in Python, any contribution to add new builders is welcome. +The builders allow you to build the final model in the desired programming language. To do so, they use a series of classes that obtain the information from the Abstract Model and convert it into the desired code. Currently PySD only includes a builder to build the models in Python. Any contribution to add new builders (and solvers) for other programming languages is welcome. .. toctree:: :maxdepth: 2 @@ -49,11 +49,11 @@ For loading a translated model with Python see :doc:`basic usage <../basic_usage The Python builder constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. -The model class also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. +The Model class also contains a function for each of the model components, representing the essential model equations. 
Each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. -The model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. +The Model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. -Lastly, the model class provides a set of methods that are used to facilitate simulation. The .run() function returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. +Lastly, the model class provides a set of methods that are used to facilitate simulation. The :py:func:`run()` method returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. .. toctree:: :maxdepth: 2 diff --git a/docs/tools.rst b/docs/tools.rst index 2a30bdfc..99aa61f5 100644 --- a/docs/tools.rst +++ b/docs/tools.rst @@ -1,7 +1,7 @@ Tools ===== -Some tools are given with the library. +Some additional tools are provided with the library. Benchmarking ------------ From bd237b99d7569bfd9add53666b73bf86c3b4b216 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Thu, 12 May 2022 12:48:13 +0200 Subject: [PATCH 67/96] more docs reviews --- docs/development/complement.rst | 4 ++-- docs/structure/abstract_model.rst | 20 +++++++++++--------- docs/structure/structure_index.rst | 10 +++++----- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/docs/development/complement.rst b/docs/development/complement.rst index 1e356d2f..7cacb285 100644 --- a/docs/development/complement.rst +++ b/docs/development/complement.rst @@ -9,6 +9,6 @@ An excellent JavaScript library called `sd.js `_ delveloped by `Gönenç Yücel `_ includes a really neat method for categorizing behavior modes and exploring parameter space to determine the boundaries between them. -The `SDQC library ` developed by Eneko Martin Martinez may be used to check the quality of the data imported by Vensim models from speadsheet files. +The `SDQC library `_ developed by Eneko Martin Martinez may be used to check the quality of the data imported by Vensim models from speadsheet files. -The `excels2vensim library `, also developed by Eneko Martin Martinez, aims to simplify the incorporation of equations from external data into Vensim. 
\ No newline at end of file +The `excels2vensim library `_, also developed by Eneko Martin Martinez, aims to simplify the incorporation of equations from external data into Vensim. \ No newline at end of file diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index 2d495023..50b42a4e 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -1,23 +1,25 @@ Abstract Model Representation ============================= The Abstract Model representation allows a separation of concern between -translation and the building. The translation will be called anything that -happens between the source code and the Abstract Model representation. While the -building will be everything that happens between the Abstract Model and the -final code. +translation and building. Translation involves anything that +happens from the moment the source code of the original model is loaded +into memory up to the creation of the Abstract Model representation. Similarly, +the building will be everything that happens between the Abstract Model and the +source code of the model written in a programming language different than that +of the original model. -This approach allows easily including new source codes or output codes, +This approach allows to easily include new source codes or output codes, without needing to make a lot of changes in the whole library. The -:py:class:`AbstractModel` object should keep as mutch information as -possible from the original model. Althought the information is not used +:py:class:`AbstractModel` object should retain as much information from the +original model as possible. Although the information is not used in the output code, it may be necessary for other future output languages or for improvements in the currently supported outputs. For example, currently -the unchangeable constanst (== defined in Vensim) are treated as regular +unchangeable constansts (== defined in Vensim) are treated as regular components with Python, but in the future we may want to protect them from user interaction. The lowest level of this representation is the Abstract Syntax Tree (AST). -Which includes all the operations and calls in a given component expression. +This includes all the operations and calls in a given component expression. Main abstract structures ------------------------ diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 11c0dd71..729c0e4f 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -1,7 +1,7 @@ Structure of the PySD library ============================= -PySD provides translators that allow to build an original model into an Abstract Model Representation (AMR), or :doc:`Abstract Model ` for short. This representation allows to gather all the model equations and behavior into a number of Python data classes. Therefore, the AMR is Python code, hence independent of the source language used to write the model. The AMR is then passed to a builder, which converts it to source code of a programming language of our choice. See the example of the complete process in the figure below. +PySD provides translators that allow to convert the original model into an Abstract Model Representation (AMR), or :doc:`Abstract Model ` for short. This representation allows to gather all the model equations and behavior into a number of Python data classes. 
Therefore, the AMR is Python code, hence independent of the programming language used to write the original model. The AMR is then passed to a builder, which converts it to source code of a programming language of our choice. See the example of the complete process in the figure below. .. image:: ../images/abstract_model.png :width: 700 px :align: center @@ -9,12 +9,12 @@ PySD provides translators that allow to build an original model into an Abstract Currently, PySD can translate Vensim models (mdl format) or models in Xmile format (exported from Vensim, Stella or other software) into an AMR. The only builder available at the moment builds the models in Python. -For models translated into Python, all the necessary functions and classes to run it are incorporated in PySD. The :py:class:`Model` class is the main class that allows loading and running a model, as well as modifying the values of its parameters, among many other possibilities. +For models translated to Python, all the necessary functions and classes to run them are included in PySD. The :py:class:`Model` class is the main class that allows loading and running a model, as well as modifying the values of its parameters, among many other possibilities. Translation ----------- -The internals of the translation process may be found in the following documents: +The internals of the translation process may be found in the following links of the documentation: .. toctree:: :maxdepth: 2 @@ -25,13 +25,13 @@ The internals of the translation process may be found in the following documents -PySD can import models in Vensim's \*.mdl file format and in XMILE format (\*xml file). Parsimonious is the Parsing Expression Grammar (PEG) parser library used in PySD to parse the original models and construct an abstract syntax tree. The translators then crawl the tree, using a set of classes to define a pseudo model representation called :doc:`Abstract Model `. +PySD can import models in Vensim's \*.mdl file format and in XMILE format (\*.xml file). `Parsimonious `_ is the Parsing Expression Grammar `(PEG) `_ parser library used in PySD to parse the original models and construct an abstract syntax tree. The translators then crawl the tree, using a set of classes to define the :doc:`Abstract Model `. Building the model ------------------ -The builders allow you to build the final model in the desired programming language. To do so, they use a series of classes that obtain the information from the Abstract Model and convert it into the desired code. Currently PySD only includes a builder to build the models in Python. Any contribution to add new builders (and solvers) for other programming languages is welcome. +The builders allow building the final model in any programming language (so long as there is a builder for that particular language). To do so, they use a series of classes that obtain the information from the Abstract Model and convert it into the desired code. Currently PySD only includes a builder to build the models in Python. Any contribution to add new builders (and solvers) for other programming languages is welcome. ..
toctree:: :maxdepth: 2 From 75513db46737a4f18f0e9d1eba9482b9a003a729 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 13:23:09 +0200 Subject: [PATCH 68/96] Document --- docs/basic_usage.rst | 12 ++++++------ docs/command_line_usage.rst | 2 +- docs/installation.rst | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index a5b9622e..e7ee33fe 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -35,7 +35,7 @@ To view a synopsis of the model equations and documentation, use the :py:func:`. .. note:: - You can also load an already translated model file. This will be faster than loading an `mdl` model, as the translation is not required:: + You can also load an already translated model file. This will be faster than loading an original model, as the translation is not required:: >>> import pysd >>> model = pysd.load('Teacup.py') @@ -69,7 +69,7 @@ The simplest way to simulate the model is to use the :py:func:`.run()` command w [241 rows x 8 columns] -Pandas proovides a simple plotting capability, that we can use to see how the temperature of the tea cup evolves over time:: +Pandas provides a simple plotting capability that we can use to see how the temperature of the teacup evolves over time:: >>> import matplotlib.pyplot as plt >>> stocks["Teacup Temperature"].plot() @@ -82,7 +82,7 @@ Pandas proovides a simple plotting capability, that we can use to see how the te :width: 400 px :align: center -To show a progressbar during the model integration, the `progress` argument can be passed to the :py:func:`.run()` command. To do so, the progressbar package is required:: +To show a progressbar during the model integration, the `progress` argument can be passed to the :py:func:`.run()` command:: >>> stocks = model.run(progress=True) @@ -146,7 +146,7 @@ If the measured data that we are comparing with our model comes in at irregular Retrieving a flat DataFrame ---------------------------------- +--------------------------- The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray` in the output :py:class:`pandas.DataFrame`. To get a flat dataframe, set `flatten=True` when calling the :py:func:`run()` method:: >>> model.run(flatten=True) @@ -220,7 +220,7 @@ Initial conditions for our model can be set in several ways. So far, we have use >>> model.run(initial_condition=(0, {'Teacup Temperature': 33})) -The new value can be a *xarray.DataArray*, as explained in the previous section. +The new value can be a :py:class:`xarray.DataArray`, as explained in the previous section. Additionally, we can run the model forward from its current position, by passing initial_condition=‘current’. After having run the model from time zero to thirty, we can ask the model to continue running forward for another chunk of time:: @@ -234,7 +234,7 @@ There are times when we may choose to overwrite a stock with a constant value (i Querying current values ----------------------- -We can easily access the current value of a model component using curly brackets.
For instance, to find the temperature of the teacup, we simply call:: >>> model['Teacup Temperature'] diff --git a/docs/command_line_usage.rst b/docs/command_line_usage.rst index 15469411..cf757f93 100644 --- a/docs/command_line_usage.rst +++ b/docs/command_line_usage.rst @@ -148,7 +148,7 @@ Sometimes we do not want to change the actual value of a variable but we want to this will set initial *Teacup Temperature* to 30. -Putting It all together +Putting it all together ----------------------- Several commands may be used together. The optional arguments and model arguments go first (those starting with '-' or '--'), then the model file path, and finally the variable or variables to change: diff --git a/docs/installation.rst b/docs/installation.rst index c6175e67..ea2d414a 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -39,7 +39,7 @@ Required Dependencies --------------------- PySD requires **python 3.7** or above. -PySD builds on the core Python data analytics stack, and the Parsimonious parsing library: +PySD builds on the core Python data analytics stack, and the following third party libraries: * Numpy * Scipy @@ -52,7 +52,7 @@ PySD builds on the core Python data analytics stack, and the Parsimonious parsin * chardet * black * openpyxl -* progressbar +* progressbar2 These modules should build automatically if you are installing via `pip`. If you are building from source, or if pip fails to load them, they can be loaded with the same `pip` syntax as From 07be4872cbf4ac9842f62246d280c6d4a7cf7f45 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 13:26:43 +0200 Subject: [PATCH 69/96] Remove line --- docs/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index de1013ea..d5ac7e65 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -23,7 +23,6 @@ from docs.generate_tables import generate_tables - # Generate tables used for documentation generate_tables() From 4335de51fc96940cd5c7fc2e13a261a01b8305a6 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 13:30:58 +0200 Subject: [PATCH 70/96] Add link to PySD model class --- docs/basic_usage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index e7ee33fe..a4e7eca7 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -9,7 +9,7 @@ To begin, we must first load the PySD module, and use it to import a model file: >>> model = pysd.read_vensim('Teacup.mdl') -This code creates an instance of the PySD Model class from an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling at room temperature. +This code creates an instance of the :doc:`PySD Model class ` from an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling at room temperature. .. 
image:: images/Teacup.png :width: 350 px From eb258a6f7c2733c849104c599a6dea3213974a7c Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 13:41:04 +0200 Subject: [PATCH 71/96] Rename folders --- docs/structure/abstract_model.rst | 6 +++--- docs/structure/python_builder.rst | 10 +++++----- docs/structure/vensim_translation.rst | 6 +++--- docs/structure/xmile_translation.rst | 6 +++--- pysd/{building => builders}/__init__.py | 0 pysd/{building => builders}/python/__init__.py | 0 pysd/{building => builders}/python/imports.py | 0 pysd/{building => builders}/python/namespace.py | 0 .../python/python_expressions_builder.py | 2 +- pysd/{building => builders}/python/python_functions.py | 0 .../python/python_model_builder.py | 2 +- pysd/{building => builders}/python/subscripts.py | 2 +- pysd/cli/main.py | 4 ++-- pysd/cli/parser.py | 4 ++-- pysd/pysd.py | 8 ++++---- pysd/tools/benchmarking.py | 4 ++-- pysd/{translation => translators}/__init__.py | 0 .../structures/__init__.py | 0 .../structures/abstract_expressions.py | 0 .../structures/abstract_model.py | 0 pysd/{translation => translators}/vensim/__init__.py | 0 .../vensim/parsing_grammars/common_grammar.peg | 0 .../vensim/parsing_grammars/components.peg | 0 .../vensim/parsing_grammars/element_object.peg | 0 .../vensim/parsing_grammars/file_sections.peg | 0 .../vensim/parsing_grammars/lookups.peg | 0 .../vensim/parsing_grammars/section_elements.peg | 0 .../vensim/parsing_grammars/sketch.peg | 0 .../vensim/vensim_element.py | 0 .../{translation => translators}/vensim/vensim_file.py | 0 .../vensim/vensim_section.py | 0 .../vensim/vensim_structures.py | 0 .../vensim/vensim_utils.py | 0 pysd/{translation => translators}/xmile/__init__.py | 0 .../xmile/parsing_grammars/equations.peg | 0 .../xmile/xmile_element.py | 0 pysd/{translation => translators}/xmile/xmile_file.py | 0 .../xmile/xmile_section.py | 0 .../xmile/xmile_structures.py | 0 pysd/{translation => translators}/xmile/xmile_utils.py | 0 .../pytest_python.py | 8 ++++---- .../pytest_split_views.py | 0 .../pytest_vensim.py | 4 ++-- 43 files changed, 33 insertions(+), 33 deletions(-) rename pysd/{building => builders}/__init__.py (100%) rename pysd/{building => builders}/python/__init__.py (100%) rename pysd/{building => builders}/python/imports.py (100%) rename pysd/{building => builders}/python/namespace.py (100%) rename pysd/{building => builders}/python/python_expressions_builder.py (99%) rename pysd/{building => builders}/python/python_functions.py (100%) rename pysd/{building => builders}/python/python_model_builder.py (99%) rename pysd/{building => builders}/python/subscripts.py (99%) rename pysd/{translation => translators}/__init__.py (100%) rename pysd/{translation => translators}/structures/__init__.py (100%) rename pysd/{translation => translators}/structures/abstract_expressions.py (100%) rename pysd/{translation => translators}/structures/abstract_model.py (100%) rename pysd/{translation => translators}/vensim/__init__.py (100%) rename pysd/{translation => translators}/vensim/parsing_grammars/common_grammar.peg (100%) rename pysd/{translation => translators}/vensim/parsing_grammars/components.peg (100%) rename pysd/{translation => translators}/vensim/parsing_grammars/element_object.peg (100%) rename pysd/{translation => translators}/vensim/parsing_grammars/file_sections.peg (100%) rename pysd/{translation => translators}/vensim/parsing_grammars/lookups.peg (100%) rename pysd/{translation => translators}/vensim/parsing_grammars/section_elements.peg (100%) 
rename pysd/{translation => translators}/vensim/parsing_grammars/sketch.peg (100%) rename pysd/{translation => translators}/vensim/vensim_element.py (100%) rename pysd/{translation => translators}/vensim/vensim_file.py (100%) rename pysd/{translation => translators}/vensim/vensim_section.py (100%) rename pysd/{translation => translators}/vensim/vensim_structures.py (100%) rename pysd/{translation => translators}/vensim/vensim_utils.py (100%) rename pysd/{translation => translators}/xmile/__init__.py (100%) rename pysd/{translation => translators}/xmile/parsing_grammars/equations.peg (100%) rename pysd/{translation => translators}/xmile/xmile_element.py (100%) rename pysd/{translation => translators}/xmile/xmile_file.py (100%) rename pysd/{translation => translators}/xmile/xmile_section.py (100%) rename pysd/{translation => translators}/xmile/xmile_structures.py (100%) rename pysd/{translation => translators}/xmile/xmile_utils.py (100%) rename tests/{pytest_building => pytest_builders}/pytest_python.py (94%) rename tests/{pytest_translation => pytest_translators}/pytest_split_views.py (100%) rename tests/{pytest_translation => pytest_translators}/pytest_vensim.py (97%) diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index 50b42a4e..50195eac 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -2,7 +2,7 @@ Abstract Model Representation ============================= The Abstract Model representation allows a separation of concern between translation and building. Translation involves anything that -happens from the moment the source code of the original model is loaded +happens from the moment the source code of the original model is loaded into memory up to the creation of the Abstract Model representation. Similarly, the building will be everything that happens between the Abstract Model and the source code of the model written in a programming language different than that @@ -23,10 +23,10 @@ This includes all the operations and calls in a given component expression. Main abstract structures ------------------------ -.. automodule:: pysd.translation.structures.abstract_model +.. automodule:: pysd.translators.structures.abstract_model :members: Abstrat structures for the AST ------------------------------ -.. automodule:: pysd.translation.structures.abstract_expressions +.. automodule:: pysd.translators.structures.abstract_expressions :members: diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst index b6734c27..0e9a6088 100644 --- a/docs/structure/python_builder.rst +++ b/docs/structure/python_builder.rst @@ -10,12 +10,12 @@ In addition to translating individual commands between Vensim/XMILE and Python, Main builders ------------- -.. automodule:: pysd.building.python.python_model_builder +.. automodule:: pysd.builders.python.python_model_builder :members: Expression builders ------------------- -.. automodule:: pysd.building.python.python_expressions_builder +.. automodule:: pysd.builders.python.python_expressions_builder :members: Supported expressions examples @@ -48,17 +48,17 @@ Functions Namespace manager ----------------- -.. automodule:: pysd.building.python.namespace +.. automodule:: pysd.builders.python.namespace :members: NamespaceManager Subscript manager ----------------- -.. automodule:: pysd.building.python.subscripts +.. automodule:: pysd.builders.python.subscripts :members: Imports manager --------------- -.. automodule:: pysd.building.python.imports +.. 
automodule:: pysd.builders.python.imports :members: diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index d31eedbb..9607c584 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -19,21 +19,21 @@ Once the model is parsed and broken following the previous steps. The :py:class: Vensim file ^^^^^^^^^^^ -.. automodule:: pysd.translation.vensim.vensim_file +.. automodule:: pysd.translators.vensim.vensim_file :members: VensimFile :undoc-members: Vensim section ^^^^^^^^^^^^^^ -.. automodule:: pysd.translation.vensim.vensim_section +.. automodule:: pysd.translators.vensim.vensim_section :members: Section :undoc-members: Vensim element ^^^^^^^^^^^^^^ -.. automodule:: pysd.translation.vensim.vensim_element +.. automodule:: pysd.translators.vensim.vensim_element :members: SubscriptRange, Element, Component, UnchangeableConstant, Data, Lookup :undoc-members: diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst index d5ad6209..3bb07c63 100644 --- a/docs/structure/xmile_translation.rst +++ b/docs/structure/xmile_translation.rst @@ -22,21 +22,21 @@ Once the model is parsed and broken following the previous steps. The :py:class: Xmile file ^^^^^^^^^^ -.. automodule:: pysd.translation.xmile.xmile_file +.. automodule:: pysd.translators.xmile.xmile_file :members: XmileFile :undoc-members: Xmile section ^^^^^^^^^^^^^ -.. automodule:: pysd.translation.xmile.xmile_section +.. automodule:: pysd.translators.xmile.xmile_section :members: Section :undoc-members: Xmile element ^^^^^^^^^^^^^ -.. automodule:: pysd.translation.xmile.xmile_element +.. automodule:: pysd.translators.xmile.xmile_element :members: SubscriptRange, Element, Flaux, Gf, Stock :undoc-members: diff --git a/pysd/building/__init__.py b/pysd/builders/__init__.py similarity index 100% rename from pysd/building/__init__.py rename to pysd/builders/__init__.py diff --git a/pysd/building/python/__init__.py b/pysd/builders/python/__init__.py similarity index 100% rename from pysd/building/python/__init__.py rename to pysd/builders/python/__init__.py diff --git a/pysd/building/python/imports.py b/pysd/builders/python/imports.py similarity index 100% rename from pysd/building/python/imports.py rename to pysd/builders/python/imports.py diff --git a/pysd/building/python/namespace.py b/pysd/builders/python/namespace.py similarity index 100% rename from pysd/building/python/namespace.py rename to pysd/builders/python/namespace.py diff --git a/pysd/building/python/python_expressions_builder.py b/pysd/builders/python/python_expressions_builder.py similarity index 99% rename from pysd/building/python/python_expressions_builder.py rename to pysd/builders/python/python_expressions_builder.py index c8a61978..dca8e2e4 100644 --- a/pysd/building/python/python_expressions_builder.py +++ b/pysd/builders/python/python_expressions_builder.py @@ -15,7 +15,7 @@ import numpy as np from pysd.py_backend.utils import compute_shape -from pysd.translation.structures.abstract_expressions import\ +from pysd.translators.structures.abstract_expressions import\ AbstractSyntax, ArithmeticStructure, CallStructure, DataStructure,\ DelayFixedStructure, DelayStructure, DelayNStructure, ForecastStructure,\ GameStructure, GetConstantsStructure, GetDataStructure,\ diff --git a/pysd/building/python/python_functions.py b/pysd/builders/python/python_functions.py similarity index 100% rename from pysd/building/python/python_functions.py rename to 
pysd/builders/python/python_functions.py diff --git a/pysd/building/python/python_model_builder.py b/pysd/builders/python/python_model_builder.py similarity index 99% rename from pysd/building/python/python_model_builder.py rename to pysd/builders/python/python_model_builder.py index 2775c1a0..b267a447 100644 --- a/pysd/building/python/python_model_builder.py +++ b/pysd/builders/python/python_model_builder.py @@ -11,7 +11,7 @@ from pathlib import Path from typing import Union -from pysd.translation.structures.abstract_model import\ +from pysd.translators.structures.abstract_model import\ AbstractComponent, AbstractElement, AbstractModel, AbstractSection from . import python_expressions_builder as vs diff --git a/pysd/building/python/subscripts.py b/pysd/builders/python/subscripts.py similarity index 99% rename from pysd/building/python/subscripts.py rename to pysd/builders/python/subscripts.py index e37f749c..df9b0258 100644 --- a/pysd/building/python/subscripts.py +++ b/pysd/builders/python/subscripts.py @@ -3,7 +3,7 @@ import numpy as np from typing import List -from pysd.translation.structures.abstract_model import AbstractSubscriptRange +from pysd.translators.structures.abstract_model import AbstractSubscriptRange from pysd.py_backend.external import ExtSubscript diff --git a/pysd/cli/main.py b/pysd/cli/main.py index 53b5bcfc..5c5384ec 100644 --- a/pysd/cli/main.py +++ b/pysd/cli/main.py @@ -8,9 +8,9 @@ from .parser import parser import pysd -from pysd.translation.vensim.vensim_utils import supported_extensions as\ +from pysd.translators.vensim.vensim_utils import supported_extensions as\ vensim_extensions -from pysd.translation.xmile.xmile_utils import supported_extensions as\ +from pysd.translators.xmile.xmile_utils import supported_extensions as\ xmile_extensions diff --git a/pysd/cli/parser.py b/pysd/cli/parser.py index 1160b66b..2a59a662 100644 --- a/pysd/cli/parser.py +++ b/pysd/cli/parser.py @@ -8,9 +8,9 @@ from argparse import ArgumentParser, Action from pysd import __version__ -from pysd.translation.vensim.vensim_utils import supported_extensions as\ +from pysd.translators.vensim.vensim_utils import supported_extensions as\ vensim_extensions -from pysd.translation.xmile.xmile_utils import supported_extensions as\ +from pysd.translators.xmile.xmile_utils import supported_extensions as\ xmile_extensions docs = "https://pysd.readthedocs.io/en/master/command_line_usage.html" diff --git a/pysd/pysd.py b/pysd/pysd.py index 63283718..cb1dcc3b 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -61,8 +61,8 @@ def read_xmile(xmile_file, data_files=None, initialize=True, >>> model = read_xmile('../tests/test-models/samples/teacup/teacup.xmile') """ - from pysd.translation.xmile.xmile_file import XmileFile - from pysd.building.python.python_model_builder import ModelBuilder + from pysd.translators.xmile.xmile_file import XmileFile + from pysd.builders.python.python_model_builder import ModelBuilder # Read and parse Xmile file xmile_file_obj = XmileFile(xmile_file) @@ -138,8 +138,8 @@ def read_vensim(mdl_file, data_files=None, initialize=True, >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ - from pysd.translation.vensim.vensim_file import VensimFile - from pysd.building.python.python_model_builder import ModelBuilder + from pysd.translators.vensim.vensim_file import VensimFile + from pysd.builders.python.python_model_builder import ModelBuilder # Read and parse Vensim file ven_file = VensimFile(mdl_file, encoding=encoding) ven_file.parse() diff --git 
a/pysd/tools/benchmarking.py b/pysd/tools/benchmarking.py index 32cf294b..51573805 100644 --- a/pysd/tools/benchmarking.py +++ b/pysd/tools/benchmarking.py @@ -10,9 +10,9 @@ from pysd import read_vensim, read_xmile, load from ..py_backend.utils import load_outputs, detect_encoding -from pysd.translation.vensim.vensim_utils import supported_extensions as\ +from pysd.translators.vensim.vensim_utils import supported_extensions as\ vensim_extensions -from pysd.translation.xmile.xmile_utils import supported_extensions as\ +from pysd.translators.xmile.xmile_utils import supported_extensions as\ xmile_extensions diff --git a/pysd/translation/__init__.py b/pysd/translators/__init__.py similarity index 100% rename from pysd/translation/__init__.py rename to pysd/translators/__init__.py diff --git a/pysd/translation/structures/__init__.py b/pysd/translators/structures/__init__.py similarity index 100% rename from pysd/translation/structures/__init__.py rename to pysd/translators/structures/__init__.py diff --git a/pysd/translation/structures/abstract_expressions.py b/pysd/translators/structures/abstract_expressions.py similarity index 100% rename from pysd/translation/structures/abstract_expressions.py rename to pysd/translators/structures/abstract_expressions.py diff --git a/pysd/translation/structures/abstract_model.py b/pysd/translators/structures/abstract_model.py similarity index 100% rename from pysd/translation/structures/abstract_model.py rename to pysd/translators/structures/abstract_model.py diff --git a/pysd/translation/vensim/__init__.py b/pysd/translators/vensim/__init__.py similarity index 100% rename from pysd/translation/vensim/__init__.py rename to pysd/translators/vensim/__init__.py diff --git a/pysd/translation/vensim/parsing_grammars/common_grammar.peg b/pysd/translators/vensim/parsing_grammars/common_grammar.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/common_grammar.peg rename to pysd/translators/vensim/parsing_grammars/common_grammar.peg diff --git a/pysd/translation/vensim/parsing_grammars/components.peg b/pysd/translators/vensim/parsing_grammars/components.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/components.peg rename to pysd/translators/vensim/parsing_grammars/components.peg diff --git a/pysd/translation/vensim/parsing_grammars/element_object.peg b/pysd/translators/vensim/parsing_grammars/element_object.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/element_object.peg rename to pysd/translators/vensim/parsing_grammars/element_object.peg diff --git a/pysd/translation/vensim/parsing_grammars/file_sections.peg b/pysd/translators/vensim/parsing_grammars/file_sections.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/file_sections.peg rename to pysd/translators/vensim/parsing_grammars/file_sections.peg diff --git a/pysd/translation/vensim/parsing_grammars/lookups.peg b/pysd/translators/vensim/parsing_grammars/lookups.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/lookups.peg rename to pysd/translators/vensim/parsing_grammars/lookups.peg diff --git a/pysd/translation/vensim/parsing_grammars/section_elements.peg b/pysd/translators/vensim/parsing_grammars/section_elements.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/section_elements.peg rename to pysd/translators/vensim/parsing_grammars/section_elements.peg diff --git a/pysd/translation/vensim/parsing_grammars/sketch.peg 
b/pysd/translators/vensim/parsing_grammars/sketch.peg similarity index 100% rename from pysd/translation/vensim/parsing_grammars/sketch.peg rename to pysd/translators/vensim/parsing_grammars/sketch.peg diff --git a/pysd/translation/vensim/vensim_element.py b/pysd/translators/vensim/vensim_element.py similarity index 100% rename from pysd/translation/vensim/vensim_element.py rename to pysd/translators/vensim/vensim_element.py diff --git a/pysd/translation/vensim/vensim_file.py b/pysd/translators/vensim/vensim_file.py similarity index 100% rename from pysd/translation/vensim/vensim_file.py rename to pysd/translators/vensim/vensim_file.py diff --git a/pysd/translation/vensim/vensim_section.py b/pysd/translators/vensim/vensim_section.py similarity index 100% rename from pysd/translation/vensim/vensim_section.py rename to pysd/translators/vensim/vensim_section.py diff --git a/pysd/translation/vensim/vensim_structures.py b/pysd/translators/vensim/vensim_structures.py similarity index 100% rename from pysd/translation/vensim/vensim_structures.py rename to pysd/translators/vensim/vensim_structures.py diff --git a/pysd/translation/vensim/vensim_utils.py b/pysd/translators/vensim/vensim_utils.py similarity index 100% rename from pysd/translation/vensim/vensim_utils.py rename to pysd/translators/vensim/vensim_utils.py diff --git a/pysd/translation/xmile/__init__.py b/pysd/translators/xmile/__init__.py similarity index 100% rename from pysd/translation/xmile/__init__.py rename to pysd/translators/xmile/__init__.py diff --git a/pysd/translation/xmile/parsing_grammars/equations.peg b/pysd/translators/xmile/parsing_grammars/equations.peg similarity index 100% rename from pysd/translation/xmile/parsing_grammars/equations.peg rename to pysd/translators/xmile/parsing_grammars/equations.peg diff --git a/pysd/translation/xmile/xmile_element.py b/pysd/translators/xmile/xmile_element.py similarity index 100% rename from pysd/translation/xmile/xmile_element.py rename to pysd/translators/xmile/xmile_element.py diff --git a/pysd/translation/xmile/xmile_file.py b/pysd/translators/xmile/xmile_file.py similarity index 100% rename from pysd/translation/xmile/xmile_file.py rename to pysd/translators/xmile/xmile_file.py diff --git a/pysd/translation/xmile/xmile_section.py b/pysd/translators/xmile/xmile_section.py similarity index 100% rename from pysd/translation/xmile/xmile_section.py rename to pysd/translators/xmile/xmile_section.py diff --git a/pysd/translation/xmile/xmile_structures.py b/pysd/translators/xmile/xmile_structures.py similarity index 100% rename from pysd/translation/xmile/xmile_structures.py rename to pysd/translators/xmile/xmile_structures.py diff --git a/pysd/translation/xmile/xmile_utils.py b/pysd/translators/xmile/xmile_utils.py similarity index 100% rename from pysd/translation/xmile/xmile_utils.py rename to pysd/translators/xmile/xmile_utils.py diff --git a/tests/pytest_building/pytest_python.py b/tests/pytest_builders/pytest_python.py similarity index 94% rename from tests/pytest_building/pytest_python.py rename to tests/pytest_builders/pytest_python.py index aaa6aec6..3a5b28bd 100644 --- a/tests/pytest_building/pytest_python.py +++ b/tests/pytest_builders/pytest_python.py @@ -1,12 +1,12 @@ import pytest from pathlib import Path -from pysd.building.python.subscripts import SubscriptManager -from pysd.building.python.python_model_builder import\ +from pysd.builders.python.subscripts import SubscriptManager +from pysd.builders.python.python_model_builder import\ ComponentBuilder, ElementBuilder, 
SectionBuilder -from pysd.building.python.python_expressions_builder import\ +from pysd.builders.python.python_expressions_builder import\ StructureBuilder, BuildAST -from pysd.translation.structures.abstract_model import\ +from pysd.translators.structures.abstract_model import\ AbstractComponent, AbstractElement, AbstractSection, AbstractSubscriptRange diff --git a/tests/pytest_translation/pytest_split_views.py b/tests/pytest_translators/pytest_split_views.py similarity index 100% rename from tests/pytest_translation/pytest_split_views.py rename to tests/pytest_translators/pytest_split_views.py diff --git a/tests/pytest_translation/pytest_vensim.py b/tests/pytest_translators/pytest_vensim.py similarity index 97% rename from tests/pytest_translation/pytest_vensim.py rename to tests/pytest_translators/pytest_vensim.py index b5d36332..e1ec096e 100644 --- a/tests/pytest_translation/pytest_vensim.py +++ b/tests/pytest_translators/pytest_vensim.py @@ -2,8 +2,8 @@ from pathlib import Path from parsimonious import VisitationError -from pysd.translation.vensim.vensim_file import VensimFile -from pysd.translation.vensim.vensim_element import Element +from pysd.translators.vensim.vensim_file import VensimFile +from pysd.translators.vensim.vensim_element import Element @pytest.mark.parametrize( From f164338a20390e646957ba4f6c8d330db4125dc5 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 13:51:19 +0200 Subject: [PATCH 72/96] Update paths in MANIFEST and setup --- MANIFEST.in | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index d8707988..2492107b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ include requirements.txt include README.md include LICENSE -graft pysd/translation/*/parsing_grammars +graft pysd/translators/*/parsing_grammars diff --git a/setup.py b/setup.py index 77caae2f..5bca76a9 100755 --- a/setup.py +++ b/setup.py @@ -29,7 +29,7 @@ ], install_requires=open('requirements.txt').read().strip().split('\n'), package_data={ - 'translation': [ + 'translators': [ '*/parsing_grammars/*.peg' ] }, From 7dc8a430163c858c72ce5d7e48b5144b5e14237b Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 14:25:10 +0200 Subject: [PATCH 73/96] Update links to methods --- docs/advanced_usage.rst | 14 +++++++------- docs/basic_usage.rst | 26 +++++++++++++------------- docs/structure/structure_index.rst | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index 978af408..05d7b7bb 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -35,22 +35,22 @@ Once the regression model is fit, we write a wrapper function for its predict me room_temp = model.components.room_temperature() return regression.predict([room_temp, tea_temp])[0] -To substitute this function directly for the heat_loss_to_room model component using the :py:func:`set_component()` method:: +To substitute this function directly for the heat_loss_to_room model component using the :py:meth:`.set_components` method:: model.set_components({'heat_loss_to_room': new_heatflow_function}) -If you want to replace a subscripted variable, you need to ensure that the output from the new function is the same as the previous one. You can check the current coordinates and dimensions of a component by using :py:data:`.get_coords(variable_name)` as it is explained in :doc:`basic usage <../basic_usage>`. 
+If you want to replace a subscripted variable, you need to ensure that the output from the new function is the same as the previous one. You can check the current coordinates and dimensions of a component by using :py:meth:`.get_coords` as it is explained in :doc:`basic usage <../basic_usage>`. .. note:: Alternatively, you can also set a model component directly:: model.components.heat_loss_to_room = new_heatflow_function - However, this will only accept the python name of the model component. While for the :py:func:`set_component()` method, the original name can be also used. + However, this will only accept the python name of the model component. While for the :py:meth:`.set_components` method, the original name can be also used. Splitting Vensim views in separate Python files (modules) --------------------------------------------------------- -In order to replicate the Vensim views in the translated models, the user can set the `split_views` argument to True in the :py:func:`read_vensim` function:: +In order to replicate the Vensim views in the translated models, the user can set the `split_views` argument to True in the :py:func:`pysd.read_vensim` function:: read_vensim("many_views_model.mdl", split_views=True) @@ -81,7 +81,7 @@ If macros are present, they will be self-contained in files named after the macr Starting simulations from an end-state of another simulation ------------------------------------------------------------ -The current state of a model can be saved in a pickle file using the :py:data:`.export()` method:: +The current state of a model can be saved in a pickle file using the :py:meth:`.export` method:: import pysd model1 = pysd.read_vensim("my_model.mdl") @@ -112,13 +112,13 @@ the new simulation will have initial time equal to 50 with the saved values from Selecting and running a submodel -------------------------------- -A submodel of a translated model can be run as a standalone model. This can be done through the :py:data:`.select_submodel()` method: +A submodel of a translated model can be run as a standalone model. This can be done through the :py:meth:`.select_submodel` method: .. automethod:: pysd.py_backend.model.Model.select_submodel :noindex: -In order to preview the needed exogenous variables, the :py:data:`.get_dependencies()` method can be used: +In order to preview the needed exogenous variables, the :py:meth:`.get_dependencies` method can be used: .. automethod:: pysd.py_backend.model.Model.get_dependencies :noindex: diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst index a4e7eca7..9338af2e 100644 --- a/docs/basic_usage.rst +++ b/docs/basic_usage.rst @@ -18,7 +18,7 @@ This code creates an instance of the :doc:`PySD Model class `_. -To view a synopsis of the model equations and documentation, use the :py:func:`.doc` property of the Model class. This will generate a listing of all model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model:: +To view a synopsis of the model equations and documentation, use the :py:attr:`.doc` property of the Model class. This will generate a listing of all model elements, their documentation, units, and initial values, where appropriate, and return them as a :py:class:`pandas.DataFrame`. Here is a sample from the teacup model:: >>> model.doc @@ -49,7 +49,7 @@ To view a synopsis of the model equations and documentation, use the :py:func:`. 
Running the Model ----------------- -The simplest way to simulate the model is to use the :py:func:`.run()` command with no options. This runs the model with the default parameters supplied in the model file, and returns a :py:class:`pandas.DataFrame` of the values of the model components at every timestamp:: +The simplest way to simulate the model is to use the :py:meth:`.run` command with no options. This runs the model with the default parameters supplied in the model file, and returns a :py:class:`pandas.DataFrame` of the values of the model components at every timestamp:: >>> stocks = model.run() >>> stocks @@ -82,7 +82,7 @@ Pandas proovides a simple plotting capability, that we can use to see how the te :width: 400 px :align: center -To show a progressbar during the model integration, the `progress` argument can be passed to the :py:func:`.run()` command:: +To show a progressbar during the model integration, the `progress` argument can be passed to the :py:meth:`.run` command:: >>> stocks = model.run(progress=True) Running models with DATA type components ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Venim allows to import DATA type data from binary `.vdf` files. Variables defined without an equation in the model, will attempt to read their values from the `.vdf`. PySD allows running models with this kind of data definition using the data_files argument when calling :py:func:`.run()` command, e.g.:: +Vensim allows importing DATA type data from binary `.vdf` files. Variables defined without an equation in the model will attempt to read their values from the `.vdf`. PySD allows running models with this kind of data definition using the data_files argument when calling the :py:meth:`.run` command, e.g.:: >>> stocks = model.run(data_files="input_data.tab") @@ -109,7 +109,7 @@ If a variables are defined in different files, to choose the specific file a dic Outputting various run information ---------------------------------- -The :py:func:`.run()` command has a few options that make it more useful. In many situations we want to access components of the model other than merely the stocks - we can specify which components of the model should be included in the returned dataframe by including them in a list that we pass to the :py:func:`.run()` command, using the return_columns keyword argument:: +The :py:meth:`.run` command has a few options that make it more useful. In many situations we want to access components of the model other than merely the stocks - we can specify which components of the model should be included in the returned dataframe by including them in a list that we pass to the :py:meth:`.run` command, using the return_columns keyword argument:: >>> model.run(return_columns=['Teacup Temperature', 'Room Temperature']) @@ -129,7 +129,7 @@ The :py:func:`.run()` command has a few options that make it more useful. In man -If the measured data that we are comparing with our model comes in at irregular timestamps, we may want to sample the model at timestamps to match. The :py:func:`.run()` function provides this functionality with the return_timestamps keyword argument:: +If the measured data that we are comparing with our model comes in at irregular timestamps, we may want to sample the model at timestamps to match.
The :py:meth:`.run` function provides this functionality with the return_timestamps keyword argument:: >>> model.run(return_timestamps=[0, 1, 3, 7, 9.5, 13, 21, 25, 30]) @@ -147,19 +147,19 @@ If the measured data that we are comparing with our model comes in at irregular Retrieving a flat DataFrame --------------------------- -The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray` in the output :py:class:`pandas.DataFrame`. To get a flat dataframe, set `flatten=True` when calling the :py:func:`run()` method:: +The subscripted variables, in general, will be returned as :py:class:`xarray.DataArray` in the output :py:class:`pandas.DataFrame`. To get a flat dataframe, set `flatten=True` when calling the :py:meth:`.run` method:: >>> model.run(flatten=True) Setting parameter values ------------------------ -In some situations we may want to modify the parameters of the model to investigate its behavior under different assumptions. There are several ways to do this in PySD, but the :py:func:`run()` method gives us a convenient method in the `params` keyword argument. +In some situations we may want to modify the parameters of the model to investigate its behavior under different assumptions. There are several ways to do this in PySD, but the :py:meth:`.run` method gives us a convenient method in the `params` keyword argument. This argument expects a dictionary whose keys correspond to the components of the model. The associated values can either be constants, or :py:class:`pandas.Series` whose indices are timestamps and whose values are the values that the model component should take on at the corresponding time. For instance, in our model we may set the room temperature to a constant value:: >>> model.run(params={'Room Temperature': 20}) -Alternately, if we want the room temperature to vary over the course of the simulation, we can give the :py:func:`run()` method a set of time-series values in the form of a :py:class:`pandas.Series`, and PySD will linearly interpolate between the given values in the course of its integration:: +Alternately, if we want the room temperature to vary over the course of the simulation, we can give the :py:meth:`.run` method a set of time-series values in the form of a :py:class:`pandas.Series`, and PySD will linearly interpolate between the given values in the course of its integration:: >>> import pandas as pd >>> temp = pd.Series(index=range(30), data=range(20, 80, 2)) @@ -184,10 +184,10 @@ Same dimensions :py:class:`xarray.DataArray` can be used (recommended):: In the same way, a :py:class:`pandas.Series` can be used with constant values, partially defined :py:class:`xarray.DataArray` or same dimensions :py:class:`xarray.DataArray`. .. note:: - Once parameters are set by the :py:func:`run()` command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:data:`set_components(params={})` method, which takes the same params dictionary as the :py:func:`run()` method. We might choose to do this in situations where we will be running the model many times, and only want to set the parameters once. + Once parameters are set by the :py:meth:`.run` command, they are permanently changed within the model. We can also change model parameters without running the model, using PySD’s :py:meth:`.set_components` method, which takes the same params dictionary as the :py:meth:`.run` method. 
We might choose to do this in situations where we will be running the model many times, and only want to set the parameters once. .. note:: - If you need to know the dimensions of a variable, you can check them by using :py:data:`.get_coords(variable__name)` function:: + If you need to know the dimensions of a variable, you can check them by using :py:meth:`.get_coords` method:: >>> model.get_coords('Room Temperature') @@ -204,7 +204,7 @@ In the same way, a :py:class:`pandas.Series` can be used with constant values, p If you change the value of any other variable type by a constant, the constant value will be used always. If a :py:class:`pandas.Series` is given the index and values will be used for interpolation when the function is called in the model, using the time as argument. - If you need to know if a variable takes arguments, i.e., if it is a lookup variable, you can check it by using the :py:func:`.get_args(variable__name)` function:: + If you need to know if a variable takes arguments, i.e., if it is a lookup variable, you can check it by using the :py:meth:`.get_args` method:: >>> model.get_args('Room Temperature') @@ -238,6 +238,6 @@ We can easily access the current value of a model component using curly brackets >>> model['Teacup Temperature'] -If you try to get the current values of a lookup variable, the previous method will fail, as lookup variables take arguments. However, it is possible to get the full series of a lookup or data object with :py:func:`.get_series_data` method:: +If you try to get the current values of a lookup variable, the previous method will fail, as lookup variables take arguments. However, it is possible to get the full series of a lookup or data object with :py:meth:`.get_series_data` method:: >>> model.get_series_data('Growth lookup') diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 729c0e4f..9bd00217 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -53,7 +53,7 @@ The Model class also contains a function for each of the model components, repre The Model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. -Lastly, the model class provides a set of methods that are used to facilitate simulation. The :py:func:`run()` method returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. +Lastly, the model class provides a set of methods that are used to facilitate simulation. The :py:meth:`.run` method returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. .. 
toctree:: :maxdepth: 2 From 58906420d93b4734e10605fb431785569ea9fc7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Thu, 12 May 2022 16:01:34 +0200 Subject: [PATCH 74/96] review vensim_file docs --- docs/structure/abstract_model.rst | 9 ++--- docs/structure/vensim_translation.rst | 16 ++++---- pysd/translators/vensim/vensim_file.py | 52 +++++++++++++------------- 3 files changed, 38 insertions(+), 39 deletions(-) diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index 50195eac..bd7207c6 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -4,13 +4,12 @@ The Abstract Model representation allows a separation of concern between translation and building. Translation involves anything that -happens from the moment the source code of the original model is loaded +happens from the moment the source code of the original model is loaded into memory up to the creation of the Abstract Model representation. Similarly, -the building will be everything that happens between the Abstract Model and the +the building will be everything that takes place between the Abstract Model and the source code of the model written in a programming language different than that -of the original model. +of the original model. This approach allows easily including new code in the translation or building process, +without the risk of affecting one another. -This approach allows to easily include new source codes or output codes, +The :py:class:`AbstractModel` object should retain as much information from the -without needing to make a lot of changes in the whole library. The -:py:class:`AbstractModel` object should retain as much information from the original model as possible. Although the information is not used in the output code, it may be necessary for other future output languages or for improvements in the currently supported outputs. For example, currently diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index 9607c584..ec035454 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -1,19 +1,19 @@ Vensim Translation ================== -PySD allows parsing a Vensim '.mdl' file and translates the result to an :py:class:`AbstractModel` object that can be used to builde the model. +PySD allows parsing a Vensim `.mdl` file and translates the result to an :py:class:`AbstractModel` object that can later (building process) be used to build the model in another programming language. -The translation workflow ------------------------- +Translation workflow ------------------------- The following translation workflow allows splitting the Vensim file while parsing its contents in order to build an :py:class:`AbstractModel` object. The workflow may be summarized as follows: -1. Vensim file: Splits the file content from the sketch and allows splitting the model in sections (main section, macro section) -2. Vensim section: Full set of varibles and definitions that can be integrated. Allows splitting the model expressions. -3. Vensim element: A definition in the mdl file which could be a subscript (sub)range definition or a variable definition. It includes units and comments. Definitions for the same variable are grouped after in the same :py:class:`AbstractElement` object.
Allows parsing its left hand side (LHS) to get the name of the subscript (sub)range or variable and it is returned as a specific type of component depending on the used assing operator (=, ==, :=, (), :) -4. Vensim component: The classified object for a variable definition, it depends on the opperator used to define the variable. Its right hand side (RHS) can be parsed to get the Abstract Syntax Tree (AST) of the expression. +1. **Vensim file**: splits the model equations from the sketch and allows splitting the model in sections (main section and macro sections). +2. **Vensim section**: a full set of variables and definitions that is integrable. The Vensim section can then be split into model expressions. +3. **Vensim element**: a definition in the mdl file which could be a subscript (sub)range definition or a variable definition. It includes units and comments. Definitions for the same variable are grouped afterwards in the same :py:class:`AbstractElement` object. Allows parsing its left hand side (LHS) to get the name of the subscript (sub)range or variable and it is returned as a specific type of component depending on the assignment operator used (=, ==, :=, (), :) +4. **Vensim component**: the classified object for a variable definition; it depends on the operator used to define the variable. Its right hand side (RHS) can be parsed to get the Abstract Syntax Tree (AST) of the expression. -Once the model is parsed and broken following the previous steps. The :py:class:`AbstractModel` can be returned. +Once the model is parsed and broken following the previous steps, the :py:class:`AbstractModel` is returned. Vensim file diff --git a/pysd/translators/vensim/vensim_file.py b/pysd/translators/vensim/vensim_file.py index 2741acd6..f5884a76 100644 --- a/pysd/translators/vensim/vensim_file.py +++ b/pysd/translators/vensim/vensim_file.py @@ -3,7 +3,7 @@ parsing it into Section elements using the FileSectionsVisitor, parsing its sketch using SketchVisitor in order to classify the variables per view. The final result can be exported to an AbstractModel class in -order to build a model in other language. +order to build the model in another programming language. """ import re from typing import Union, List @@ -21,10 +21,10 @@ class VensimFile(): """ - Create a VensimFile object which allows parsing a mdl file. - When the object is created the model file is automatically opened; + The VensimFile class allows parsing an mdl file. + When the object is created, the model file is automatically opened; unnecessary tabs, whitespaces, and linebreaks are removed; and - the sketch is split from the model. + the sketch is split from the model equations. Parameters ---------- @@ -60,7 +60,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print model information.""" + """Print model information to standard output.""" print(self._verbose) def _read(self, encoding: Union[None, str]) -> str: @@ -106,18 +106,18 @@ def _clean(self, text: str) -> str: def parse(self, parse_all: bool = True) -> None: """ Parse model file with parsimonious using the grammar given in - 'parsin_grammars/file_sections.peg' and the class FileSectionsVisitor + 'parsing_grammars/file_sections.peg' and the class FileSectionsVisitor to visit the parsed expressions. - This will break the model file in VensimSections, which are the - main model + macros. + This breaks the model file into VensimSections, which correspond to the + main model section and the macros.
Parameters ---------- parse_all: bool (optional) - If True then the created VensimSection objects will be - automatically parsed. Otherwise, this objects will only be - added to self.sections but not parser. Default is True. + If True, the VensimSection objects created will be + automatically parsed. Otherwise, these objects will only be + added to self.sections but not parsed. Default is True. """ # get model sections (__main__ + macros) @@ -142,22 +142,22 @@ def parse(self, parse_all: bool = True) -> None: def parse_sketch(self, subview_sep: List[str]) -> None: """ Parse the sketch of the model with parsimonious using the grammar - given in 'parsin_grammars/sketch.peg' and the class SketchVisitor + given in 'parsing_grammars/sketch.peg' and the class SketchVisitor to visit the parsed expressions. - It will modify the views_dict of the first section, includying - the dictionary of the variables classification by views. This, - method should be called after calling self.parse method. + It will modify the views_dict of the first section, including + the dictionary of the classification of variables by views. This + method should be called after calling the self.parse method. Parameters ---------- subview_sep: list - List oh the separators to use to classify the model views in + List of the separators to use to classify the model views in folders and subfolders. The separators must be ordered in the - same order they appear in the view patter. For example, + same order they appear in the view name. For example, if a view is named "economy:demand.exports" if - subview_sep=[":", "."] this view variables will be included - the file 'exports' inside the folders economy/demand. + subview_sep=[":", "."] this view's variables will be included + in the file 'exports.py' and inside the folders economy/demand. """ @@ -235,11 +235,11 @@ def parse_sketch(self, subview_sep: List[str]) -> None: def get_abstract_model(self) -> AbstractModel: """ - Get Abstract Model used for building. This, method should be - called after parsing the model (self.parse), and the sketch - (self.parse_sketch) in the case you want to split the variables - per views. This automatically calls the get_abstract_section - method from the model sections. + Instantiate the AbstractModel object used during building. This + method should be called after parsing the model (self.parse) and, + in case you want to split the variables per views, also after + parsing the sketch (self.parse_sketch). This automatically calls the + get_abstract_section method from the model sections. Returns ------- @@ -261,7 +261,7 @@ def _clean_file_names(*args): Parameters ---------- *args: tuple - Any number of strings to to clean. + Any number of strings to clean. Returns ------- @@ -308,7 +308,7 @@ def __init__(self, ast): self.visit(ast) def visit_main(self, n, vc): - # main will be always stored as the first entry + # main will always be stored as the first entry if self.entries[0] is None: self.entries[0] = Section( name="__main__", From d09af1c5a0f48c71c736c13beede04b70ea48b76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Thu, 12 May 2022 16:23:40 +0200 Subject: [PATCH 75/96] reviewed docs for vensim_section.py --- pysd/translators/vensim/vensim_section.py | 40 ++++++++++++----------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/pysd/translators/vensim/vensim_section.py b/pysd/translators/vensim/vensim_section.py index dc33b42a..0ce29cb7 100644 --- a/pysd/translators/vensim/vensim_section.py +++ b/pysd/translators/vensim/vensim_section.py @@ -1,8 +1,9 @@ """ The Section class allows parsing a model section into Elements using the SectionElementsVisitor. The final result can be exported to an -AbstractSection class in order to build a model in other language. -A section could be either the main model (without the macros), or a +AbstractSection class in order to build a model in another programming +language. +A section is either the main model (without the macros), or a macro definition. """ from typing import List, Union @@ -17,22 +18,23 @@ class Section(): """ - Section object allows parsing the elements of that section. + The Section class allows parsing the elements of a model section. Parameters ---------- name: str - Section name. '__main__' for the main section or the macro name. + Section name. That is, '__main__' for the main section, and the + macro name for macros. path: pathlib.Path - Section path. It should be the model name for main section and - the clean macro name for a macro. + Section path. The model name for the main section and the clean +
Returns ------- @@ -308,7 +308,7 @@ def __init__(self, ast): self.visit(ast) def visit_main(self, n, vc): - # main will be always stored as the first entry + # main will always be stored as the first entry if self.entries[0] is None: self.entries[0] = Section( name="__main__", From d09af1c5a0f48c71c736c13beede04b70ea48b76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Thu, 12 May 2022 16:23:40 +0200 Subject: [PATCH 75/96] reviewed docs for vensim_section.py --- pysd/translators/vensim/vensim_section.py | 40 ++++++++++++----------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/pysd/translators/vensim/vensim_section.py b/pysd/translators/vensim/vensim_section.py index dc33b42a..0ce29cb7 100644 --- a/pysd/translators/vensim/vensim_section.py +++ b/pysd/translators/vensim/vensim_section.py @@ -1,8 +1,9 @@ """ The Section class allows parsing a model section into Elements using the SectionElementsVisitor. The final result can be exported to an -AbstractSection class in order to build a model in other language. -A section could be either the main model (without the macros), or a +AbstractSection class in order to build a model in another programming +language. +A section is either the main model (without the macros), or a macro definition. """ from typing import List, Union @@ -17,22 +18,23 @@ class Section(): """ - Section object allows parsing the elements of that section. + The Section class allows parsing the elements of a model section. Parameters ---------- name: str - Section name. '__main__' for the main section or the macro name. + Section name. That is, '__main__' for the main section, and the + macro name for macros. path: pathlib.Path - Section path. It should be the model name for main section and - the clean macro name for a macro. + Section path. The model name for the main section and the clean + macro name for a macro. section_type: str ('main' or 'macro') The section type. params: list - List of params that takes the section. In the case of main + List of params that the section takes. In the case of the main section it will be an empty list. returns: list @@ -43,7 +45,7 @@ class Section(): Section content as string. split: bool - If split is True the created section will split the variables + If True, the created section will split the variables depending on the views_dict. views_dict: dict @@ -81,13 +83,13 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print section information.""" + """Print section information to standard output.""" print(self._verbose) def parse(self, parse_all: bool = True) -> None: """ Parse section object with parsimonious using the grammar given in - 'parsin_grammars/section_elements.peg' and the class + 'parsing_grammars/section_elements.peg' and the class SectionElementsVisitor to visit the parsed expressions. This will break the section (__main__ or macro) in VensimElements, @@ -127,18 +129,18 @@ def parse(self, parse_all: bool = True) -> None: def get_abstract_section(self) -> AbstractSection: """ - Get Abstract Section used for building. This, method should be - called after parsing the section (self.parse). This method is - automatically called by Model's get_abstract_model and - automatically generates the AbstractSubscript ranges and merge - the components in elements calling also the get_abstract_components - method from each model component. + Instantiate an object of the AbstractSection class that will be used + during the building process. 
This method should be called after + parsing the section (self.parse). This method is automatically called + by Model's get_abstract_model method, and automatically generates the + AbstractSubscript ranges and merges the components in elements. It also + calls the get_abstract_components method from each model component. Returns ------- AbstractSection: AbstractSection - Abstract Section object that can be used for building the model - in another language. + AbstractSection object that can be used for building the model + in another programming language. """ return AbstractSection( @@ -186,7 +188,7 @@ def _merge_components(self) -> List[AbstractElement]: class SectionElementsParser(parsimonious.NodeVisitor): """ - Visit section elements to get their equation units and documentation. + Visit section elements to get their equation, units and documentation. """ # TODO include units parsing def __init__(self, ast): From f5041d8dce468ea91688c8884b1deba20c037ede Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 16:44:41 +0200 Subject: [PATCH 76/96] Add pyclass --- docs/structure/abstract_model.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/structure/abstract_model.rst b/docs/structure/abstract_model.rst index 50195eac..4ab55d5f 100644 --- a/docs/structure/abstract_model.rst +++ b/docs/structure/abstract_model.rst @@ -18,7 +18,7 @@ unchangeable constansts (== defined in Vensim) are treated as regular components with Python, but in the future we may want to protect them from user interaction. -The lowest level of this representation is the Abstract Syntax Tree (AST). +The lowest level of this representation is the :py:class:`AbstractSyntax` Tree (AST). This includes all the operations and calls in a given component expression. 
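The same chain can also be driven one level down with the :py:class:`Section` API reviewed in the vensim_section.py patch above. This is a minimal sketch, not part of the patch: the equations string and target path are illustrative, and in normal use :py:meth:`VensimFile.parse` creates and fills the sections itself::

    from pathlib import Path
    from pysd.translators.vensim.vensim_section import Section

    # Illustrative equations string; VensimFile.parse() normally extracts
    # this content from the .mdl file.
    content = "stock = INTEG (inflow-outflow, 0)~Month~Stock.|"

    section = Section(
        name="__main__",        # '__main__' for the main section, macro name otherwise
        path=Path("model.py"),  # illustrative target path
        section_type="main",    # 'main' or 'macro'
        params=[],              # empty for the main section
        returns=[],             # empty for the main section
        content=content,
        split=False,            # do not split variables per views_dict
        views_dict=None,
    )
    section.parse()                                     # creates the Element objects
    abstract_section = section.get_abstract_section()   # -> AbstractSection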
Main abstract structures

From d841e856d24d8fb69e13b4282cf329c80d5d2291 Mon Sep 17 00:00:00 2001
From: Eneko Martin-Martinez
Date: Thu, 12 May 2022 17:02:22 +0200
Subject: [PATCH 77/96] Add figure

---
 docs/images/Vensim_file.svg               | 1387 +++++++++++++++++++++
 docs/structure/vensim_translation.rst     |    4 +
 pysd/translators/vensim/vensim_element.py |   14 +-
 pysd/translators/xmile/xmile_element.py   |    4 +-
 pysd/translators/xmile/xmile_file.py      |    2 +-
 pysd/translators/xmile/xmile_section.py   |    2 +-
 6 files changed, 1402 insertions(+), 11 deletions(-)
 create mode 100644 docs/images/Vensim_file.svg

diff --git a/docs/images/Vensim_file.svg b/docs/images/Vensim_file.svg
new file mode 100644
index 00000000..8bef89c1
--- /dev/null
+++ b/docs/images/Vensim_file.svg
@@ -0,0 +1,1387 @@
[1387 lines of SVG markup omitted: figure of the Vensim file parts -- the file content and the sketch, the main and macro sections, an Element ("my value") with its SubscriptRange ("dim") and Components ("my value[A]", "my value[B]", "my value[C]"), and the Units, Limits and Documentation fields of an example model.]

diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst
index ec035454..577b4b16 100644
--- a/docs/structure/vensim_translation.rst
+++ b/docs/structure/vensim_translation.rst
@@ -16,6 +16,10 @@ The following translation workflow allows splitting the Vensim file while parsin
 
 Once the model is parsed and broken following the previous steps, the :py:class:`AbstractModel` is returned.
 
+.. 
image:: ../images/Vensim_file.svg + :alt: Vensim file parts + + Vensim file ^^^^^^^^^^^ diff --git a/pysd/translators/vensim/vensim_element.py b/pysd/translators/vensim/vensim_element.py index 3d4bbd0f..48e9730c 100644 --- a/pysd/translators/vensim/vensim_element.py +++ b/pysd/translators/vensim/vensim_element.py @@ -63,7 +63,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print element information.""" + """Print element information to standard output.""" print(self._verbose) def _parse_units(self, units_str: str) -> Tuple[str, tuple]: @@ -89,7 +89,7 @@ def _parse_units(self, units_str: str) -> Tuple[str, tuple]: def parse(self) -> object: """ Parse element object with parsimonious using the grammar given in - 'parsin_grammars/element_object.peg' and the class + 'parsing_grammars/element_object.peg' and the class ElementsComponentVisitor to visit the parsed expressions. Splits the LHS from the RHS of the equation. If the returned @@ -252,7 +252,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print subscript range information.""" + """Print subscript range information to standard output.""" print(self._verbose) def get_abstract_subscript_range(self) -> AbstractSubscriptRange: @@ -324,13 +324,13 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print component information.""" + """Print component information to standard output.""" print(self._verbose) def parse(self) -> None: """ Parse component object with parsimonious using the grammar given - in 'parsin_grammars/components.peg' and the class EquationVisitor + in 'parsing_grammars/components.peg' and the class EquationVisitor to visit the RHS of the expressions. """ @@ -433,7 +433,7 @@ def __init__(self, name: str, subscripts: Tuple[list, list], def parse(self) -> None: """ Parse component object with parsimonious using the grammar given - in 'parsin_grammars/lookups.peg' and the class LookupsVisitor + in 'parsing_grammars/lookups.peg' and the class LookupsVisitor to visit the RHS of the expressions. """ tree = vu.Grammar.get("lookups").parse(self.expression) @@ -497,7 +497,7 @@ def __str__(self): # pragma: no cover def parse(self) -> None: """ Parse component object with parsimonious using the grammar given - in 'parsin_grammars/components.peg' and the class EquationVisitor + in 'parsing_grammars/components.peg' and the class EquationVisitor to visit the RHS of the expressions. 
If the expression is None, then de data will be readen from a diff --git a/pysd/translators/xmile/xmile_element.py b/pysd/translators/xmile/xmile_element.py index 27a4f32d..b9180678 100644 --- a/pysd/translators/xmile/xmile_element.py +++ b/pysd/translators/xmile/xmile_element.py @@ -86,7 +86,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print element information.""" + """Print element information to standard output.""" print(self._verbose) def _get_xpath_text(self, node: etree._Element, @@ -495,7 +495,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print subscript range information.""" + """Print subscript range information to standard output.""" print(self._verbose) def get_abstract_subscript_range(self) -> AbstractSubscriptRange: diff --git a/pysd/translators/xmile/xmile_file.py b/pysd/translators/xmile/xmile_file.py index 3380cc90..6be34bdd 100644 --- a/pysd/translators/xmile/xmile_file.py +++ b/pysd/translators/xmile/xmile_file.py @@ -46,7 +46,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print model information.""" + """Print model information to standard output.""" print(self._verbose) def _get_root(self) -> etree._Element: diff --git a/pysd/translators/xmile/xmile_section.py b/pysd/translators/xmile/xmile_section.py index 50d87be6..45859bf6 100644 --- a/pysd/translators/xmile/xmile_section.py +++ b/pysd/translators/xmile/xmile_section.py @@ -87,7 +87,7 @@ def _verbose(self) -> str: # pragma: no cover @property def verbose(self): # pragma: no cover - """Print section information.""" + """Print section information to standard output.""" print(self._verbose) def parse(self, parse_all: bool = True) -> None: From 029b6a4636d85b016e41cd624952a333a92c1488 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 12 May 2022 17:04:31 +0200 Subject: [PATCH 78/96] Remove trailling whitespace --- pysd/translators/vensim/vensim_file.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pysd/translators/vensim/vensim_file.py b/pysd/translators/vensim/vensim_file.py index f5884a76..84488a89 100644 --- a/pysd/translators/vensim/vensim_file.py +++ b/pysd/translators/vensim/vensim_file.py @@ -236,9 +236,9 @@ def parse_sketch(self, subview_sep: List[str]) -> None: def get_abstract_model(self) -> AbstractModel: """ Instantiate the AbstractModel object used during building. This, - method should be called after parsing the model (self.parse) and, - in case you want to split the variables per views, also after - parsing the sketch (self.parse_sketch). This automatically calls the + method should be called after parsing the model (self.parse) and, + in case you want to split the variables per views, also after + parsing the sketch (self.parse_sketch). This automatically calls the get_abstract_section method from the model sections. 
Returns From ea4ebe968b61b6fee3dd4b25e67b2a26cdeb249b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Fri, 13 May 2022 08:30:42 +0200 Subject: [PATCH 79/96] review docs of vensim_element.py --- pysd/translators/vensim/vensim_element.py | 35 ++++++++++++----------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/pysd/translators/vensim/vensim_element.py b/pysd/translators/vensim/vensim_element.py index 3d4bbd0f..79e9aedf 100644 --- a/pysd/translators/vensim/vensim_element.py +++ b/pysd/translators/vensim/vensim_element.py @@ -1,7 +1,7 @@ """ -The Element class allows parsing the LHS side of a model equation, -depending on the LHS a SubscriptRange object or Component object will -be returned. There are 4 tipes of components: +The Element class allows parsing the LHS of a model equation. +Depending on the LHS value, either a SubscriptRange object or a Component +object will be returned. There are 4 components types: - Component: Regular component, defined with '='. - UnchangeableConstant: Unchangeable constant, defined with '=='. @@ -9,9 +9,9 @@ - Lookup: Lookup component, defined with '()' Lookup components have their own parser for the RHS of the expression, -while the other 3 components share the same RHS parser.The final result +while the other 3 components share the same parser. The final result from a parsed component can be exported to an AbstractComponent object -in order to build a model in other language. +in order to build a model in other programming languages. """ import re from typing import Union, Tuple, List @@ -29,7 +29,7 @@ class Element(): """ - Element object allows parsing the elements the LHS of the Vensim + Element object allows parsing the the LHS of the Vensim expressions. Parameters @@ -42,7 +42,7 @@ class Element(): the first '~' symbol. documentation: str - The comment of the element, i.e., the content after the seconf + The comment of the element, i.e., the content after the second '~' symbol. """ @@ -67,7 +67,7 @@ def verbose(self): # pragma: no cover print(self._verbose) def _parse_units(self, units_str: str) -> Tuple[str, tuple]: - """Split the limits from the units""" + """Separate the limits from the units.""" # TODO improve units parsing: parse them when parsing the section # elements if not units_str: @@ -88,14 +88,14 @@ def _parse_units(self, units_str: str) -> Tuple[str, tuple]: def parse(self) -> object: """ - Parse element object with parsimonious using the grammar given in - 'parsin_grammars/element_object.peg' and the class + Parse an Element object with parsimonious using the grammar given in + 'parsing_grammars/element_object.peg' and the class ElementsComponentVisitor to visit the parsed expressions. Splits the LHS from the RHS of the equation. If the returned object is a SubscriptRange, no more parsing is needed. Otherwise, the RHS of the returned object (Component) should be parsed - to get the Abstract Syntax Tree. + to get the AbstractSyntax Tree. Returns ------- @@ -112,7 +112,7 @@ def parse(self) -> object: class ElementsComponentVisitor(parsimonious.NodeVisitor): - """Visit model element definition to get the component object""" + """Visit model element definition to get the component object.""" def __init__(self, ast): self.mapping = [] @@ -179,7 +179,7 @@ def visit_subscript_mapping(self, n, vc): # full integration tests warnings.warn( "\nSubscript mapping detected. " - + "This feature works only in some simple cases." + + "This feature works only for simple cases." 
) # Obtain subscript name and split by : and ( self.mapping.append(str(vc).split(":")[0].split("(")[1].strip()) @@ -257,14 +257,15 @@ def verbose(self): # pragma: no cover def get_abstract_subscript_range(self) -> AbstractSubscriptRange: """ - Get Abstract Subscript Range used for building. This method is - automatically called by Sections's get_abstract_section. + Instantiates an AbstractSubscriptRange object used for building. + This method is automatically called by the Sections's + get_abstract_section method. Returns ------- AbstractSubscriptRange: AbstractSubscriptRange - Abstract Subscript Range object that can be used for building - the model in another language. + AbstractSubscriptRange object that can be used for building + the model in another programming language. """ return AbstractSubscriptRange( From 3ea00f90de3b1348c7f82e5fe1b1ba98d070d406 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 13 May 2022 08:57:40 +0200 Subject: [PATCH 80/96] Remove double the --- pysd/translators/vensim/vensim_element.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pysd/translators/vensim/vensim_element.py b/pysd/translators/vensim/vensim_element.py index e13e92b7..c9ef1a44 100644 --- a/pysd/translators/vensim/vensim_element.py +++ b/pysd/translators/vensim/vensim_element.py @@ -29,8 +29,7 @@ class Element(): """ - Element object allows parsing the the LHS of the Vensim - expressions. + Element object allows parsing the LHS of the Vensim expressions. Parameters ---------- From 906bee51de085d89fe66318491537a08ffd9f088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Fri, 13 May 2022 09:55:42 +0200 Subject: [PATCH 81/96] final revision of vensim_element.py docs --- pysd/translators/vensim/vensim_element.py | 56 ++++++++++++----------- 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/pysd/translators/vensim/vensim_element.py b/pysd/translators/vensim/vensim_element.py index e13e92b7..92c4bd68 100644 --- a/pysd/translators/vensim/vensim_element.py +++ b/pysd/translators/vensim/vensim_element.py @@ -285,9 +285,10 @@ class Component(): The original name of the component. subscripts: tuple - Tuple of length two with first argument the list of subscripts - in the variable definition and the second argument the list of - subscripts list that appears after :EXCEPT: keyword (if used). + Tuple of length two with the list of subscripts + in the variable definition as first argument and the list of + subscripts that appears after the :EXCEPT: keyword (if used) as + the second argument. expression: str The RHS of the element, expression to parse. @@ -330,7 +331,7 @@ def verbose(self): # pragma: no cover def parse(self) -> None: """ - Parse component object with parsimonious using the grammar given + Parse Component object with parsimonious using the grammar given in 'parsing_grammars/components.peg' and the class EquationVisitor to visit the RHS of the expressions. @@ -347,15 +348,15 @@ def get_abstract_component(self) -> Union[AbstractComponent, AbstractLookup]: """ Get Abstract Component used for building. This method is - automatically called by Sections's get_abstract_section. + automatically called by Sections's get_abstract_section method. Returns ------- AbstractComponent: AbstractComponent or AbstractLookup Abstract Component object that can be used for building the model in another language. 
If the component equations - includes external lookups (GET XLS/DIRECT LOOKUPS) - AbstractLookup class will be used + include external lookups (GET XLS/DIRECT LOOKUPS), an + AbstractLookup class will be used. """ if self.lookup: @@ -368,7 +369,7 @@ def get_abstract_component(self) -> Union[AbstractComponent, class UnchangeableConstant(Component): """ Unchangeable constant defined by "name == expr" in Vensim. - This class is a soon of Component. + This class inherits from the Component class. Parameters ---------- @@ -376,9 +377,10 @@ class UnchangeableConstant(Component): The original name of the component. subscripts: tuple - Tuple of length two with first argument the list of subscripts - in the variable definition and the second argument the list of - subscripts list that appears after :EXCEPT: keyword (if used). + Tuple of length two with the list of subscripts + in the variable definition as first argument and the list of + subscripts that appears after the :EXCEPT: keyword (if used) as + second argument. expression: str The RHS of the element, expression to parse. @@ -393,7 +395,7 @@ def __init__(self, name: str, subscripts: Tuple[list, list], def get_abstract_component(self) -> AbstractUnchangeableConstant: """ Get Abstract Component used for building. This method is - automatically called by Sections's get_abstract_section. + automatically called by Sections's get_abstract_section method. Returns ------- @@ -409,7 +411,7 @@ def get_abstract_component(self) -> AbstractUnchangeableConstant: class Lookup(Component): """ Lookup component, defined by "name(expr)" in Vensim. - This class is a soon of Component. + This class inherits from the Component class. Parameters ---------- @@ -417,9 +419,9 @@ class Lookup(Component): The original name of the component. subscripts: tuple - Tuple of length two with first argument the list of subscripts - in the variable definition and the second argument the list of - subscripts list that appears after :EXCEPT: keyword (if used). + Tuple of length two with the list of subscripts in the variable + definition as first argument and the list of subscripts that appear + after the :EXCEPT: keyword (if used) as second argument. expression: str The RHS of the element, expression to parse. @@ -443,12 +445,12 @@ def parse(self) -> None: def get_abstract_component(self) -> AbstractLookup: """ Get Abstract Component used for building. This method is - automatically called by Sections's get_abstract_section. + automatically called by Sections's get_abstract_section method. Returns ------- AbstractComponent: AbstractLookup - Abstract Component object that can be used for building + Abstract Component object that may be used for building the model in another language. """ @@ -458,7 +460,7 @@ def get_abstract_component(self) -> AbstractLookup: class Data(Component): """ Data component, defined by "name := expr" in Vensim. - This class is a soon of Component. + This class inherits from the Component class. Parameters ---------- @@ -466,13 +468,13 @@ class Data(Component): The original name of the component. subscripts: tuple - Tuple of length two with first argument the list of subscripts - in the variable definition and the second argument the list of - subscripts list that appears after :EXCEPT: keyword (if used). + Tuple of length two with the list of subscripts in the variable + definition as first argument and the list of subscripts that appear + after the :EXCEPT: keyword (if used) as second argument. 
keyword: str - The keyword used befor the ":=" symbol, it could be ('interpolate', - 'raw', 'hold_backward', 'look_forward') + The keyword used before the ":=" symbol. The following values are + possible: 'interpolate', 'raw', 'hold_backward' and 'look_forward'. expression: str The RHS of the element, expression to parse. @@ -501,8 +503,8 @@ def parse(self) -> None: in 'parsing_grammars/components.peg' and the class EquationVisitor to visit the RHS of the expressions. - If the expression is None, then de data will be readen from a - VDF file in Vensim. + If the expression is None, the data will be read from a VDF file in + Vensim. """ if not self.expression: @@ -514,7 +516,7 @@ def parse(self) -> None: def get_abstract_component(self) -> AbstractData: """ Get Abstract Component used for building. This method is - automatically called by Sections's get_abstract_section. + automatically called by Sections's get_abstract_section method. Returns ------- From 4f701b09d866a4f0cd17d3b6ee8f0c6e90896c79 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 13 May 2022 10:55:42 +0200 Subject: [PATCH 82/96] Add arithmetic order information --- docs/generate_tables.py | 4 ++++ docs/structure/structure_index.rst | 24 +++++++++++++++++++++++- docs/tables/arithmetic.tab | 10 ++++++++++ 3 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 docs/tables/arithmetic.tab diff --git a/docs/generate_tables.py b/docs/generate_tables.py index ec59d0a7..29fa20ca 100644 --- a/docs/generate_tables.py +++ b/docs/generate_tables.py @@ -60,3 +60,7 @@ def generate_tables(): content, tables_dir / f"{table}_{language}.csv" ) + + # transform arithmetic order table + file = tables_dir / "arithmetic.tab" + pd.read_table(file).to_csv(file.with_suffix(".csv"), index=None) diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 9bd00217..361eee88 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -25,7 +25,29 @@ The internals of the translation process may be found in the following links of -PySD can import models in Vensim's \*.mdl file format and in XMILE format (\*xml file). `Parsimonious `_ is the Parsing Expression Grammar `(PEG) `_ parser library used in PySD to parse the original models and construct an abstract syntax tree. The translators then crawl the tree, using a set of classes to define the :doc:`Abstract Model `. +PySD can import models in Vensim's \*.mdl file format and in XMILE format (\*.xml, \*.xmile, or \*.stmx file). `Parsimonious `_ is the Parsing Expression Grammar `(PEG) `_ parser library used in PySD to parse the original models and construct an abstract syntax tree. The translators then crawl the tree, using a set of classes to define the :doc:`Abstract Model `. + +When parsing the expressions of any language, the order of operations must be taken into account. The order is shown in the following table and is used to create :py:class:`ArithmeticStructure` and :py:class:`LogicalStructure` objects correctly. 
The following expression :py:data:`1+2*3-5` will be translated to::
+
+    ArithmeticStructure(operators=['+', '-'], arguments=(1, ArithmeticStructure(operators=['*'], arguments=(2, 3)), 5))
+
+While something like :py:data:`1<5 and 5>3` will be translated to::
+
+    LogicStructure(operators=[':AND:'], arguments=(LogicStructure(operators=['<'], arguments=(1, 5)), LogicStructure(operators=['>'], arguments=(5, 3))))
+
+Parentheses also affect operators of the same order; for example, :py:data:`1+2-3` is translated to::
+
+    ArithmeticStructure(operators=['+', '-'], arguments=(1, 2, 3))
+
+While :py:data:`1+(2-3)` is translated to::
+
+    ArithmeticStructure(operators=['+'], arguments=(1, ArithmeticStructure(operators=['-'], arguments=(2, 3))))
+
+It is important to maintain this order because, although these operations are commutative by definition, numerical precision errors may make them non-commutative during the integration.
+
+.. csv-table:: Arithmetic order
+   :file: ../tables/arithmetic.csv
+   :header-rows: 1
 
 
 Building the model
diff --git a/docs/tables/arithmetic.tab b/docs/tables/arithmetic.tab
new file mode 100644
index 00000000..79387a1c
--- /dev/null
+++ b/docs/tables/arithmetic.tab
@@ -0,0 +1,10 @@
+Arithmetic order	Operators	Operations
+0	"(), None"	"parentheses, function call, references, no-operations"
+1	"\-"	negative value
+2	^	exponentiation
+3	"\*, /"	"multiplication, division"
+4	"%"	modulo
+5	"+, -"	"addition, subtraction"
+6	"=, <>, <, <=, >, >="	comparison
+7	"not"	unary logical operation
+8	"and, or"	binary logical operations

From 55259b1c34c3048b597b47864b835f32ed959dc3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Sams=C3=B3?=
Date: Fri, 13 May 2022 11:02:12 +0200
Subject: [PATCH 83/96] review docs of vensim_translation.rst

---
 docs/structure/vensim_translation.rst | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst
index 577b4b16..8e68322b 100644
--- a/docs/structure/vensim_translation.rst
+++ b/docs/structure/vensim_translation.rst
@@ -59,11 +59,11 @@ All the basic operators are supported, this includes the ones shown in the
    :file: ../tables/binary_vensim.csv
    :header-rows: 1
 
-Moreover, the Vensim :EXCEPT: operator is also supported to manage exceptions in the subscripts. See the :ref:`Subscripts section`.
+Moreover, the Vensim :EXCEPT: operator is also supported to manage exceptions in the subscripts. See the :ref:`Subscripts section` section.
 
 Functions
 ^^^^^^^^^
-Not all the Vensim functions all included yet, the list of supported functions are included below.
+The list of currently supported Vensim functions is detailed below:
 
 .. csv-table:: Supported basic functions
    :file: ../tables/functions_vensim.csv
@@ -87,11 +87,11 @@ Stocks defined in Vensim as `INTEG(flow, initial_value)` are supported and are t
 
 Subscripts
 ^^^^^^^^^^
-Several subscript related features all supported. This include:
+Several subscript-related features are also supported. These include:
 
 - Basic subscript operations with different ranges.
 - Subscript ranges and subranges definitions.
-- Basic subscript mapping where the subscript range is mapping to a full range (e.g. new_dim: A, B, C -> dim, dim_other), mapping to a partial range is not supported yet (e.g. new_dim: A, B, C -> dim: E, F, G).
+- Basic subscript mapping, where the subscript range is mapped to a full range (e.g. new_dim: A, B, C -> dim, dim_other). Mapping to a partial range is not yet supported (e.g. 
new_dim: A, B, C -> dim: E, F, G). - Subscript copy (e.g. new_dim <-> dim). - \:EXCEPT: operator with any number of arguments. - Subscript usage as a variable (e.g. my_var[dim] = another var * dim). @@ -99,17 +99,17 @@ Several subscript related features all supported. This include: Lookups ^^^^^^^ -Vensim Lookups expressions are supported they can be given hardcoded, using `GET LOOKUPS` function or using `WITH LOOKUPS` function. +Vensim Lookups expressions are supported. They can be defined using hardcoded values, using `GET LOOKUPS` function or using `WITH LOOKUPS` function. Data ^^^^ -Data definitions from GET functions and empty definitions (no expressions, Vensim uses a VDF file) are supported. This definitions can include or not any of the possible interpolation keywords: :INTERPOLATE:, :LOOK FORWARD:, :HOLD BACKWARD:, :RAW:. This keywords will be stored in the 'keyword' argument of :py:class:`AbstractData` as 'interpolate', 'look_forward', 'hold_backward' and 'raw', respectively. The Abstract Structure for GET XLS/DATA is given in the supported get functions table, the Abstract Structure for the empty Data declarations is a :py:class:`DataStructure`. +Data definitions with GET functions and with empty definitions (no expressions, Vensim uses a VDF file) are supported. These definitions may or may not include any of the possible interpolation keywords: :INTERPOLATE:, :LOOK FORWARD:, :HOLD BACKWARD:, :RAW:. These keywords will be stored in the 'keyword' argument of :py:class:`AbstractData` as 'interpolate', 'look_forward', 'hold_backward' and 'raw', respectively. The Abstract Structure for GET XLS/DATA is given in the supported GET functions table. The Abstract Structure for the empty Data declarations is a :py:class:`DataStructure`. -For the moment, any specific functions applying over data are supported (e.g. SHIFT IF TRUE, TIME SHIFT...), but they may be includded in the future. +For the moment, any specific functions applying over data are supported (e.g. SHIFT IF TRUE, TIME SHIFT...), but new ones may be includded in the future. Macro ^^^^^ -Vensim macros are supported, The macro content between the keywords \:MACRO: and \:END OF MACRO: is classified as a section of the model and used for latelly build an independent section from the rest of the model. +Vensim macros are supported. The macro content between the keywords \:MACRO: and \:END OF MACRO: is classified as a section of the model and is subsequently sused to build an independent section from the rest of the model. Planed New Functions and Features --------------------------------- From cdc8012d9d401b12e96584b6b11bfb742ce4aa45 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 13 May 2022 11:05:42 +0200 Subject: [PATCH 84/96] Correct warning message in test --- tests/pytest_translators/pytest_vensim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest_translators/pytest_vensim.py b/tests/pytest_translators/pytest_vensim.py index e1ec096e..5d438a85 100644 --- a/tests/pytest_translators/pytest_vensim.py +++ b/tests/pytest_translators/pytest_vensim.py @@ -114,7 +114,7 @@ def test_subscript_range_error(self, element, error_message): def test_complex_mapping(self, element, mapping): # parse the mapping warning_message = r"Subscript mapping detected\. "\ - r"This feature works only in some simple cases\." + r"This feature works only for simple cases\." 
with pytest.warns(UserWarning, match=warning_message):
             out = element.parse()
 
From 95a3891b8df190e9f0cf99d78686ac54b357aa57 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Sams=C3=B3?=
Date: Fri, 13 May 2022 11:14:03 +0200
Subject: [PATCH 85/96] review docs for xmile_translation.rst

---
 docs/structure/xmile_translation.rst | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst
index 3bb07c63..289f6770 100644
--- a/docs/structure/xmile_translation.rst
+++ b/docs/structure/xmile_translation.rst
@@ -5,18 +5,18 @@ PySD allows parsing a Xmile file and translates the result to an :py:class:`Abst
 
 .. warning::
 
-   Currently no Xmile users are working in the development of PySD, this is causing a gap between the Xmile and Vensim developments. Stella users are encouraged to take part in the development of PySD by invcludying new `test models `_ and adding support for new functions and features.
+   Currently no Xmile users are working on the development of PySD. This is causing a gap between the Xmile and Vensim developments. Stella users are encouraged to take part in the development of PySD by including new `test models `_ and adding support for new functions and features.
 
 The translation workflow
 -------------------------
-The following translation workflow allows splitting the Xmile file while parsing each part of it in order to make it possible to build an :py:class:`AbstractModel` type object. The workflow could be summarized as follows:
+The following translation workflow allows splitting the Xmile file while parsing each part of it to build an :py:class:`AbstractModel` type object. The workflow may be summarized as follows:
 
-1. Xmile file: Parses the file with etree library and creates a section for the model.
-2. Xmile section: Full set of varibles and definitions that can be integrated. Allows splitting the model elements.
-3. Xmile element: A variable definition. It includes units and commnets. Allows parsing its the expressions that contains and saving them inside AbstractComponents that are part of an AbstractElement.
+1. **Xmile file**: Parses the file with the etree library and creates a section for the model.
+2. **Xmile section**: Full set of variables and definitions that can be integrated. Allows splitting the model elements.
+3. **Xmile element**: A variable definition. It includes units and comments. Allows parsing the expressions it contains and saving them inside AbstractComponents, which are part of an AbstractElement.
 
-Once the model is parsed and broken following the previous steps. The :py:class:`AbstractModel` can be returned.
+Once the model is parsed and split following the previous steps, the :py:class:`AbstractModel` can be returned.
 
 
 Xmile file
@@ -44,14 +44,14 @@ Xmile element
 Supported Functions and Features
 --------------------------------
-Ongoing development of the translator will support the full subset of Xmile functionality. The current release supports the following operators, functions and features.
+Ongoing development of the translator will support the full set of Xmile functionality. The current release supports the following operators, functions and features:
 
 .. warning::
    Not all the supported functions and features are properly tested. Any new test model to cover the missing functions test will be wellcome.
 
 Operators
 ^^^^^^^^^
-All the basic operators are supported, this includes the ones shown in the tables below. 
+All the basic operators are supported, this includes the ones shown in the tables below.: .. csv-table:: Supported unary operators :file: ../tables/unary_xmile.csv @@ -64,7 +64,7 @@ All the basic operators are supported, this includes the ones shown in the table Functions ^^^^^^^^^ -Not all the Xmile functions all included yet, the list of supported functions are included below. +Not all the Xmile functions all included yet, the list of supported functions are included below: .. csv-table:: Supported basic functions :file: ../tables/functions_xmile.csv @@ -77,27 +77,27 @@ Not all the Xmile functions all included yet, the list of supported functions ar Stocks ^^^^^^ -Stocks are supported with any number of inflows and outflows. The stocks are translated to the AST as `IntegStructure(flows, initial_value)`. +Stocks are supported with any number of inflows and outflows. Stocks are translated to the AST as `IntegStructure(flows, initial_value)`. Subscripts ^^^^^^^^^^ -Several subscript related features all supported. This include: +Several subscript related features are supported. Thiese include: - Basic subscript operations with different ranges. - Subscript ranges and subranges definitions. Graphical functions ^^^^^^^^^^^^^^^^^^^ -Xmile graphical functions (gf), also known as lookups, are supported. They can be given hardcoded or inline. +Xmile graphical functions (gf), also known as lookups, are supported. They can be hardcoded or inlined. .. warning:: - Interpolation methods 'extrapolate' and 'discrete' are implemented but not tested. Full integration models with this methods are required. + Interpolation methods 'extrapolate' and 'discrete' are implemented but not tested. Full integration models with these methods are required. Supported in Vensim but not in Xmile ------------------------------------ Macro ^^^^^ -Currently Xmile macros are not supported. In Vensim macros are classified as an independent section of the model. If they are properly parsed in the :py:class:`XmileFile` adding support for Xmile should be easy. +Currently Xmile macros are not supported. In Vensim, macros are classified as an independent section of the model. If they are properly parsed in the :py:class:`XmileFile`, adding support for Xmile should be easy. Planed New Functions and Features --------------------------------- From 79c897cf587a54f968de0054e2ef39a5b082eef3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Fri, 13 May 2022 11:23:06 +0200 Subject: [PATCH 86/96] review doc in xmile_file.py --- docs/structure/xmile_translation.rst | 2 +- pysd/translators/xmile/xmile_file.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst index 289f6770..e4ee0166 100644 --- a/docs/structure/xmile_translation.rst +++ b/docs/structure/xmile_translation.rst @@ -64,7 +64,7 @@ All the basic operators are supported, this includes the ones shown in the table Functions ^^^^^^^^^ -Not all the Xmile functions all included yet, the list of supported functions are included below: +Not all the Xmile functions are included yet. The list of supported functions is shown below: .. 
csv-table:: Supported basic functions :file: ../tables/functions_xmile.csv diff --git a/pysd/translators/xmile/xmile_file.py b/pysd/translators/xmile/xmile_file.py index 6be34bdd..456cb091 100644 --- a/pysd/translators/xmile/xmile_file.py +++ b/pysd/translators/xmile/xmile_file.py @@ -1,7 +1,7 @@ """ -The XmileFile class allows reading the original Xmile model file, +The XmileFile class allows reading the original Xmile model file and parsing it into Section elements. The final result can be exported to an -AbstractModel class in order to build a model in other language. +AbstractModel class in order to build a model in another programming language. """ from typing import Union from pathlib import Path @@ -15,8 +15,8 @@ class XmileFile(): """ - Create a XmileFile object which allows parsing a xmile file. - When the object is created the model file is automatically opened + The XmileFile class allows parsing an Xmile file. + When the object is created, the model file is automatically opened and parsed with lxml.etree. Parameters @@ -51,7 +51,7 @@ def verbose(self): # pragma: no cover def _get_root(self) -> etree._Element: """ - Read a Xmile file and assign its content to self.model_text + Read an Xmile file and assign its content to self.model_text Returns ------- @@ -74,16 +74,16 @@ def _get_root(self) -> etree._Element: def parse(self, parse_all: bool = True) -> None: """ Create a XmileSection object from the model content and parse it. - As currently the macros are not supported all the models will - have only one section. This functionshould split the macros in + As macros are currently not supported, all models will + have a single section. This function should split the macros in independent sections in the future. Parameters ---------- parse_all: bool (optional) - If True then the created XmileSection objects will be - automatically parsed. Otherwise, this objects will only be - added to self.sections but not parser. Default is True. + If True, the created XmileSection objects will be + automatically parsed. Otherwise, these objects will only be + added to self.sections but not parsed. Default is True. """ # TODO: in order to make macros work we need to split them here From fec83c75c8fb89fe3c327f7039c9aafac67e3d72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20Sams=C3=B3?= Date: Fri, 13 May 2022 11:26:17 +0200 Subject: [PATCH 87/96] review vensim_section.py docs --- pysd/translators/vensim/vensim_section.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pysd/translators/vensim/vensim_section.py b/pysd/translators/vensim/vensim_section.py index 0ce29cb7..e2f100bf 100644 --- a/pysd/translators/vensim/vensim_section.py +++ b/pysd/translators/vensim/vensim_section.py @@ -34,12 +34,12 @@ class Section(): The section type. params: list - List of params that the section takes. In the case of the main - section it will be an empty list. + List of parameters that the section takes. If it is the main + section, it will be an empty list. returns: list - List of variables that returns the section. In the case of main - section it will be an empty list. + List of variables that returns the section. If it is the main + section, it will be an empty list. content: str Section content as string. @@ -99,9 +99,9 @@ def parse(self, parse_all: bool = True) -> None: Parameters ---------- parse_all: bool (optional) - If True then the created VensimElement objects will be - automatically parsed. Otherwise, this objects will only be - added to self.elements but not parser. 
Default is True. + If True, the created VensimElement objects will be + automatically parsed. Otherwise, these objects will only be + added to self.elements but not parsed. Default is True. """ # parse the section to get the elements From b6ec5ae9fb9e2c6cd9b2d1e6bcf239289051f880 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 18 May 2022 14:12:10 +0200 Subject: [PATCH 88/96] Docs --- docs/structure/vensim_translation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index 8e68322b..391f06a8 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -103,7 +103,7 @@ Vensim Lookups expressions are supported. They can be defined using hardcoded va Data ^^^^ -Data definitions with GET functions and with empty definitions (no expressions, Vensim uses a VDF file) are supported. These definitions may or may not include any of the possible interpolation keywords: :INTERPOLATE:, :LOOK FORWARD:, :HOLD BACKWARD:, :RAW:. These keywords will be stored in the 'keyword' argument of :py:class:`AbstractData` as 'interpolate', 'look_forward', 'hold_backward' and 'raw', respectively. The Abstract Structure for GET XLS/DATA is given in the supported GET functions table. The Abstract Structure for the empty Data declarations is a :py:class:`DataStructure`. +Data definitions with GET functions and empty data definitions (no expressions, Vensim uses a VDF file) are supported. These definitions may or may not include any of the possible interpolation keywords: :INTERPOLATE:, :LOOK FORWARD:, :HOLD BACKWARD:, :RAW:. These keywords will be stored in the 'keyword' argument of :py:class:`AbstractData` as 'interpolate', 'look_forward', 'hold_backward' and 'raw', respectively. The Abstract Structure for GET XLS/DATA is given in the supported GET functions table. The Abstract Structure for the empty Data declarations is a :py:class:`DataStructure`. For the moment, any specific functions applying over data are supported (e.g. SHIFT IF TRUE, TIME SHIFT...), but new ones may be includded in the future. 
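As a complement to the data-related patches above, the sketch below shows how an empty DATA definition could be represented with the :py:class:`Data` component documented in vensim_element.py. It is illustrative only and not part of any patch: the variable name is made up and the keyword arguments are assumed from the docstring shown earlier::

    from pysd.translators.vensim.vensim_element import Data

    # Illustrative empty DATA definition: no expression, so Vensim would
    # read the values from a VDF file; the :INTERPOLATE: keyword is stored
    # as 'interpolate' in the resulting abstract component.
    data_component = Data(
        name="room temperature",   # illustrative variable name
        subscripts=([], []),       # (definition subscripts, :EXCEPT: subscripts)
        keyword="interpolate",
        expression=None,           # empty definition -> DataStructure
    )
    data_component.parse()
    abstract_data = data_component.get_abstract_component()  # -> AbstractData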
From f541f08396b2d4e2910e0a9a92f03ba01f229bb5 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Wed, 18 May 2022 21:37:11 +0200 Subject: [PATCH 89/96] Update docs --- README.md | 3 +-- docs/advanced_usage.rst | 2 +- docs/command_line_usage.rst | 4 ++-- docs/{basic_usage.rst => getting_started.rst} | 4 ++-- docs/index.rst | 7 ++---- docs/installation.rst | 2 +- docs/structure/structure_index.rst | 22 +++++++++---------- pysd/py_backend/model.py | 6 +++-- 8 files changed, 23 insertions(+), 27 deletions(-) rename docs/{basic_usage.rst => getting_started.rst} (99%) diff --git a/README.md b/README.md index b2d93de7..38410adc 100644 --- a/README.md +++ b/README.md @@ -21,8 +21,7 @@ This project is a simple library for running [System Dynamics](http://en.wikiped See the [project documentation](http://pysd.readthedocs.org/) for information about: - [Installation](http://pysd.readthedocs.org/en/latest/installation.html) -- [Basic Usage](http://pysd.readthedocs.org/en/latest/basic_usage.html) -- [Function Reference](http://pysd.readthedocs.org/en/latest/functions.html) +- [Getting Started](http://pysd.readthedocs.org/en/latest/getting_started.html) For standard methods for data analysis with SD models, see the [PySD Cookbook](https://github.com/JamesPHoughton/PySD-Cookbook), containing (for example): diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index 05d7b7bb..8aadbea5 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -39,7 +39,7 @@ To substitute this function directly for the heat_loss_to_room model component u model.set_components({'heat_loss_to_room': new_heatflow_function}) -If you want to replace a subscripted variable, you need to ensure that the output from the new function is the same as the previous one. You can check the current coordinates and dimensions of a component by using :py:meth:`.get_coords` as it is explained in :doc:`basic usage <../basic_usage>`. +If you want to replace a subscripted variable, you need to ensure that the output from the new function is the same as the previous one. You can check the current coordinates and dimensions of a component by using :py:meth:`.get_coords` as it is explained in :doc:`Getting started <../getting_started>`. .. note:: Alternatively, you can also set a model component directly:: diff --git a/docs/command_line_usage.rst b/docs/command_line_usage.rst index cf757f93..385a44e7 100644 --- a/docs/command_line_usage.rst +++ b/docs/command_line_usage.rst @@ -4,7 +4,7 @@ Command Line Usage Basic command line usage ------------------------ -Most of the features available in :doc:`basic usage <../basic_usage>` are also available using the command line. Running: +Most of the features available in :doc:`Getting started <../getting_started>` are also available using the command line. Running: .. code-block:: text @@ -126,7 +126,7 @@ as the new value of a variable as two lists of the same length: python -m pysd Teacup.mdl 'Temperature Lookup=[[1, 2, 3, 4], [10, 15, 17, 18]]' -The first list will be used for the *time* or *x* values, and the second for the data values. See setting parameter values in :doc:`basic usage <../basic_usage>` for further details. +The first list will be used for the *time* or *x* values, and the second for the data values. See setting parameter values in :doc:`Getting started <../getting_started>` for further details. .. 
note:: diff --git a/docs/basic_usage.rst b/docs/getting_started.rst similarity index 99% rename from docs/basic_usage.rst rename to docs/getting_started.rst index 9338af2e..2656b17e 100644 --- a/docs/basic_usage.rst +++ b/docs/getting_started.rst @@ -1,5 +1,5 @@ -Basic Usage -=========== +Getting Started +=============== Importing a model and getting started ------------------------------------- diff --git a/docs/index.rst b/docs/index.rst index ca4a1c14..bc5c7ec9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -78,14 +78,11 @@ Support For additional help or consulting, contact james.p.houghton@gmail.com or eneko.martin.martinez@gmail.com. -Contents --------- - .. toctree:: - :maxdepth: 2 + :hidden: installation - basic_usage + getting_started advanced_usage command_line_usage tools diff --git a/docs/installation.rst b/docs/installation.rst index ea2d414a..eb98d1a9 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -61,7 +61,7 @@ above. Optional Dependencies --------------------- -In order to plot model outputs as shown in :doc:`basic usage <../basic_usage>`: +In order to plot model outputs as shown in :doc:`Getting started <../getting_started>`: * Matplotlib diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 361eee88..2c27dcf4 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -14,16 +14,14 @@ For models translated to Python, all the necessary functions and classes to run Translation ----------- -The internals of the translation process may be found in the following links of the documentation: - .. toctree:: - :maxdepth: 2 + :hidden: vensim_translation xmile_translation abstract_model - +PySD currentlty supports translation :doc:`from Vensim ` amb :doc:`from Xmile `. PySD can import models in Vensim's \*.mdl file format and in XMILE format (\*.xml, \*.xmile, or \*.stmx file). `Parsimonious `_ is the Parsing Expression Grammar `(PEG) `_ parser library used in PySD to parse the original models and construct an abstract syntax tree. The translators then crawl the tree, using a set of classes to define the :doc:`Abstract Model `. @@ -53,31 +51,31 @@ It is important to maintain this order because although these operations by defi Building the model ------------------ -The builders allow to build the final model in any programming language (so long as there is a builder for that particular language). To do so, they use a series of classes that obtain the information from the Abstract Model and convert it into the desired code. Currently PySD only includes a builder to build the models in Python. Any contribution to add new builders (and solvers) for other programming languages is welcome. +The builders allow to build the final model in any programming language (so long as there is a builder for that particular language). To do so, they use a series of classes that obtain the information from the Abstract Model and convert it into the desired code. Currently PySD only includes a :doc:`builder to build the models in Python ` . Any contribution to add new builders (and solvers) for other programming languages is welcome. .. toctree:: - :maxdepth: 2 + :hidden: python_builder The Python model ---------------- -For loading a translated model with Python see :doc:`basic usage <../basic_usage>` or: +For loading a translated model with Python see :doc:`Getting started <../../getting_started>` or :doc:`Model loading `: .. 
toctree:: - :maxdepth: 2 + :hidden: model_loading The Python builder constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. -The Model class also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. +The :doc:`Model class ` also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. -The Model class maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. +The :doc:`Model class ` maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. -Lastly, the model class provides a set of methods that are used to facilitate simulation. The :py:meth:`.run` method returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. +Lastly, the :doc:`Model class ` provides a set of methods that are used to facilitate simulation. The :py:meth:`.run` method returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. .. 
toctree:: - :maxdepth: 2 + :hidden: model_class \ No newline at end of file diff --git a/pysd/py_backend/model.py b/pysd/py_backend/model.py index 31f16d2c..28a4d57d 100644 --- a/pysd/py_backend/model.py +++ b/pysd/py_backend/model.py @@ -716,7 +716,8 @@ def set_components(self, params, new=False): 'When setting ' + key + '\n' 'Setting subscripted must be done using a xarray.DataArray' ' with the correct dimensions or a constant value ' - '(https://pysd.readthedocs.io/en/master/basic_usage.html)') + '(https://pysd.readthedocs.io/en/master/' + 'getting_started.html)') if func_name is None: raise NameError( @@ -865,7 +866,8 @@ def set_initial_value(self, t, initial_value): 'When setting ' + key + '\n' 'Setting subscripted must be done using a xarray.DataArray' ' with the correct dimensions or a constant value ' - '(https://pysd.readthedocs.io/en/master/basic_usage.html)') + '(https://pysd.readthedocs.io/en/master/' + 'getting_started.html)') # Try to update stateful component try: From 7c719ccb9b551ef8f8e676866771cb43481a6c25 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 19 May 2022 11:08:11 +0200 Subject: [PATCH 90/96] Document and set flatten_output=True by default --- docs/{development => }/complement.rst | 4 +- docs/conf.py | 8 ++- docs/development/development_index.rst | 13 ++-- .../{contributing.rst => guidelines.rst} | 12 ++-- docs/development/pathway.rst | 6 +- docs/getting_started.rst | 7 ++- docs/index.rst | 6 +- docs/installation.rst | 2 +- docs/reporting_bugs.rst | 3 + docs/structure/python_builder.rst | 3 + docs/structure/structure_index.rst | 14 ++--- docs/structure/vensim_translation.rst | 2 + docs/structure/xmile_translation.rst | 2 + docs/whats_new.rst | 59 +++++++++++++++++++ pysd/builders/python/namespace.py | 18 +++--- pysd/builders/python/python_functions.py | 2 +- pysd/builders/python/python_model_builder.py | 16 ++--- pysd/builders/python/subscripts.py | 2 +- pysd/cli/main.py | 2 +- pysd/py_backend/external.py | 2 +- pysd/py_backend/functions.py | 2 +- pysd/py_backend/model.py | 20 +++---- pysd/py_backend/utils.py | 2 +- pysd/pysd.py | 18 +++--- tests/unit_test_pysd.py | 24 ++++++-- 25 files changed, 171 insertions(+), 78 deletions(-) rename docs/{development => }/complement.rst (79%) rename docs/development/{contributing.rst => guidelines.rst} (93%) create mode 100644 docs/whats_new.rst diff --git a/docs/development/complement.rst b/docs/complement.rst similarity index 79% rename from docs/development/complement.rst rename to docs/complement.rst index 7cacb285..c9f958c2 100644 --- a/docs/development/complement.rst +++ b/docs/complement.rst @@ -9,6 +9,6 @@ An excellent JavaScript library called `sd.js `_ delveloped by `Gönenç Yücel `_ includes a really neat method for categorizing behavior modes and exploring parameter space to determine the boundaries between them. -The `SDQC library `_ developed by Eneko Martin Martinez may be used to check the quality of the data imported by Vensim models from speadsheet files. +The `SDQC library `_ developed by Eneko Martin Martinez may be used to check the quality of the data imported by Vensim models from speadsheet files. -The `excels2vensim library `_, also developed by Eneko Martin Martinez, aims to simplify the incorporation of equations from external data into Vensim. \ No newline at end of file +The `excels2vensim library `_, also developed by Eneko Martin Martinez, aims to simplify the incorporation of equations from external data into Vensim. 
\ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index d5ac7e65..295fe981 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -53,9 +53,15 @@ 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', - 'sphinx.ext.intersphinx' + 'sphinx.ext.intersphinx', + "sphinx.ext.extlinks" ] +extlinks = { + "issue": ("https://github.com/JamesPHoughton/pysd/issues/%s", "issue #%s"), + "pull": ("https://github.com/JamesPHoughton/pysd/pull/%s", "PR #%s"), +} + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/docs/development/development_index.rst b/docs/development/development_index.rst index a14c51a2..133ce3bc 100644 --- a/docs/development/development_index.rst +++ b/docs/development/development_index.rst @@ -1,11 +1,14 @@ Developer Documentation ======================= - .. toctree:: - :maxdepth: 2 + :hidden: - pysd_architecture_views/4+1view_model - contributing + guidelines pathway - complement + pysd_architecture_views/4+1view_model + +In order to contribut to PySD check the :doc:`guidelines` and the :doc:`pathway`. +You also will find helpful the :doc:`Structure of the PySD library <../../structure/structure_index>` to understand better how it works. + + diff --git a/docs/development/contributing.rst b/docs/development/guidelines.rst similarity index 93% rename from docs/development/contributing.rst rename to docs/development/guidelines.rst index 1adff9ce..b792e4f5 100644 --- a/docs/development/contributing.rst +++ b/docs/development/guidelines.rst @@ -1,5 +1,5 @@ -Contributing to PySD -==================== +Development Guidelines +====================== If you are interested in helping to develop PySD, the :doc:`pathway` lists areas that are ripe @@ -54,10 +54,10 @@ The profiler depends on :py:mod:`cProfile` and `cprofilev `_ is a module that checks that your code meets proper python +`Pylint `_ is a module that checks that your code meets proper Python coding practices. It is helpful for making sure that the code will be easy for other people to read, and also is good fast feedback for improving your coding practice. The lint checker can be run for -the entire packages, and for individual python modules or classes. It should be run at a local level +the entire packages, and for individual Python modules or classes. It should be run at a local level (ie, on specific files) whenever changes are made, and globally before the package is committed. It doesn't need to be perfect, but we should aspire always to move in a positive direction.' @@ -80,8 +80,8 @@ following philosophy: * Limit implementation to the basic XMILE standard. * Resist the urge to include everything that shows up in all vendors' tools. -* Emphasize ease of use. Let SD practitioners who haven't used python before understand the basics. -* Take advantage of general python constructions and best practices. +* Emphasize ease of use. Let SD practitioners who haven't used Python before understand the basics. +* Take advantage of general Python constructions and best practices. * Develop and use strong testing and profiling components. Share your work early. Find bugs early. * Avoid firefighting or rushing to add features quickly. SD knows enough about short term thinking in software development to know where that path leads. 
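A short illustration of the ``extlinks`` roles registered in ``conf.py`` above, which are used later in ``whats_new.rst``; the expansions below simply follow the configured URL patterns and captions::

    The bug was reported in :issue:`310` and fixed in :pull:`312`.

Sphinx renders these as ``issue #310`` and ``PR #312``, linking to
``https://github.com/JamesPHoughton/pysd/issues/310`` and
``https://github.com/JamesPHoughton/pysd/pull/312``.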
diff --git a/docs/development/pathway.rst b/docs/development/pathway.rst index b0151934..bbefeeb4 100644 --- a/docs/development/pathway.rst +++ b/docs/development/pathway.rst @@ -2,7 +2,7 @@ PySD Development Pathway ======================== High priority features, bugs, and other elements of active effort are listed on the `github issue -tracker. `_ To get involved see :doc:`contributing`. +tracker. `_ To get involved see :doc:`guidelines`. High Priority @@ -42,8 +42,8 @@ Current Features * Basic XMILE and Vensim parser * Established library structure and data formats -* Simulation using existing python integration tools -* Integration with basic python Data Science functionality +* Simulation using existing Python integration tools +* Integration with basic Python Data Science functionality * Run-at-a-time parameter modification * Time-variant exogenous inputs * Extended backends for storing parameters and output values diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2656b17e..2e98e56e 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -46,6 +46,8 @@ To view a synopsis of the model equations and documentation, use the :py:attr:`. >>> import pysd >>> help(pysd.load) +.. note:: + Not all the features and functions are implemented. If you are in trouble while importing a Vensim or Xmile model check the :ref:`Vensim supported functions ` or :ref:`Xmile supported functions `. Running the Model ----------------- @@ -82,10 +84,13 @@ Pandas proovides a simple plotting capability, that we can use to see how the te :width: 400 px :align: center -To show a progressbar during the model integration, the `progress` argument can be passed to the :py:meth:`.run` command:: +To show a progressbar during the model integration, the `progress` argument can be passed to the :py:meth:`.run` method:: >>> stocks = model.run(progress=True) +.. note:: + The full description of the :py:meth:`.run` method and other methods can be found in the :doc:`Model methods section <../structure/model_class>`. + Running models with DATA type components ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Venim allows to import DATA type data from binary `.vdf` files. Variables defined without an equation in the model, will attempt to read their values from the `.vdf`. PySD allows running models with this kind of data definition using the data_files argument when calling :py:meth:`.run` command, e.g.:: diff --git a/docs/index.rst b/docs/index.rst index bc5c7ec9..9e7f4b20 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -38,7 +38,7 @@ This project is a simple library for running System Dynamics models in Python, w improving integration of Big Data and Machine Learning into the SD workflow. PySD translates :doc:`Vensim ` or -:doc:`XMILE ` model files into python modules, +:doc:`XMILE ` model files into Python modules, and provides methods to modify, simulate, and observe those translated models. @@ -50,7 +50,7 @@ PySD Cookbook A cookbook of simple recipes for advanced data analytics using PySD is available at: http://pysd-cookbook.readthedocs.org/ -The cookbook includes models, sample data, and code in the form of ipython notebooks that demonstrate a variety of data integration and analysis tasks. These models can be executed on your local machine, and modified to suit your particular analysis requirements. +The cookbook includes models, sample data, and code in the form of iPython notebooks that demonstrate a variety of data integration and analysis tasks. 
These models can be executed on your local machine, and modified to suit your particular analysis requirements. Contributing @@ -89,4 +89,6 @@ For additional help or consulting, contact james.p.houghton@gmail.com or eneko.m structure/structure_index development/development_index reporting_bugs + whats_new about + complement diff --git a/docs/installation.rst b/docs/installation.rst index eb98d1a9..2f9a73c8 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -37,7 +37,7 @@ In the source directory use the command: Required Dependencies --------------------- -PySD requires **python 3.7** or above. +PySD requires **Python 3.7** or above. PySD builds on the core Python data analytics stack, and the following third party libraries: diff --git a/docs/reporting_bugs.rst b/docs/reporting_bugs.rst index 71833f81..860a4c67 100644 --- a/docs/reporting_bugs.rst +++ b/docs/reporting_bugs.rst @@ -5,6 +5,9 @@ Before reporting any bug, please make sure that you are using the latest version All bugs must be reported in the project's `issue tracker on github `_. +.. note:: + Not all the features and functions are implemented. If you are in trouble while translating or running a Vensim or Xmile model check the :ref:`Vensim supported functions ` or :ref:`Xmile supported functions ` and consider that when openning a new issue. + Bugs during translation ----------------------- 1. Check the line where it happened and try to identify if it is due to a missing function or feature or for any other reason. diff --git a/docs/structure/python_builder.rst b/docs/structure/python_builder.rst index 0e9a6088..5c8d5eb5 100644 --- a/docs/structure/python_builder.rst +++ b/docs/structure/python_builder.rst @@ -18,6 +18,9 @@ Expression builders .. automodule:: pysd.builders.python.python_expressions_builder :members: + +.. _Python supported functions: + Supported expressions examples ------------------------------ Operators diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index 2c27dcf4..b33f0340 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -51,31 +51,27 @@ It is important to maintain this order because although these operations by defi Building the model ------------------ -The builders allow to build the final model in any programming language (so long as there is a builder for that particular language). To do so, they use a series of classes that obtain the information from the Abstract Model and convert it into the desired code. Currently PySD only includes a :doc:`builder to build the models in Python ` . Any contribution to add new builders (and solvers) for other programming languages is welcome. - .. toctree:: :hidden: python_builder +The builders allow to build the final model in any programming language (so long as there is a builder for that particular language). To do so, they use a series of classes that obtain the information from the :doc:`Abstract Model ` and convert it into the desired code. Currently PySD only includes a :doc:`builder to build the models in Python ` . Any contribution to add new builders (and solvers) for other programming languages is welcome. + + The Python model ---------------- -For loading a translated model with Python see :doc:`Getting started <../../getting_started>` or :doc:`Model loading `: .. toctree:: :hidden: model_loading + model_class -The Python builder constructs a Python class that represents the system dynamics model. 
The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. +For loading a translated model with Python see :doc:`Getting started <../../getting_started>` or :doc:`Model loading `. The Python builder constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. The :doc:`Model class ` also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. The :doc:`Model class ` maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. Lastly, the :doc:`Model class ` provides a set of methods that are used to facilitate simulation. The :py:meth:`.run` method returns to the user a Pandas dataframe representing the output of their simulation run. A variety of options allow the user to specify which components of the model they would like returned, and the timestamps at which they would like those measurements. Additional parameters make parameter changes to the model, modify its starting conditions, or specify how simulation results should be logged. - -.. toctree:: - :hidden: - - model_class \ No newline at end of file diff --git a/docs/structure/vensim_translation.rst b/docs/structure/vensim_translation.rst index 391f06a8..d1796459 100644 --- a/docs/structure/vensim_translation.rst +++ b/docs/structure/vensim_translation.rst @@ -42,6 +42,8 @@ Vensim element :undoc-members: +.. _Vensim supported functions: + Supported Functions and Features -------------------------------- diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst index e4ee0166..95fdfebf 100644 --- a/docs/structure/xmile_translation.rst +++ b/docs/structure/xmile_translation.rst @@ -41,6 +41,8 @@ Xmile element :undoc-members: +.. _Xmile supported functions: + Supported Functions and Features -------------------------------- diff --git a/docs/whats_new.rst b/docs/whats_new.rst new file mode 100644 index 00000000..d9aa6633 --- /dev/null +++ b/docs/whats_new.rst @@ -0,0 +1,59 @@ + +What's New +========== + +v3.0.0 (unreleased) +----------------------- + +New Features +~~~~~~~~~~~~ + +- Properties added to the :py:class:`Model` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. + +Breaking changes +~~~~~~~~~~~~~~~~ + +- The argument :py:data:`flatten_output` from :py:meth:`.run` is now set to :py:data:`True` by default. +- The docstring of the model is now a property and thus is it not callable,:py:attr:`.doc`. 
+- Allow the function :py:func:`py_backend.functions.pulse` to also perform the operations performed by :py:data:`py_backend.functions.pulse_train()` and :py:data:`py_backend.functions.pulse_magnitude()`. +- The first argument of :py:func:`py_backend.functions.active_initial` is now the stage and not the time. +- The function :py:data:`py_backend.utils.rearrange` is now much simpler, oriented to perform simple rearrange cases for user interaction. +- The translation and the building of models has been totally modified to use the :doc:`Abstract Model Representation `. +- Move :py:data:`py_backend.statefuls.Model` and :py:data:`py_backend.statefuls.Macro` to :py:class:`py_backend.model.Model` and :py:class:`py_backend.model.Macro`, respectively. +- All kinds of lookups are now managed with the :py:class:`py_backend.lookups.Lookups` class. + +Deprecations +~~~~~~~~~~~~ + +- Remove :py:data:`py_backend.utils.xrmerge()`, :py:data:`py_backend.functions.pulse_train()`, :py:data:`py_backend.functions.pulse_magnitude()`, :py:data:`py_backend.functions.lookup()`, :py:data:`py_backend.functions.lookup_discrete()`, :py:data:`py_backend.functions.lookup_extrapolation()`, :py:data:`py_backend.functions.logical_and()`, :py:data:`py_backend.functions.logical_or()`, :py:data:`py_backend.functions.bounded_normal()`, :py:data:`py_backend.functions.log()`. +- Remove old translation and building files. + + +Bug fixes +~~~~~~~~~ + +- Generate the documentation of the model when loading it to avoid losing information when replacing a variable value (:issue:`310`, :pull:`312`). +- Make random functions return arrays of the same shape as the variable, to avoid repeating values over a dimension (:issue:`309`, :pull:`312`). +- Fix bug when Vensim's :MACRO: definition is not at the top of the model file (:issue:`306`, :pull:`312`). +- Make builder identify the subscripts using a main range and subrange to allow using subscripts as numeric values as Vensim does (:issue:`296`, :issue:`301`, :pull:`312`). +- Fix bug of mismatching function and lookup names (:issue:`116`, :pull:`312`). +- Parse Xmile models case insensitively and ignoring newline characters (:issue:`203`, :issue:`253`, :pull:`312`). +- Add support for Vensim's `\:EXCEPT\: keyword `_ (:issue:`168`, :issue:`253`, :pull:`312`). +- Add support for Xmile's FORCST and SAFEDIV functions (:issue:`154`, :pull:`312`). +- Add subscripts support for Xmile (:issue:`289`, :pull:`312`). +- Fix numeric error bug when using :py:data:`return_timestamps` and time step with non-integer values. + +Documentation +~~~~~~~~~~~~~ + +- Review the whole documentation, refactor it, and describe the new features. + +Performance +~~~~~~~~~~~ + +- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`py_backend.utils.xrmerge`. This improves the speed of subscripted models. +- The grammars for Parsimonious are only compiled once per translation. + +Internal Changes +~~~~~~~~~~~~~~~~ +- The translation and the building of models has been totally modified to use the :doc:`Abstract Model Representation `.
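A minimal sketch of how the two most user-visible changes listed above look in practice; the Teacup example file is the one used in the getting started guide, and outputs are omitted::

    >>> import pysd
    >>> model = pysd.read_vensim('Teacup.mdl')
    >>> model.doc                              # now a property, no longer called as a method
    >>> stocks = model.run()                   # xarray outputs are flattened by default now
    >>> raw = model.run(flatten_output=False)  # recover the previous behaviour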
diff --git a/pysd/builders/python/namespace.py b/pysd/builders/python/namespace.py index e08965b6..5bd98eb4 100644 --- a/pysd/builders/python/namespace.py +++ b/pysd/builders/python/namespace.py @@ -3,7 +3,7 @@ from unicodedata import normalize from typing import List -# used to create python safe names with the variable reserved_words +# used to create Python safe names with the variable reserved_words from keyword import kwlist from builtins import __dir__ as bidir from pysd.py_backend.components import __dir__ as cdir @@ -19,7 +19,7 @@ class NamespaceManager: """ NamespaceManager object allows includying new elements to the namespace and searching for elements in the namespace. When includying new - elements a python safe name is used to be able to write the equations. + elements a Python safe name is used to be able to write the equations. Parameters ---------- @@ -63,16 +63,16 @@ def make_python_identifier(self, string: str, prefix: str = None, """ Takes an arbitrary string and creates a valid Python identifier. - If the python identifier created is already in the namespace, + If the Python identifier created is already in the namespace, but the input string is not (ie, two similar strings resolve to - the same python identifier) or if the identifier is a reserved - word in the reserved_words list, or is a python default + the same Python identifier) or if the identifier is a reserved + word in the reserved_words list, or is a Python default reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- string: str - The text to be converted into a valid python identifier. + The text to be converted into a valid Python identifier. prefix: str or None (optional) If given it will be used as a prefix for the output string. @@ -85,7 +85,7 @@ def make_python_identifier(self, string: str, prefix: str = None, Returns ------- identifier: str - A vaild python identifier based on the input string. + A vaild Python identifier based on the input string. 
Examples -------- @@ -98,7 +98,7 @@ def make_python_identifier(self, string: str, prefix: str = None, >>> make_python_identifier('multiple spaces') 'multiple_spaces' - When the name is a python keyword, add '_1' to differentiate it + When the name is a Python keyword, add '_1' to differentiate it >>> make_python_identifier('for') 'for_1' @@ -156,7 +156,7 @@ def make_python_identifier(self, string: str, prefix: str = None, # replace multiple _ after cleaning s = re.sub(r"[_]+", "_", s) - # Check that the string is not a python identifier + # Check that the string is not a Python identifier identifier = s i = 1 while identifier in self._used_words: diff --git a/pysd/builders/python/python_functions.py b/pysd/builders/python/python_functions.py index fcbb2d44..e8560205 100644 --- a/pysd/builders/python/python_functions.py +++ b/pysd/builders/python/python_functions.py @@ -84,7 +84,7 @@ # random functions must have the shape of the component subscripts # most of them are shifted, scaled and truncated - # TODO: it is difficult to find same parametrization in python, + # TODO: it is difficult to find same parametrization in Python, # maybe build a new model "random_0_1": ( "np.random.uniform(0, 1, size=%(size)s)", diff --git a/pysd/builders/python/python_model_builder.py b/pysd/builders/python/python_model_builder.py index b267a447..1f6e379f 100644 --- a/pysd/builders/python/python_model_builder.py +++ b/pysd/builders/python/python_model_builder.py @@ -46,7 +46,7 @@ def __init__(self, abstract_model: AbstractModel): def build_model(self) -> Path: """ - Build the python model in a file callled as the orginal model + Build the Python model in a file callled as the orginal model but with '.py' suffix. Returns @@ -102,7 +102,7 @@ def __init__(self, abstract_section: AbstractSection): def build_section(self) -> None: """ - Build the python section in a file callled as the orginal model + Build the Python section in a file callled as the orginal model if the section is main or in a file called as the macro name if the section is a macro. """ @@ -135,7 +135,7 @@ def _process_views_tree(self, view_name: str, """ if isinstance(view_content, set): # Will become a module - # Convert subview elements names to python names + # Convert subview elements names to Python names view_content = { self.namespace.cleanspace[var] for var in view_content } @@ -183,7 +183,7 @@ def _build_modular(self, elements_per_view: dict) -> None: def _build_separate_module(self, elements: list, module_name: str, module_dir: str) -> None: """ - Constructs and writes the python representation of a specific model + Constructs and writes the Python representation of a specific model module, when the split_views=True in the read_vensim function. Parameters @@ -223,7 +223,7 @@ def _build_separate_module(self, elements: list, module_name: str, def _build_main_module(self, elements: list) -> None: """ - Constructs and writes the python representation of the main model + Constructs and writes the Python representation of the main model module, when the split_views=True in the read_vensim function. Parameters @@ -233,7 +233,7 @@ def _build_main_module(self, elements: list) -> None: only be the initial_time, final_time, saveper and time_step, functions, though there might be others in some situations. Each element is a dictionary, with the various components - needed to assemble a model component in python syntax. This + needed to assemble a model component in Python syntax. 
This will contain multiple entries for elements that have multiple definitions in the original file, and which need to be combined. @@ -292,7 +292,7 @@ def _build_main_module(self, elements: list) -> None: def _build(self) -> None: """ - Constructs and writes the python representation of a section. + Constructs and writes the Python representation of a section. Returns ------- @@ -400,7 +400,7 @@ def _generate_functions(self, elements: dict) -> str: ---------- elements: dict Each element is a dictionary, with the various components - needed to assemble a model component in python syntax. This + needed to assemble a model component in Python syntax. This will contain multiple entries for elements that have multiple definitions in the original file, and which need to be combined. diff --git a/pysd/builders/python/subscripts.py b/pysd/builders/python/subscripts.py index df9b0258..70b92018 100644 --- a/pysd/builders/python/subscripts.py +++ b/pysd/builders/python/subscripts.py @@ -351,7 +351,7 @@ def simplify_subscript_input(self, coords: dict, Coordinates to write in the model file. merge_subs: list of strings or None (optional) - List of the final subscript range of the python array after + List of the final subscript range of the Python array after merging with other objects. If None the merge_subs will be taken from coords. Default is None. diff --git a/pysd/cli/main.py b/pysd/cli/main.py index 5c5384ec..29601b52 100644 --- a/pysd/cli/main.py +++ b/pysd/cli/main.py @@ -74,7 +74,7 @@ def load(model_file, data_files, missing_values, split_views, **kwargs): split_views: bool (optional) If True, the sketch is parsed to detect model elements in each - model view, and then translate each view in a separate python + model view, and then translate each view in a separate Python file. Setting this argument to True is recommended for large models split in many different views. Default is False. diff --git a/pysd/py_backend/external.py b/pysd/py_backend/external.py index e4433798..e4f7190f 100644 --- a/pysd/py_backend/external.py +++ b/pysd/py_backend/external.py @@ -68,7 +68,7 @@ class External(object): Attributes ---------- py_name: str - The python name of the object + The Python name of the object missing: str ("warning", "error", "ignore", "keep") What to do with missing values. If "warning" (default) shows a warning message and interpolates the values. diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index a97a1c22..c89b7a64 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -1,5 +1,5 @@ """ -These functions have no direct analog in the standard python data analytics +These functions have no direct analog in the standard Python data analytics stack, or require information about the internal state of the system beyond what is present in the function call. We provide them in a structure that makes it easy for the model elements to call. diff --git a/pysd/py_backend/model.py b/pysd/py_backend/model.py index 28a4d57d..59aa3194 100644 --- a/pysd/py_backend/model.py +++ b/pysd/py_backend/model.py @@ -36,13 +36,13 @@ class Macro(DynamicStateful): added methods to facilitate execution. The Macro object will be created with components drawn from a - translated python model file. + translated Python model file. Parameters ---------- py_model_file: str or pathlib.Path Filename of a model or macro which has already been converted - into a python format. + into a Python format. params: dict or None (optional) Dictionary of the macro parameters. 
Default is None. return_func: str or None (optional) @@ -930,7 +930,7 @@ def _build_doc(self): """ Formats a table of documentation strings to help users remember variable names, and understand how they are translated into - python safe names. + Python safe names. Returns ------- @@ -981,13 +981,13 @@ class Model(Macro): methods for running the model. The Model object will be created with components drawn from a - translated python model file. + translated Python model file. Parameters ---------- py_model_file: str or pathlib.Path Filename of a model which has already been converted into a - python format. + Python format. data_files: dict or list or str or None The dictionary with keys the name of file and variables to load the data from there. Or the list of names or name of the @@ -1006,7 +1006,7 @@ class Model(Macro): """ def __init__(self, py_model_file, data_files, initialize, missing_values): - """ Sets up the python objects """ + """ Sets up the Python objects """ super().__init__(py_model_file, None, None, Time(), data_files=data_files) self.time.stage = 'Load' @@ -1024,7 +1024,7 @@ def initialize(self): def run(self, params=None, return_columns=None, return_timestamps=None, initial_condition='original', final_time=None, time_step=None, - saveper=None, reload=False, progress=False, flatten_output=False, + saveper=None, reload=False, progress=False, flatten_output=True, cache_output=True): """ Simulate the model's behavior over time. @@ -1086,8 +1086,8 @@ def run(self, params=None, return_columns=None, return_timestamps=None, flatten_output: bool (optional) If True, once the output dataframe has been formatted will - split the xarrays in new columns following vensim's naming - to make a totally flat output. Default is False. + split the xarrays in new columns following Vensim's naming + to make a totally flat output. Default is True. cache_output: bool (optional) If True, the number of calls of outputs variables will be increased @@ -1469,7 +1469,7 @@ def check_dep(dependencies, initial=False): def get_vars_in_module(self, module): """ - Return the name of python vars in a module. + Return the name of Python vars in a module. 
Parameters ---------- diff --git a/pysd/py_backend/utils.py b/pysd/py_backend/utils.py index f6af4cf9..a2d9a787 100644 --- a/pysd/py_backend/utils.py +++ b/pysd/py_backend/utils.py @@ -40,7 +40,7 @@ def get_return_elements(return_columns, namespace): """ Takes a list of return elements formatted in vensim's format Varname[Sub1, SUb2] - and returns first the model elements (in python safe language) + and returns first the model elements (in Python safe language) that need to be computed and collected, and secondly the addresses that each element in the return columns list translates to diff --git a/pysd/pysd.py b/pysd/pysd.py index cb1dcc3b..21b6f93d 100644 --- a/pysd/pysd.py +++ b/pysd/pysd.py @@ -53,7 +53,7 @@ def read_xmile(xmile_file, data_files=None, initialize=True, Returns ------- model: a PySD class object - Elements from the python model are loaded into the PySD class + Elements from the Python model are loaded into the PySD class and ready to run Examples @@ -71,10 +71,10 @@ def read_xmile(xmile_file, data_files=None, initialize=True, # get AbstractModel abs_model = xmile_file_obj.get_abstract_model() - # build python file + # build Python file py_model_file = ModelBuilder(abs_model).build_model() - # load python file + # load Python file model = load(py_model_file, data_files, initialize, missing_values) model.xmile_file = str(xmile_file) @@ -110,7 +110,7 @@ def read_vensim(mdl_file, data_files=None, initialize=True, split_views: bool (optional) If True, the sketch is parsed to detect model elements in each - model view, and then translate each view in a separate python + model view, and then translate each view in a separate Python file. Setting this argument to True is recommended for large models split in many different views. Default is False. @@ -130,7 +130,7 @@ def read_vensim(mdl_file, data_files=None, initialize=True, Returns ------- model: a PySD class object - Elements from the python model are loaded into the PySD class + Elements from the Python model are loaded into the PySD class and ready to run Examples @@ -151,10 +151,10 @@ def read_vensim(mdl_file, data_files=None, initialize=True, # get AbstractModel abs_model = ven_file.get_abstract_model() - # build python file + # build Python file py_model_file = ModelBuilder(abs_model).build_model() - # load python file + # load Python file model = load(py_model_file, data_files, initialize, missing_values) model.mdl_file = str(mdl_file) @@ -164,13 +164,13 @@ def read_vensim(mdl_file, data_files=None, initialize=True, def load(py_model_file, data_files=None, initialize=True, missing_values="warning"): """ - Load a python-converted model file. + Load a Python-converted model file. Parameters ---------- py_model_file : str Filename of a model which has already been converted into a - python format. + Python format. initialize: bool (optional) If False, the model will not be initialize when it is loaded. 
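As a usage sketch of the loading functions documented above; file names follow the Teacup example from the getting started guide, and the Xmile file name is only an assumption for illustration::

    >>> import pysd
    >>> model = pysd.read_vensim('Teacup.mdl')    # translate to Teacup.py and load it
    >>> model = pysd.read_xmile('Teacup.xmile')   # same workflow for an Xmile model
    >>> model = pysd.load('Teacup.py')            # reuse an already translated file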
diff --git a/tests/unit_test_pysd.py b/tests/unit_test_pysd.py index a5cabf2f..05a68eaa 100644 --- a/tests/unit_test_pysd.py +++ b/tests/unit_test_pysd.py @@ -362,7 +362,8 @@ def test_set_subscripted_value_with_constant(self): model = pysd.read_vensim(test_model_subs) model.set_components({"initial_values": 5, "final_time": 10}) - res = model.run(return_columns=["Initial Values"]) + res = model.run( + return_columns=["Initial Values"], flatten_output=False) self.assertTrue(output.equals(res["Initial Values"].iloc[0])) def test_set_subscripted_value_with_partial_xarray(self): @@ -380,7 +381,8 @@ def test_set_subscripted_value_with_partial_xarray(self): model = pysd.read_vensim(test_model_subs) model.set_components({"Initial Values": input_val, "final_time": 10}) - res = model.run(return_columns=["Initial Values"]) + res = model.run( + return_columns=["Initial Values"], flatten_output=False) self.assertTrue(output.equals(res["Initial Values"].iloc[0])) def test_set_subscripted_value_with_xarray(self): @@ -393,7 +395,8 @@ def test_set_subscripted_value_with_xarray(self): model = pysd.read_vensim(test_model_subs) model.set_components({"initial_values": output, "final_time": 10}) - res = model.run(return_columns=["Initial Values"]) + res = model.run( + return_columns=["Initial Values"], flatten_output=False) self.assertTrue(output.equals(res["Initial Values"].iloc[0])) def test_set_parameter_data(self): @@ -409,7 +412,9 @@ def test_set_parameter_data(self): simplefilter("ignore") model.set_components({"data_backward": 20, "data_forward": 70}) - out = model.run(return_columns=["data_backward", "data_forward"]) + out = model.run( + return_columns=["data_backward", "data_forward"], + flatten_output=False) for time in out.index: self.assertTrue((out["data_backward"][time] == 20).all()) @@ -418,7 +423,8 @@ def test_set_parameter_data(self): out = model.run( return_columns=["data_backward", "data_forward"], final_time=20, time_step=1, saveper=1, - params={"data_forward": 30, "data_backward": series}) + params={"data_forward": 30, "data_backward": series}, + flatten_output=False) for time in out.index: self.assertTrue((out["data_forward"][time] == 30).all()) @@ -484,6 +490,7 @@ def test_set_timeseries_parameter_lookup(self): params={"lookup_1d": temp_timeseries}, return_columns=["lookup_1d_time"], return_timestamps=timeseries, + flatten_output=False ) self.assertTrue((res["lookup_1d_time"] == temp_timeseries).all()) @@ -492,6 +499,7 @@ def test_set_timeseries_parameter_lookup(self): params={"lookup_2d": temp_timeseries}, return_columns=["lookup_2d_time"], return_timestamps=timeseries, + flatten_output=False ) self.assertTrue( @@ -521,6 +529,7 @@ def test_set_timeseries_parameter_lookup(self): params={"lookup_2d": temp_timeseries2}, return_columns=["lookup_2d_time"], return_timestamps=timeseries, + flatten_output=False ) self.assertTrue( @@ -558,6 +567,7 @@ def test_set_subscripted_timeseries_parameter_with_constant(self): params={"initial_values": temp_timeseries, "final_time": 10}, return_columns=["initial_values"], return_timestamps=timeseries, + flatten_output=False ) self.assertTrue( @@ -588,7 +598,8 @@ def test_set_subscripted_timeseries_parameter_with_partial_xarray(self): out_series = [out_b + val for val in val_series] model.set_components({"initial_values": temp_timeseries, "final_time": 10}) - res = model.run(return_columns=["initial_values"]) + res = model.run( + return_columns=["initial_values"], flatten_output=False) self.assertTrue( np.all( [r.equals(t) for r, t in 
zip(res["initial_values"].values, @@ -616,6 +627,7 @@ def test_set_subscripted_timeseries_parameter_with_xarray(self): params={"initial_values": temp_timeseries, "final_time": 10}, return_columns=["initial_values"], return_timestamps=timeseries, + flatten_output=False ) self.assertTrue( From bf867967f9ace3a498c41f8f15b9499b9fa62641 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 19 May 2022 11:14:34 +0200 Subject: [PATCH 91/96] Update whats_new --- docs/whats_new.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/whats_new.rst b/docs/whats_new.rst index d9aa6633..7cb809bd 100644 --- a/docs/whats_new.rst +++ b/docs/whats_new.rst @@ -8,24 +8,24 @@ v3.0.0 (unreleased) New Features ~~~~~~~~~~~~ -- Properties added to the :py:class:`Model` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. +- Properties added to the :py:class:`pysd.py_backend.model.Macro` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. Breaking changes ~~~~~~~~~~~~~~~~ - The argument :py:data:`flatten_output` from :py:meth:`.run` is now set to :py:data:`True` by default. - The docstring of the model is now a property and thus is it not callable,:py:attr:`.doc`. -- Allow the function :py:func:`py_backend.functions.pulse` to also perform the operations performed by :py:data:`py_backend.functions.pulse_train()` and :py:data:`py_backend.functions.pulse_magnitude()`. -- The first argument of :py:func:`py_backend.functions.active_initial` now is the stage and not the time. -- The function :py:data:`py_backend.utils.rearrange` now its mutch simpler oriented to perform simple rearrange cases for user interaction. +- Allow the function :py:func:`pysd.py_backend.functions.pulse` to also perform the operations performed by :py:data:`pysd.py_backend.functions.pulse_train()` and :py:data:`pysd.py_backend.functions.pulse_magnitude()`. +- The first argument of :py:func:`pysd.py_backend.functions.active_initial` now is the stage and not the time. +- The function :py:data:`pysd.py_backend.utils.rearrange()` now its mutch simpler oriented to perform simple rearrange cases for user interaction. - The translation and the building of models has been totally modified to use the :doc:`Abstract Model Representation `. -- Move :py:data:`py_backend.statefuls.Model` and :py:data:`py_backend.statefuls.Macro` to :py:class:`py_backend.model.Model` and :py:class:`py_backend.model.Macro`, respectively. -- All kinds of lookups are now managed with the :py:class:`py_backend.lookups.Lookups` class. +- Move :py:data:`pysd.py_backend.statefuls.Model` and :py:data:`pysd.py_backend.statefuls.Macro` to :py:class:`pysd.py_backend.model.Model` and :py:class:`pysd.py_backend.model.Macro`, respectively. +- All kinds of lookups are now managed with the :py:class:`pysd.py_backend.lookups.Lookups` class. Deprecations ~~~~~~~~~~~~ -- Remove :py:data:`py_backend.utils.xrmerge()`, :py:data:`py_backend.functions.pulse_train()`, :py:data:`py_backend.functions.pulse_magnitude()`, :py:data:`py_backend.functions.lookup()`, :py:data:`py_backend.functions.lookup_discrete()`, :py:data:`py_backend.functions.lookup_extrapolation()`, :py:data:`py_backend.functions.logical_and()`, :py:data:`py_backend.functions.logical_or()`, :py:data:`py_backend.functions.bounded_normal()`, :py:data:`py_backend.functions.log()`. 
+- Remove :py:data:`pysd.py_backend.utils.xrmerge()`, :py:data:`pysd.py_backend.functions.pulse_train()`, :py:data:`pysd.py_backend.functions.pulse_magnitude()`, :py:data:`pysd.py_backend.functions.lookup()`, :py:data:`pysd.py_backend.functions.lookup_discrete()`, :py:data:`pysd.py_backend.functions.lookup_extrapolation()`, :py:data:`pysd.py_backend.functions.logical_and()`, :py:data:`pysd.py_backend.functions.logical_or()`, :py:data:`pysd.py_backend.functions.bounded_normal()`, :py:data:`pysd.py_backend.functions.log()`. - Remove old translation and building files. @@ -51,7 +51,7 @@ Documentation Performance ~~~~~~~~~~~ -- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`py_backend.utils.xrmerge`. This improves the speed of subscripted models. +- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`pysd.py_backend.utils.xrmerge()`. This improves the speed of subscripted models. - The grammars for Parsimonious are only compiled once per translation. Internal Changes From 913281c7c1fe929550e17ca8538af82ea6804a98 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 19 May 2022 11:30:07 +0200 Subject: [PATCH 92/96] Update whats_new --- docs/whats_new.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/whats_new.rst b/docs/whats_new.rst index 7cb809bd..65186170 100644 --- a/docs/whats_new.rst +++ b/docs/whats_new.rst @@ -8,7 +8,13 @@ v3.0.0 (unreleased) New Features ~~~~~~~~~~~~ +- The new :doc:`Abstract Model Representation ` translation and building workflow will allow to add new output languages in the future. - Properties added to the :py:class:`pysd.py_backend.model.Macro` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. +- The Python models now look cleaner: + - :py:data:`_namespace` and :py:data:`_dependencies` dictionaries are removed from the file. + - Variables original names, dependencies metadata are given through :py:meth:`pysd.py_backend.components.Component.add` decorator, instead of having them in the docstring. + - The merging of variable equations is done using the coordinates to a pre-allocated array, instead of using the `magic` function :py:data:`pysd.py_backend.utils.xrmerge()`. + - The arranging and subseting arrays are now done inplace instead of using the magic function :py:data:`pysd.py_backend.utils.rearrange()`. Breaking changes ~~~~~~~~~~~~~~~~ @@ -21,6 +27,7 @@ Breaking changes - The translation and the building of models has been totally modified to use the :doc:`Abstract Model Representation `. - Move :py:data:`pysd.py_backend.statefuls.Model` and :py:data:`pysd.py_backend.statefuls.Macro` to :py:class:`pysd.py_backend.model.Model` and :py:class:`pysd.py_backend.model.Macro`, respectively. - All kinds of lookups are now managed with the :py:class:`pysd.py_backend.lookups.Lookups` class. +- The lookups functions may now take a second argument to set the final coordinates when a subscripted variable is passed as an argument. Deprecations ~~~~~~~~~~~~ @@ -51,7 +58,8 @@ Documentation Performance ~~~~~~~~~~~ -- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`pysd.py_backend.utils.xrmerge()`. This improves the speed of subscripted models. 
+- The variables defined in several equations are now assigned to a pre-allocated array instead of using :py:data:`pysd.py_backend.utils.xrmerge()`. +- The arranging and subseting of arrays is now done inplace instead of using the magic function :py:data:`pysd.py_backend.utils.rearrange()`. - The grammars for Parsimonious are only compiled once per translation. Internal Changes From b31bf414f4c6da16c6c40fc8a16f89e54abead4d Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 19 May 2022 11:37:17 +0200 Subject: [PATCH 93/96] Update whats_new --- docs/whats_new.rst | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/docs/whats_new.rst b/docs/whats_new.rst index 65186170..705c975a 100644 --- a/docs/whats_new.rst +++ b/docs/whats_new.rst @@ -9,31 +9,30 @@ New Features ~~~~~~~~~~~~ - The new :doc:`Abstract Model Representation ` translation and building workflow will allow to add new output languages in the future. -- Properties added to the :py:class:`pysd.py_backend.model.Macro` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. -- The Python models now look cleaner: - - :py:data:`_namespace` and :py:data:`_dependencies` dictionaries are removed from the file. - - Variables original names, dependencies metadata are given through :py:meth:`pysd.py_backend.components.Component.add` decorator, instead of having them in the docstring. - - The merging of variable equations is done using the coordinates to a pre-allocated array, instead of using the `magic` function :py:data:`pysd.py_backend.utils.xrmerge()`. - - The arranging and subseting arrays are now done inplace instead of using the magic function :py:data:`pysd.py_backend.utils.rearrange()`. +- Added new properties to the :py:class:`pysd.py_backend.model.Macro` to make more accessible some information: :py:attr:`.namespace`, :py:attr:`.subscripts`, :py:attr:`.dependencies`, :py:attr:`.modules`, :py:attr:`.doc`. +- Cleaner Python models: + - :py:data:`_namespace` and :py:data:`_dependencies` dictionaries have been removed from the file. + - Variables original names, dependencies metadata now are given through :py:meth:`pysd.py_backend.components.Component.add` decorator, instead of having them in the docstring. + - Merging of variable equations is now done using the coordinates to a pre-allocated array, instead of using the `magic` function :py:data:`pysd.py_backend.utils.xrmerge()`. + - Arranging and subseting arrays are now done inplace instead of using the magic function :py:data:`pysd.py_backend.utils.rearrange()`. Breaking changes ~~~~~~~~~~~~~~~~ -- The argument :py:data:`flatten_output` from :py:meth:`.run` is now set to :py:data:`True` by default. -- The docstring of the model is now a property and thus is it not callable,:py:attr:`.doc`. +- Set the argument :py:data:`flatten_output` from :py:meth:`.run` to :py:data:`True` by default. Previously it was set to :py:data:`False` by default. +- Move the docstring of the model to a property, :py:attr:`.doc`. Thus, it is not callable anymore. - Allow the function :py:func:`pysd.py_backend.functions.pulse` to also perform the operations performed by :py:data:`pysd.py_backend.functions.pulse_train()` and :py:data:`pysd.py_backend.functions.pulse_magnitude()`. -- The first argument of :py:func:`pysd.py_backend.functions.active_initial` now is the stage and not the time. 
-- The function :py:data:`pysd.py_backend.utils.rearrange()` now its mutch simpler oriented to perform simple rearrange cases for user interaction. -- The translation and the building of models has been totally modified to use the :doc:`Abstract Model Representation `. +- Change first argument of :py:func:`pysd.py_backend.functions.active_initial`, now it is the `stage of the model` and not the `time`. +- Simplify the function :py:data:`pysd.py_backend.utils.rearrange()` orienting it to perform simple rearrange cases for user interaction. - Move :py:data:`pysd.py_backend.statefuls.Model` and :py:data:`pysd.py_backend.statefuls.Macro` to :py:class:`pysd.py_backend.model.Model` and :py:class:`pysd.py_backend.model.Macro`, respectively. -- All kinds of lookups are now managed with the :py:class:`pysd.py_backend.lookups.Lookups` class. -- The lookups functions may now take a second argument to set the final coordinates when a subscripted variable is passed as an argument. +- Manage all kinds of lookups with the :py:class:`pysd.py_backend.lookups.Lookups` class. +- Include a second optional argument to lookups functions to set the final coordinates when a subscripted variable is passed as an argument. Deprecations ~~~~~~~~~~~~ - Remove :py:data:`pysd.py_backend.utils.xrmerge()`, :py:data:`pysd.py_backend.functions.pulse_train()`, :py:data:`pysd.py_backend.functions.pulse_magnitude()`, :py:data:`pysd.py_backend.functions.lookup()`, :py:data:`pysd.py_backend.functions.lookup_discrete()`, :py:data:`pysd.py_backend.functions.lookup_extrapolation()`, :py:data:`pysd.py_backend.functions.logical_and()`, :py:data:`pysd.py_backend.functions.logical_or()`, :py:data:`pysd.py_backend.functions.bounded_normal()`, :py:data:`pysd.py_backend.functions.log()`. -- Remove old translation and building files. +- Remove old translation and building files (:py:data:`pysd.translation`). Bug fixes From 772bd61146fcc187ef724bffba9e561538f2e8b8 Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Thu, 19 May 2022 12:06:16 +0200 Subject: [PATCH 94/96] Correct spelling --- docs/structure/xmile_translation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/structure/xmile_translation.rst b/docs/structure/xmile_translation.rst index 95fdfebf..c8908706 100644 --- a/docs/structure/xmile_translation.rst +++ b/docs/structure/xmile_translation.rst @@ -49,7 +49,7 @@ Supported Functions and Features Ongoing development of the translator will support the full set of Xmile functionality. The current release supports the following operators, functions and features: .. warning:: - Not all the supported functions and features are properly tested. Any new test model to cover the missing functions test will be wellcome. + Not all the supported functions and features are properly tested. Any new test model to cover the missing functions test will be welcome. 
Operators ^^^^^^^^^ From c5d7886bb1a30693a4ffe2ef36c33b2159fb63fb Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 20 May 2022 13:03:16 +0200 Subject: [PATCH 95/96] Update docs --- docs/getting_started.rst | 6 +- docs/index.rst | 1 + docs/python_api/functions.rst | 11 + .../{structure => python_api}/model_class.rst | 0 .../model_loading.rst | 0 docs/python_api/python_api_index.rst | 40 +++ docs/structure/structure_index.rst | 14 +- docs/tables/arithmetic.tab | 2 +- pysd/py_backend/functions.py | 97 +++--- pysd/py_backend/statefuls.py | 294 +++++++++++------- 10 files changed, 301 insertions(+), 164 deletions(-) create mode 100644 docs/python_api/functions.rst rename docs/{structure => python_api}/model_class.rst (100%) rename docs/{structure => python_api}/model_loading.rst (100%) create mode 100644 docs/python_api/python_api_index.rst diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 2e98e56e..d3cf8456 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -9,7 +9,7 @@ To begin, we must first load the PySD module, and use it to import a model file: >>> model = pysd.read_vensim('Teacup.mdl') -This code creates an instance of the :doc:`PySD Model class ` from an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling at room temperature. +This code creates an instance of the :doc:`PySD Model class ` from an example model that we will use as the system dynamics equivalent of ‘Hello World’: a cup of tea cooling at room temperature. .. image:: images/Teacup.png :width: 350 px @@ -41,7 +41,7 @@ To view a synopsis of the model equations and documentation, use the :py:attr:`. >>> model = pysd.load('Teacup.py') .. note:: - The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage. You can check the full description in :doc:`Model loading ` or using :py:func:`help()` e.g.:: + The functions :py:func:`pysd.read_vensim()`, :py:func:`pysd.read_xmile()` and :py:func:`pysd.load()` have optional arguments for advanced usage. You can check the full description in :doc:`Model loading ` or using :py:func:`help()` e.g.:: >>> import pysd >>> help(pysd.load) @@ -89,7 +89,7 @@ To show a progressbar during the model integration, the `progress` argument can >>> stocks = model.run(progress=True) .. note:: - The full description of the :py:meth:`.run` method and other methods can be found in the :doc:`Model methods section <../structure/model_class>`. + The full description of the :py:meth:`.run` method and other methods can be found in the :doc:`Model methods section `. Running models with DATA type components ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/index.rst b/docs/index.rst index 9e7f4b20..56f9497a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -85,6 +85,7 @@ For additional help or consulting, contact james.p.houghton@gmail.com or eneko.m getting_started advanced_usage command_line_usage + python_api/python_api_index tools structure/structure_index development/development_index diff --git a/docs/python_api/functions.rst b/docs/python_api/functions.rst new file mode 100644 index 00000000..04a25a3f --- /dev/null +++ b/docs/python_api/functions.rst @@ -0,0 +1,11 @@ +Python functions and stateful objects +===================================== +Functions +--------- +.. automodule:: pysd.py_backend.functions + :members: + +Statefuls +--------- +.. 
automodule:: pysd.py_backend.statefuls + :members: \ No newline at end of file diff --git a/docs/structure/model_class.rst b/docs/python_api/model_class.rst similarity index 100% rename from docs/structure/model_class.rst rename to docs/python_api/model_class.rst diff --git a/docs/structure/model_loading.rst b/docs/python_api/model_loading.rst similarity index 100% rename from docs/structure/model_loading.rst rename to docs/python_api/model_loading.rst diff --git a/docs/python_api/python_api_index.rst b/docs/python_api/python_api_index.rst new file mode 100644 index 00000000..0b5d9d7e --- /dev/null +++ b/docs/python_api/python_api_index.rst @@ -0,0 +1,40 @@ +Python API +========== + +.. toctree:: + :hidden: + + model_loading + model_class + functions + +This sections describes the main functions and functionalities to translate +models to Python and run them. If you need more detailed description about +the translation and building process, please see the :doc:`../structure/structure_index` section. + +The model loading information can be found in :doc:`model_loading` and consists of the following functions: + +.. list-table:: Translating and loading functions + :widths: 25 75 + :header-rows: 0 + + * - :py:func:`pysd.read_vensim` + - Translates a Vensim file to Python and returns a :py:class:`Model` object. + * - :py:func:`pysd.read_xmile` + - Translates a Xmile file to Python and returns a :py:class:`Model` object. + * - :py:func:`pysd.load` + - Loads a transtaled Python file and returns a :py:class:`Model` object. + +The Model and Macro classes information ad public methods and attributes can be found in :doc:`model_class`. + +.. list-table:: Translating and loading functions + :widths: 25 75 + :header-rows: 0 + + * - :py:class:`pysd.py_backend.model.Model` + - Implements functionalities to load a translated model and interact with it. The :py:class:`Model` class inherits from :py:class:`Macro`, therefore, some public methods and properties are defined in the :py:class:`Macro` class. + * - :py:class:`pysd.py_backend.model.Macro` + - Implements functionalities to load a translated macro and interact with it. Most of its core methods are also use by :py:class:`Model` class. + + +Provided functions and stateful classes to integrate python models are described in :doc:`functions`. diff --git a/docs/structure/structure_index.rst b/docs/structure/structure_index.rst index b33f0340..e981f832 100644 --- a/docs/structure/structure_index.rst +++ b/docs/structure/structure_index.rst @@ -62,16 +62,10 @@ The builders allow to build the final model in any programming language (so long The Python model ---------------- -.. toctree:: - :hidden: - - model_loading - model_class - -For loading a translated model with Python see :doc:`Getting started <../../getting_started>` or :doc:`Model loading `. The Python builder constructs a Python class that represents the system dynamics model. The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `statefull` model in much the same way that the system itself has a specific state at any point in time. +For loading a translated model with Python see :doc:`Getting started <../../getting_started>` or :doc:`Model loading <../../python_api/model_loading>`. The Python builder constructs a Python class that represents the system dynamics model. 
The class maintains a dictionary representing the current values of each of the system stocks, and the current simulation time, making it a `stateful` model in much the same way that the system itself has a specific state at any point in time. -The :doc:`Model class ` also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subcscripts type infromation and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. +The :doc:`Model class <../../python_api/model_class>` also contains a function for each of the model components, representing the essential model equations. Each function contains its units, subscripts, type information and documentation as translated from the original model file. A query to any of the model functions will calculate and return its value according to the stored state of the system. -The :doc:`Model class ` maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level. +The :doc:`Model class <../../python_api/model_class>` maintains only a single state of the system in memory, meaning that all functions must obey the Markov property - that the future state of the system can be calculated entirely based upon its current state. In addition to simplifying integration, this requirement enables analyses that interact with the model at a step-by-step level.
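The next paragraph describes the simulation methods; as a preview, a typical call combining those options could look like the following sketch. The keyword arguments follow the :py:meth:`.run` signature shown earlier in this patch, while the parameter and variable names are assumptions used only for illustration::

    >>> stocks = model.run(
    ...     params={'room_temperature': 20},
    ...     return_columns=['teacup_temperature'],
    ...     return_timestamps=[0, 5, 10, 30],
    ...     flatten_output=False)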
diff --git a/docs/tables/arithmetic.tab b/docs/tables/arithmetic.tab index 79387a1c..8e14c4ff 100644 --- a/docs/tables/arithmetic.tab +++ b/docs/tables/arithmetic.tab @@ -5,6 +5,6 @@ Arithmetic order Operators Operations 3 "\*, /" "multiplication, division" 4 "%" modulo 5 "+, -" "addition, substraction" -6 "=, <>, <, <=, >, >=" comparioson +6 "=, <>, <, <=, >, >=" comparison 7 "not" unary logical operation 8 "and, or" binary logical operations diff --git a/pysd/py_backend/functions.py b/pysd/py_backend/functions.py index c89b7a64..05349ffe 100644 --- a/pysd/py_backend/functions.py +++ b/pysd/py_backend/functions.py @@ -1,8 +1,10 @@ """ -These functions have no direct analog in the standard Python data analytics -stack, or require information about the internal state of the system beyond -what is present in the function call. We provide them in a structure that -makes it easy for the model elements to call. +The provided functions have no direct analog in the standard Python data +analytics stack, or require information about the internal state of the +system beyond what is present in the function call. They are provided +in a structure that makes it easy for the model elements to call. The +functions may be similar to the original functions given by Vensim or +Stella, but sometimes the number or order of arguments may change. """ import warnings @@ -18,7 +20,7 @@ def ramp(time, slope, start, finish=None): Parameters ---------- - time: function + time: callable Function that returns the current time. slope: float The slope of the ramp starting at zero at time start. @@ -30,7 +32,7 @@ def ramp(time, slope, start, finish=None): Returns ------- - response: float + float or xarray.DataArray: If prior to ramp start, returns zero. If after ramp ends, returns top of ramp. @@ -53,7 +55,7 @@ def step(time, value, tstep): Parameters ---------- - time: function + time: callable Function that returns the current time. value: float The height of the step. @@ -62,7 +64,7 @@ def step(time, value, tstep): Returns ------- - float: + float or xarray.DataArray: - In range [-inf, tstep): returns 0 - In range [tstep, +inf]: @@ -78,7 +80,7 @@ def pulse(time, start, repeat_time=0, width=None, magnitude=None, end=None): Parameters ---------- - time: function + time: callable Function that returns the current time. start: float Starting time of the pulse. @@ -97,13 +99,12 @@ def pulse(time, start, repeat_time=0, width=None, magnitude=None, end=None): Returns ------- - float: + float or xarray.DataArray: - In range [-inf, start): returns 0 - In range [start + n*repeat_time, start + n*repeat_time + width): returns magnitude/time_step or 1 - - In range [start + n*repeat_time + width, - start + (n+1)*repeat_time): + - In range [start + n*repeat_time + width, start + (n+1)*repeat_time): returns 0 """ @@ -126,14 +127,15 @@ def if_then_else(condition, val_if_true, val_if_false): Parameters ---------- condition: bool or xarray.DataArray of bools - val_if_true: function + val_if_true: callable Value to evaluate and return when condition is true. - val_if_false: function + val_if_false: callable Value to evaluate and return when condition is false. Returns ------- - The value depending on the condition. + float or xarray.DataArray: + The value depending on the condition. 
""" # NUMPY: replace xr by np @@ -178,8 +180,9 @@ def xidz(numerator, denominator, x): Returns ------- - numerator/denominator if denominator > small_vensim - otherwise, returns value_if_denom_is_zero + float or xarray.DataArray: + - numerator/denominator if denominator > small_vensim + - value_if_denom_is_zero otherwise """ # NUMPY: replace DataArray by np.ndarray, xr.where -> np.where @@ -215,8 +218,9 @@ def zidz(numerator, denominator): Returns ------- - result of division numerator/denominator if denominator is not zero, - otherwise zero. + float or xarray.DataArray: + - numerator/denominator if denominator > small_vensim + - 0 or 0s array otherwise """ # NUMPY: replace DataArray by np.ndarray, xr.where -> np.where @@ -239,18 +243,21 @@ def zidz(numerator, denominator): def active_initial(stage, expr, init_val): """ Implements vensim's ACTIVE INITIAL function + Parameters ---------- stage: str The stage of the model. - expr: function + expr: callable Running stage value init_val: float or xarray.DataArray Initialization stage value. Returns ------- - + float or xarray.DataArray: + - inti_val if stage='Initialization' + - expr() otherwise """ # NUMPY: both must have same dimensions in inputs, remove time.stage if stage == 'Initialization': @@ -282,7 +289,8 @@ def integer(x): Returns ------- - Returns integer part of x. + integer: float or xarray.DataArray + Returns integer part of x. """ # NUMPY: replace xr by np @@ -331,8 +339,9 @@ def modulo(x, m): Returns ------- - Returns x modulo m, if x is smaller than 0 the result is given in - the range (-m, 0] as Vensim does. x - quantum(x, m) + modulo: float or xarray.DataArray + Returns x modulo m, if x is smaller than 0 the result is given + in the range (-m, 0] as Vensim does. x - quantum(x, m) """ return x - quantum(x, m) @@ -345,15 +354,15 @@ def sum(x, dim=None): Parameters ---------- x: xarray.DataArray - Input value. + Input value. dim: list of strs (optional) - Dimensions to apply the function over. - If not given the function will be applied over all dimensions. + Dimensions to apply the function over. + If not given the function will be applied over all dimensions. Returns ------- - xarray.DataArray or float - The result of the sum operation in the given dimensions. + sum: xarray.DataArray or float + The result of the sum operation in the given dimensions. """ # NUMPY: replace by np.sum(x, axis=axis) put directly in the file @@ -371,15 +380,15 @@ def prod(x, dim=None): Parameters ---------- x: xarray.DataArray - Input value. + Input value. dim: list of strs (optional) - Dimensions to apply the function over. - If not given the function will be applied over all dimensions. + Dimensions to apply the function over. + If not given the function will be applied over all dimensions. Returns ------- - xarray.DataArray or float - The result of the product operation in the given dimensions. + prod: xarray.DataArray or float + The result of the product operation in the given dimensions. """ # NUMPY: replace by np.prod(x, axis=axis) put directly in the file @@ -397,15 +406,15 @@ def vmin(x, dim=None): Parameters ---------- x: xarray.DataArray - Input value. + Input value. dim: list of strs (optional) - Dimensions to apply the function over. - If not given the function will be applied over all dimensions. + Dimensions to apply the function over. + If not given the function will be applied over all dimensions. Returns ------- - xarray.DataArray or float - The result of the minimum value over the given dimensions. 
+    vmin: xarray.DataArray or float
+        The result of the minimum value over the given dimensions.
 
     """
     # NUMPY: replace by np.min(x, axis=axis) put directly in the file
@@ -423,15 +432,15 @@ def vmax(x, dim=None):
     Parameters
     ----------
     x: xarray.DataArray
-      Input value.
+        Input value.
     dim: list of strs (optional)
-      Dimensions to apply the function over.
-      If not given the function will be applied over all dimensions.
+        Dimensions to apply the function over.
+        If not given the function will be applied over all dimensions.
 
     Returns
     -------
-    xarray.DataArray or float
-      The result of the maximum value over the dimensions.
+    vmax: xarray.DataArray or float
+        The result of the maximum value over the dimensions.
 
     """
     # NUMPY: replace by np.max(x, axis=axis) put directly in the file
diff --git a/pysd/py_backend/statefuls.py b/pysd/py_backend/statefuls.py
index 41aac9f8..d7de8846 100644
--- a/pysd/py_backend/statefuls.py
+++ b/pysd/py_backend/statefuls.py
@@ -1,8 +1,8 @@
 """
-The stateful objects are used and updated each time step with an update
+The Stateful objects are used and updated each time step with an update
 method. This include Integs, Delays, Forecasts, Smooths, and Trends,
-between others. The Macro class and Model class are also Stateful class
-child. But defined in the file model.py.
+among others. The Macro class and Model class are also Stateful subclasses.
+However, they are defined apart as they are more complex.
 """
 
 import warnings
@@ -61,20 +61,24 @@ def update(self, state):
 
 class Integ(DynamicStateful):
     """
-    Implements INTEG function
+    Implements INTEG function.
+
+    Parameters
+    ----------
+    ddt: callable
+        Derivative to integrate.
+    initial_value: callable
+        Initial value.
+    py_name: str
+        Python name to identify the object.
+
+    Attributes
+    ----------
+    state: float or xarray.DataArray
+        Current state of the object. Value of the stock.
+
     """
     def __init__(self, ddt, initial_value, py_name):
-        """
-
-        Parameters
-        ----------
-        ddt: function
-            This will become an attribute of the object
-        initial_value: function
-            Initial value
-        py_name: str
-            Python name to identify the object
-        """
         super().__init__()
         self.init_func = initial_value
         self.ddt = ddt
@@ -96,7 +100,29 @@ def export(self):
 
 class Delay(DynamicStateful):
     """
-    Implements DELAY function
+    Implements DELAY function.
+
+    Parameters
+    ----------
+    delay_input: callable
+        Input of the delay.
+    delay_time: callable
+        Delay time.
+    initial_value: callable
+        Initial value.
+    order: callable
+        Delay order.
+    tstep: callable
+        The time step of the model.
+    py_name: str
+        Python name to identify the object.
+
+    Attributes
+    ----------
+    state: numpy.array or xarray.DataArray
+        Current state of the object. Array of the delay values multiplied
+        by their corresponding average time.
+
     """
     # note that we could have put the `delay_input` argument as a parameter to
     # the `__call__` function, and more closely mirrored the vensim syntax.
@@ -107,17 +133,6 @@ class Delay(DynamicStateful):
 
     def __init__(self, delay_input, delay_time, initial_value, order, tstep,
                  py_name):
-        """
-
-        Parameters
-        ----------
-        delay_input: function
-        delay_time: function
-        initial_value: function
-        order: function
-        py_name: str
-            Python name to identify the object
-        """
         super().__init__()
         self.init_func = initial_value
         self.delay_time_func = delay_time
@@ -180,7 +195,34 @@ def export(self):
 
 class DelayN(DynamicStateful):
     """
-    Implements DELAY N function
+    Implements DELAY N function.
+
+    Parameters
+    ----------
+    delay_input: callable
+        Input of the delay.
+    delay_time: callable
+        Delay time.
+    initial_value: callable
+        Initial value.
+    order: callable
+        Delay order.
+    tstep: callable
+        The time step of the model.
+    py_name: str
+        Python name to identify the object.
+
+    Attributes
+    ----------
+    state: numpy.array or xarray.DataArray
+        Current state of the object. Array of the delay values multiplied
+        by their corresponding average time.
+
+    times: numpy.array or xarray.DataArray
+        Array of delay times used for computing the delay output.
+        If delay_time is constant, this array will be constant and
+        DelayN will behave as Delay.
+
     """
     # note that we could have put the `delay_input` argument as a parameter to
     # the `__call__` function, and more closely mirrored the vensim syntax.
@@ -191,17 +233,6 @@ class DelayN(DynamicStateful):
 
     def __init__(self, delay_input, delay_time, initial_value, order, tstep,
                  py_name):
-        """
-
-        Parameters
-        ----------
-        delay_input: function
-        delay_time: function
-        initial_value: function
-        order: function
-        py_name: str
-            Python name to identify the object
-        """
         super().__init__()
         self.init_func = initial_value
         self.delay_time_func = delay_time
@@ -277,22 +308,34 @@ def export(self):
 
 class DelayFixed(DynamicStateful):
     """
-    Implements DELAY FIXED function
+    Implements DELAY FIXED function.
+
+    Parameters
+    ----------
+    delay_input: callable
+        Input of the delay.
+    delay_time: callable
+        Delay time.
+    initial_value: callable
+        Initial value.
+    tstep: callable
+        The time step of the model.
+    py_name: str
+        Python name to identify the object.
+
+    Attributes
+    ----------
+    state: float or xarray.DataArray
+        Current state of the object, equal to pipe[pointer].
+    pipe: list
+        List of the delay values.
+    pointer: int
+        Pointer to the last value in the pipe.
+
     """
     def __init__(self, delay_input, delay_time, initial_value, tstep,
                  py_name):
-        """
-
-        Parameters
-        ----------
-        delay_input: function
-        delay_time: function
-        initial_value: function
-        order: function
-        py_name: str
-            Python name to identify the object
-        """
         super().__init__()
         self.init_func = initial_value
         self.delay_time_func = delay_time
@@ -341,21 +384,29 @@ def export(self):
 
 class Forecast(DynamicStateful):
     """
-    Implements FORECAST function
+    Implements FORECAST function.
+
+    Parameters
+    ----------
+    forecast_input: callable
+        Input of the forecast.
+    average_time: callable
+        Average time.
+    horizon: callable
+        Forecast horizon.
+    initial_trend: callable
+        Initial trend of the forecast.
+    py_name: str
+        Python name to identify the object.
+
+    Attributes
+    ----------
+    state: float or xarray.DataArray
+        Current state of the object. AV value by Vensim docs.
+
     """
     def __init__(self, forecast_input, average_time, horizon, initial_trend,
                  py_name):
-        """
-
-        Parameters
-        ----------
-        forecast_input: function
-        average_time: function
-        horizon: function
-        py_name: str
-            Python name to identify the object
-        """
-
         super().__init__()
         self.horizon = horizon
         self.average_time = average_time
@@ -391,21 +442,30 @@ def export(self):
 
 class Smooth(DynamicStateful):
     """
-    Implements SMOOTH function
+    Implements SMOOTH function.
+
+    Parameters
+    ----------
+    smooth_input: callable
+        Input of the smooth.
+    smooth_time: callable
+        Smooth time.
+    initial_value: callable
+        Initial value.
+    order: callable
+        Delay order.
+    py_name: str
+        Python name to identify the object.
+
+    Attributes
+    ----------
+    state: numpy.array or xarray.DataArray
+        Current state of the object. Array of the inputs having the
+        value to return in the last position.
+ """ def __init__(self, smooth_input, smooth_time, initial_value, order, py_name): - """ - - Parameters - ---------- - smooth_input: function - smooth_time: function - initial_value: function - order: function - py_name: str - Python name to identify the object - """ super().__init__() self.init_func = initial_value self.smooth_time_func = smooth_time @@ -452,20 +512,26 @@ def export(self): class Trend(DynamicStateful): """ - Implements TREND function + Implements TREND function. + + Parameters + ---------- + trend_input: callable + Input of the trend. + average_time: callable + Average time. + initial_trend: callable + Initial trend. + py_name: str + Python name to identify the object. + + Attributes + ---------- + state: float or xarray.DataArray + Current state of the object. AV value by Vensim docs. + """ def __init__(self, trend_input, average_time, initial_trend, py_name): - """ - - Parameters - ---------- - trend_input: function - average_time: function - initial_trend: function - py_name: str - Python name to identify the object - """ - super().__init__() self.init_func = initial_trend self.average_time_function = average_time @@ -496,17 +562,28 @@ def export(self): class SampleIfTrue(DynamicStateful): + """ + Implements SAMPLE IF TRUE function. + + Parameters + ---------- + condition: callable + Condition for sample. + actual_value: callable + Value to update if condition is true. + initial_value: callable + Initial value. + py_name: str + Python name to identify the object. + + Attributes + ---------- + state: float or xarray.DataArray + Current state of the object. Last actual_value when condition + was true or the initial_value if condition has never been true. + + """ def __init__(self, condition, actual_value, initial_value, py_name): - """ - - Parameters - ---------- - condition: function - actual_value: function - initial_value: function - py_name: str - Python name to identify the object - """ super().__init__() self.condition = condition self.actual_value = actual_value @@ -541,17 +618,22 @@ def export(self): class Initial(Stateful): """ - Implements INITIAL function + Implements INITIAL function. + + Parameters + ---------- + initial_value: callable + Initial value. + py_name: str + Python name to identify the object. + + Attributes + ---------- + state: float or xarray.DataArray + Current state of the object, which will always be the initial_value. 
+ """ def __init__(self, initial_value, py_name): - """ - - Parameters - ---------- - initial_value: function - py_name: str - Python name to identify the object - """ super().__init__() self.init_func = initial_value self.py_name = py_name From 06cb2bf5a187946ae40bb1aeb2d6731c5005fbad Mon Sep 17 00:00:00 2001 From: Eneko Martin-Martinez Date: Fri, 20 May 2022 13:54:44 +0200 Subject: [PATCH 96/96] Update the statement of need --- README.md | 7 +++---- docs/index.rst | 14 +++++++++++--- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 38410adc..a59b83cb 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ PySD ==== + [![Coverage Status](https://coveralls.io/repos/github/JamesPHoughton/pysd/badge.svg?branch=master)](https://coveralls.io/github/JamesPHoughton/pysd?branch=master) [![Anaconda-Server Badge](https://anaconda.org/conda-forge/pysd/badges/version.svg)](https://anaconda.org/conda-forge/pysd) [![PyPI version](https://badge.fury.io/py/pysd.svg)](https://badge.fury.io/py/pysd) @@ -13,11 +14,10 @@ Simulating System Dynamics Models in Python This project is a simple library for running [System Dynamics](http://en.wikipedia.org/wiki/System_dynamics) models in python, with the purpose of improving integration of *Big Data* and *Machine Learning* into the SD workflow. -**The current version needs to run at least Python 3.7. If you need support for Python 2, please use the release here: https://github.com/JamesPHoughton/pysd/releases/tag/LastPy2** - -**table2py feature was dropped in version 2.0.0, please use the release here if you want to build PySD model from a tabular file: https://github.com/JamesPHoughton/pysd/releases/tag/v1.11.0** +**The current version needs to run at least Python 3.7.** ### Resources + See the [project documentation](http://pysd.readthedocs.org/) for information about: - [Installation](http://pysd.readthedocs.org/en/latest/installation.html) @@ -37,7 +37,6 @@ You can also cite the library using the [DOI provided by Zenodo](https://zenodo. [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5654824.svg)](https://doi.org/10.5281/zenodo.5654824) - ### Why create a new SD simulation engine? There are a number of great SD programs out there ([Vensim](http://vensim.com/), [iThink](http://www.iseesystems.com/Softwares/Business/ithinkSoftware.aspx), [AnyLogic](http://www.anylogic.com/system-dynamics), [Insight Maker](http://insightmaker.com/), and [others](http://en.wikipedia.org/wiki/List_of_system_dynamics_software)). In order not to waste our effort, or fall victim to the [Not-Invented-Here](http://en.wikipedia.org/wiki/Not_invented_here) fallacy, we should have a very good reason for starting a new project. diff --git a/docs/index.rst b/docs/index.rst index 56f9497a..99d7d54d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -34,13 +34,21 @@ PySD .. |DOI| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.5654824.svg :target: https://doi.org/10.5281/zenodo.5654824 -This project is a simple library for running System Dynamics models in Python, with the purpose of -improving integration of Big Data and Machine Learning into the SD workflow. +This project is a simple library for running System Dynamics models in Python, with the purpose of improving integration of Big Data and Machine Learning into the SD workflow. PySD translates :doc:`Vensim ` or :doc:`XMILE ` model files into Python modules, -and provides methods to modify, simulate, and observe those translated models. 
+and provides methods to modify, simulate, and observe those translated models. The translation is done through an intermediate :doc:`Abstract Syntax Tree representation `,
+which makes it possible to add builders in other languages in a simpler way.
 
+Why create a new SD simulation engine?
+--------------------------------------
+
+There are a number of great SD programs out there (`Vensim `_, `iThink `_, `AnyLogic `_, `Insight Maker `_, and `others `_). In order not to waste our effort, or fall victim to the `Not-Invented-Here `_ fallacy, we should have a very good reason for starting a new project.
+
+That reason is this: There is a whole world of computational tools being developed in the larger data science community. **System dynamicists should directly use the tools that other people are building, instead of replicating their functionality in SD specific software.** The best way to do this is to bring specific SD functionality to the domain where those other tools are being developed.
+
+This approach allows SD modelers to take advantage of the most recent developments in data science, and focus our efforts on improving the part of the stack that is unique to System Dynamics modeling.
 
 Additional Resources
 --------------------
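A small, self-contained usage sketch of the helper functions whose docstrings are updated in pysd/py_backend/functions.py above (the constant ``time`` callable is only a stand-in for the model's time object; the expected values follow the docstrings):

.. code-block:: python

    from pysd.py_backend.functions import ramp, step, zidz

    time = lambda: 7  # stand-in for the model's current-time callable

    ramp(time, slope=2, start=5, finish=10)  # 2 * (7 - 5) = 4, ramp still rising
    step(time, value=10, tstep=5)            # 10, the step occurred at t=5
    zidz(1, 0)                               # 0 instead of a division error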