From 66c540e92c871051fa3771d807f7b0476333dc79 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Wed, 30 Oct 2024 11:29:57 -0400 Subject: [PATCH 01/15] Updated interface to get_variables_data --- src/common/io.py | 80 ++++++++++++++++++++++++++++++------------------ 1 file changed, 50 insertions(+), 30 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 3603b8c0..3ff7005f 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Copyright (C) 2010-2021 Modelon AB +# Copyright (C) 2010-2024 Modelon AB # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by @@ -1554,15 +1554,13 @@ def get_variables_data(self, names: list[str], start_index: int = 0, stop_index: Union[int, None] = None - ) -> tuple[list[Trajectory], Union[int, None]]: + ) -> tuple[dict[str, Trajectory], Union[int, None]]: """" - Returns multiple trajectories, sliced to index range. - Note that start_index and stop_index behaves as indices for slicing, i.e. array[start_index:stop_index]. - This also implies that stop_index = None or stop_index larger than the number of available data points - results in retrieving all the available data points from start_index, i.e. as the slice [start_index:]. - - Note that (start_index, stop_index) = (None, None) results in the slicing [None:None] which is equivalent to [:]. - + Returns trajectories for each variable in 'names' with lengths adjusted for the + interval [start_index, stop_index], i.e. partial trajectories. + This requires that 'start_index' and 'stop_index' are within the + range of [0, - 1]. + By default, start_index = 0 and stop_index = None, which implies that the full trajectory is returned. Parameters:: @@ -1570,16 +1568,18 @@ def get_variables_data(self, List of variables names for which to fetch trajectories. start_index -- - Starting index for trajectory slicing. 
+ The index from where the trajectory data starts from. stop_index -- - Stopping index for trajectory slicing. + The index from where the trajectory data ends. If stop_index is set to None, + it implies that all data in the slice [start_index:] is returned. Raises:: - ValueError -- If stop_index < start_index. + ValueError -- If stop_index < start_index. + pyfmi.common.io.InvalidIndexError -- If start_index or stop_index are larger than the number of available data points. Returns:: - Tuple: (List of trajectories, next start index (non-negative)) + Tuple: (dict of trajectories with keys corresponding to variable names, next start index (non-negative)) """ """ @@ -1593,21 +1593,38 @@ def get_variables_data(self, if isinstance(start_index, int) and isinstance(stop_index, int) and stop_index < start_index: raise ValueError(f"Invalid values for {start_index=} and {stop_index=}, " + \ "'start_index' needs to be less than or equal to 'stop_index'.") - trajectories = [] + trajectories = {} - # Get the time trajectory + # First we need to check that start_index and stop_index are within a valid range of [0, -1] + # Another way to do it is to use data_2_info and data_3_info but then we also need to invoke verify_file_data, + # unclear what is the most efficient approach for now. 
if not self._contains_diagnostic_data: - time = self._get_trajectory(0, start_index, stop_index) + time = self._get_trajectory(0, 0, None) else: # Since we interpolate data if diagnostics is enabled - time = self._get_diagnostics_trajectory(0, start_index, stop_index) + time = self._get_diagnostics_trajectory(0, 0, None) + + max_valid_index = len(time) - 1 # -1 since we we have a 0-index based system + if start_index > max_valid_index: + raise InvalidIndexError( + f"Input 'start_index'={start_index} needs to be less than the number of available data points: {max_valid_index}") - # Need to account for data that might be added while we are iterating over 'names' later + if stop_index and stop_index > max_valid_index: # since stop_index is Default None + raise InvalidIndexError( + f"Input 'stop_index'={stop_index} needs to be less than the number of available data points: {max_valid_index}") + + # Need to account for data that might be added we are retrieving the trajectories if stop_index is None: - stop_index = len(time) + start_index + stop_index = max_valid_index + + # Now get the correct time trajectory where we account for the start and stop index. 
+ if not self._contains_diagnostic_data: + time = self._get_trajectory(0, start_index, stop_index) + else: + time = self._get_diagnostics_trajectory(0, start_index, stop_index) for name in names: - trajectories.append(self._get_variable_data_as_trajectory(name, time, start_index, stop_index)) + trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) new_start_index = start_index + len(time) if len(trajectories) > 0 else None return trajectories, new_start_index @@ -1759,7 +1776,7 @@ def __init__(self, model, delimiter=";"): super().__init__(model) self.supports['result_max_size'] = True self._first_point = True - + def simulation_start(self): """ This method is called before the simulation has started and before @@ -1811,13 +1828,13 @@ def integration_point(self, solver = None): #Sets the parameters, if any if solver and self.options["sensitivities"]: self.param_sol += [np.array(solver.interpolate_sensitivity(model.time, 0)).flatten()] - + max_size = self.options.get("result_max_size", None) if max_size is not None: current_size = sys.getsizeof(self.time_sol) + sys.getsizeof(self.real_sol) + \ sys.getsizeof(self.int_sol) + sys.getsizeof(self.bool_sol) + \ sys.getsizeof(self.param_sol) - + verify_result_size(self._first_point, current_size, previous_size, max_size, self.options["ncp"], self.model.time) self._first_point = False @@ -2389,7 +2406,7 @@ def simulation_start(self): self.real_var_ref = np.array(self.real_var_ref) self.int_var_ref = np.array(self.int_var_ref) self.bool_var_ref = np.array(self.bool_var_ref) - + def _write(self, msg): self._current_file_size = self._current_file_size+len(msg) self._file.write(msg) @@ -2521,6 +2538,9 @@ class ResultSizeError(JIOError): Exception that is raised when a set maximum result size is exceeded. """ +class InvalidIndexError(JIOError): + """ Exception that is raised when indices for variable trajectories are invalid. 
""" + def robust_float(value): """ Function for robust handling of float values such as INF and NAN. @@ -2767,16 +2787,16 @@ def integration_point(self, solver = None): def diagnostics_point(self, diag_data): """ Generates a data point for diagnostics data by invoking the util function save_diagnostics_point. """ - self.dump_data_internal.save_diagnostics_point(diag_data) + self.dump_data_internal.save_diagnostics_point(diag_data) self.nbr_diag_points += 1 self._make_consistent(diag=True) def _make_consistent(self, diag=False): """ This method makes sure that the result file is always consistent, meaning that it is - always possible to load the result file in the result class. The method makes the + always possible to load the result file in the result class. The method makes the result file consistent by going back in the result file and updates the final time - as well as the number of result points in the file in specific locations of the + as well as the number of result points in the file in specific locations of the result file. In the end, it puts the file pointer back to the end of the file (which allows further writing of new result points) """ @@ -2841,8 +2861,8 @@ def verify_result_size(first_point, current_size, previous_size, max_size, ncp, raise ResultSizeError(msg + "To change the maximum allowed result file size, please use the option 'result_max_size'") if current_size > max_size: - raise ResultSizeError("Maximum size of the result reached (limit: %g GB) at time t=%g. " - "To change the maximum allowed result size, please use the option " + raise ResultSizeError("Maximum size of the result reached (limit: %g GB) at time t=%g. 
" + "To change the maximum allowed result size, please use the option " "'result_max_size' or consider reducing the number of communication " "points alternatively the number of variables to store result for."%(max_size/1024**3, time)) @@ -2873,7 +2893,7 @@ def get_result_handler(model, opts): result_handler = ResultHandlerDummy(model) else: raise fmi.FMUException("Unknown option to result_handling.") - + if (opts.get("result_max_size", 0) > 0) and not result_handler.supports["result_max_size"]: logging_module.warning("The chosen result handler does not support limiting the result size. Ignoring option 'result_max_size'.") From 4cbc62ad5625f90bd654e6e8a81c7ade1688cfdd Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Wed, 30 Oct 2024 13:11:33 -0400 Subject: [PATCH 02/15] Removed check for index values for now and only keep return type change --- src/common/io.py | 27 +++++---------------------- tests/test_io.py | 42 +++++++++++++++++++++--------------------- 2 files changed, 26 insertions(+), 43 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 3ff7005f..0f4ec9a0 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1595,33 +1595,16 @@ def get_variables_data(self, "'start_index' needs to be less than or equal to 'stop_index'.") trajectories = {} - # First we need to check that start_index and stop_index are within a valid range of [0, -1] - # Another way to do it is to use data_2_info and data_3_info but then we also need to invoke verify_file_data, - # unclear what is the most efficient approach for now. 
+ # Get the corresponding time trajectory if not self._contains_diagnostic_data: - time = self._get_trajectory(0, 0, None) + time = self._get_trajectory(0, start_index, stop_index) else: # Since we interpolate data if diagnostics is enabled - time = self._get_diagnostics_trajectory(0, 0, None) - - max_valid_index = len(time) - 1 # -1 since we we have a 0-index based system - if start_index > max_valid_index: - raise InvalidIndexError( - f"Input 'start_index'={start_index} needs to be less than the number of available data points: {max_valid_index}") - - if stop_index and stop_index > max_valid_index: # since stop_index is Default None - raise InvalidIndexError( - f"Input 'stop_index'={stop_index} needs to be less than the number of available data points: {max_valid_index}") + time = self._get_diagnostics_trajectory(0, start_index, stop_index) - # Need to account for data that might be added we are retrieving the trajectories + # Need to account for data that might be added while we are iterating over 'names' later if stop_index is None: - stop_index = max_valid_index - - # Now get the correct time trajectory where we account for the start and stop index. 
- if not self._contains_diagnostic_data: - time = self._get_trajectory(0, start_index, stop_index) - else: - time = self._get_diagnostics_trajectory(0, start_index, stop_index) + stop_index = len(time) + start_index for name in names: trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) diff --git a/tests/test_io.py b/tests/test_io.py index 430f5690..34c464bc 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1793,7 +1793,7 @@ def _test_get_variables_data(self, dynamic_diagnostics: bool, nbr_of_calls: int, fmu.set('J4.phi', f(fmu.time)) # arbitrary trajectories, start_index = rdb.get_variables_data(vars_to_test, start_index, stop_index_function(start_index)) - data_to_return[i] = [t.x for t in trajectories] + data_to_return[i] = trajectories.copy() assert data_to_return, "Something went wrong, no test data was generated" return data_to_return @@ -1811,7 +1811,7 @@ def test_get_variables_data_values0(self): } for index, test_data in test_data_sets.items(): - np.testing.assert_array_almost_equal(test_data[0], reference_data[index]) + np.testing.assert_array_almost_equal(test_data['J4.phi'].x, reference_data[index]) @testattr(stddist = True) def test_get_variables_data_values1(self): @@ -1830,7 +1830,7 @@ def test_get_variables_data_values1(self): # Just verify results for J4.phi here, but we retrieve all four trajectories at once # to see that it works for index, test_data in test_data_sets.items(): - np.testing.assert_array_almost_equal(test_data[1], reference_data[index]) + np.testing.assert_array_almost_equal(test_data['J4.phi'].x, reference_data[index]) @testattr(stddist = True) def test_get_variables_data_values2(self): @@ -1847,7 +1847,7 @@ def test_get_variables_data_values2(self): } for index, test_data in test_data_sets.items(): - np.testing.assert_array_almost_equal(test_data[1], reference_data[index]) + np.testing.assert_array_almost_equal(test_data['J4.phi'].x, reference_data[index]) def 
test_get_variables_data_values3(self): """ Verifing values from get_variables_data, and only asking for diagnostic variables. """ @@ -1872,8 +1872,8 @@ def test_get_variables_data_values3(self): } for index, test_data in test_data_sets.items(): - np.testing.assert_array_almost_equal(test_data[0], reference_data['@Diagnostics.step_time'][index]) - np.testing.assert_array_almost_equal(test_data[1], reference_data['@Diagnostics.nbr_steps'][index]) + np.testing.assert_array_almost_equal(test_data['@Diagnostics.step_time'].x, reference_data['@Diagnostics.step_time'][index]) + np.testing.assert_array_almost_equal(test_data['@Diagnostics.nbr_steps'].x, reference_data['@Diagnostics.nbr_steps'][index]) def test_get_variables_data_values4(self): """ Verifing values from get_variables_data, partial trajectories and checking both time and diagnostic data.""" @@ -1898,8 +1898,8 @@ def test_get_variables_data_values4(self): } for index, test_data in test_data_sets.items(): - np.testing.assert_array_almost_equal(test_data[0], reference_data['time'][index]) - np.testing.assert_array_almost_equal(test_data[1], reference_data['@Diagnostics.nbr_steps'][index]) + np.testing.assert_array_almost_equal(test_data['time'].x, reference_data['time'][index]) + np.testing.assert_array_almost_equal(test_data['@Diagnostics.nbr_steps'].x, reference_data['@Diagnostics.nbr_steps'][index]) if assimulo_installed: class TestFileSizeLimit: @@ -1909,7 +1909,7 @@ def _setup(self, result_type, result_file_name="", fmi_type="me"): model = Dummy_FMUModelME2([], os.path.join(file_path, "files", "FMUs", "XML", "ME2.0", "CoupledClutches.fmu"), _connect_dll=False) else: model = Dummy_FMUModelCS2([], os.path.join(file_path, "files", "FMUs", "XML", "CS2.0", "CoupledClutches.fmu"), _connect_dll=False) - + opts = model.simulate_options() opts["result_handling"] = result_type opts["result_file_name"] = result_file_name @@ -1973,7 +1973,7 @@ def _test_result_size_verification(self, result_type, result_file_name="", 
dynam assert file_size > max_size*0.9 and file_size < max_size*1.1, \ "The file size is not within 10% of the given max size" - + def _test_result_size_early_abort(self, result_type, result_file_name=""): """ Verifies that the ResultSizeError is triggered and also verifies that the cause of the error being @@ -1994,7 +1994,7 @@ def _test_result_size_early_abort(self, result_type, result_file_name=""): assert file_size < max_size*0.1, \ "The file size is not small, no early abort" - + # TODO: Pytest parametrization """ Binary @@ -2005,11 +2005,11 @@ def test_binary_file_size_verification_diagnostics(self): Make sure that the diagnostics variables are also taken into account. """ self._test_result_size_verification("binary", dynamic_diagnostics=True) - + @testattr(stddist = True) def test_binary_file_size_verification(self): self._test_result_size_verification("binary") - + @testattr(stddist = True) def test_binary_file_size_early_abort(self): self._test_result_size_early_abort("binary") @@ -2017,11 +2017,11 @@ def test_binary_file_size_early_abort(self): @testattr(stddist = True) def test_small_size_binary_file(self): self._test_result_exception("binary") - + @testattr(stddist = True) def test_small_size_binary_file_cs(self): self._test_result_exception("binary", fmi_type="cs") - + @testattr(stddist = True) def test_small_size_binary_file_stream(self): self._test_result_exception("binary", BytesIO()) @@ -2040,7 +2040,7 @@ def test_large_size_binary_file_stream(self): @testattr(stddist = True) def test_text_file_size_verification(self): self._test_result_size_verification("file") - + @testattr(stddist = True) def test_text_file_size_early_abort(self): self._test_result_size_early_abort("file") @@ -2048,7 +2048,7 @@ def test_text_file_size_early_abort(self): @testattr(stddist = True) def test_small_size_text_file(self): self._test_result_exception("file") - + @testattr(stddist = True) def test_small_size_text_file_stream(self): self._test_result_exception("file", 
StringIO()) @@ -2067,7 +2067,7 @@ def test_large_size_text_file_stream(self): @testattr(stddist = True) def test_csv_file_size_verification(self): self._test_result_size_verification("csv") - + @testattr(stddist = True) def test_csv_file_size_early_abort(self): self._test_result_size_early_abort("csv") @@ -2075,7 +2075,7 @@ def test_csv_file_size_early_abort(self): @testattr(stddist = True) def test_small_size_csv_file(self): self._test_result_exception("csv") - + @testattr(stddist = True) def test_small_size_csv_file_stream(self): self._test_result_exception("csv", StringIO()) @@ -2094,11 +2094,11 @@ def test_large_size_csv_file_stream(self): @testattr(stddist = True) def test_small_size_memory(self): self._test_result_exception("memory") - + @testattr(stddist = True) def test_memory_size_early_abort(self): self._test_result_size_early_abort("memory") - + @testattr(stddist = True) def test_small_size_memory_stream(self): self._test_result_exception("memory", StringIO()) From cc10bc627e7341eb0a8965847da51ed1b831f05f Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Fri, 1 Nov 2024 11:36:50 -0400 Subject: [PATCH 03/15] Added fix to adjust indices if out of bounds and updated docstring --- src/common/io.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 0f4ec9a0..17908979 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1344,17 +1344,10 @@ def _get_trajectory(self, data_index, start_index = 0, stop_index = None): nbr_variables = self._data_2_info["nbr_variables"] # Account for sub-sets of data - if start_index > 0: - new_file_position = file_position + start_index*sizeof_type*nbr_variables - new_nbr_points = nbr_points - start_index - else: - new_file_position = file_position - new_nbr_points = nbr_points - - if stop_index is not None and stop_index > 0: - new_nbr_points = stop_index - if start_index > 0: - new_nbr_points -= start_index + start_index = max(0, 
start_index) + stop_index = max(0, nbr_points if stop_index is None else min(nbr_points, stop_index)) + new_file_position = file_position + start_index*sizeof_type*nbr_variables + new_nbr_points = stop_index - start_index self._data_2[data_index] = fmi_util.read_trajectory( encode(self._fname), @@ -1558,8 +1551,12 @@ def get_variables_data(self, """" Returns trajectories for each variable in 'names' with lengths adjusted for the interval [start_index, stop_index], i.e. partial trajectories. - This requires that 'start_index' and 'stop_index' are within the - range of [0, - 1]. + Improper values for start_index and stop_index that are out of bounds are automatically corrected, + such that: + Negative values are always adjusted to 0 or larger. + Out of bounds for stop_index is adjusted for the number of available data points, as an example + if you set start_index = 0, stop_index = 5 but there are only 3 data points available, + then this function returns 3 data_points. By default, start_index = 0 and stop_index = None, which implies that the full trajectory is returned. Parameters:: @@ -1576,7 +1573,6 @@ def get_variables_data(self, Raises:: ValueError -- If stop_index < start_index. - pyfmi.common.io.InvalidIndexError -- If start_index or stop_index are larger than the number of available data points. 
Returns:: Tuple: (dict of trajectories with keys corresponding to variable names, next start index (non-negative)) From 12121b82cf056bf97fe5b95c0c4cd8d7b3c40397 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Tue, 5 Nov 2024 09:13:21 -0500 Subject: [PATCH 04/15] Added fix for cached values affecting partial trajectories and added test --- src/common/io.py | 15 ++++++++++++++- tests/test_io.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/common/io.py b/src/common/io.py index 17908979..ea38be7b 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1208,6 +1208,7 @@ def __init__(self, fname, delayed_trajectory_loading = True, allow_file_updates= self._is_stream = True delayed_trajectory_loading = False self._allow_file_updates = allow_file_updates + self._last_set_of_indices = (None, None) # used for dealing with cached data and partial trajectories data_sections = ["name", "dataInfo", "data_2", "data_3", "data_4"] if not self._is_stream: @@ -1331,11 +1332,18 @@ def _get_name_dict(self): return name_dict + def _can_use_partial_cache(self, start_index, stop_index): + """ Checks if start_index and stop_oindex are equal to the last cached indices. 
""" + return self._allow_file_updates and (self._last_set_of_indices == (start_index, stop_index)) + def _get_trajectory(self, data_index, start_index = 0, stop_index = None): if isinstance(self._data_2, dict): self._verify_file_data() - if data_index in self._data_2: + index_in_cache = data_index in self._data_2 + partial_cache_ok = self._can_use_partial_cache(start_index, stop_index) + if (index_in_cache and not self._allow_file_updates) or (index_in_cache and partial_cache_ok): + print(f"Doing an early return") return self._data_2[data_index] file_position = self._data_2_info["file_position"] @@ -1344,10 +1352,12 @@ def _get_trajectory(self, data_index, start_index = 0, stop_index = None): nbr_variables = self._data_2_info["nbr_variables"] # Account for sub-sets of data + print(f"{start_index=}, {stop_index=}") start_index = max(0, start_index) stop_index = max(0, nbr_points if stop_index is None else min(nbr_points, stop_index)) new_file_position = file_position + start_index*sizeof_type*nbr_variables new_nbr_points = stop_index - start_index + print(f"{start_index=}, {stop_index=}, {new_nbr_points=}") self._data_2[data_index] = fmi_util.read_trajectory( encode(self._fname), @@ -1589,6 +1599,8 @@ def get_variables_data(self, if isinstance(start_index, int) and isinstance(stop_index, int) and stop_index < start_index: raise ValueError(f"Invalid values for {start_index=} and {stop_index=}, " + \ "'start_index' needs to be less than or equal to 'stop_index'.") + + trajectories = {} # Get the corresponding time trajectory @@ -1606,6 +1618,7 @@ def get_variables_data(self, trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) new_start_index = start_index + len(time) if len(trajectories) > 0 else None + self._last_set_of_indices = (start_index, stop_index) # update them before we exit return trajectories, new_start_index def _calculate_events_and_steps(self, name): diff --git a/tests/test_io.py b/tests/test_io.py index 
34c464bc..1298bf70 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1849,6 +1849,7 @@ def test_get_variables_data_values2(self): for index, test_data in test_data_sets.items(): np.testing.assert_array_almost_equal(test_data['J4.phi'].x, reference_data[index]) + @testattr(stddist = True) def test_get_variables_data_values3(self): """ Verifing values from get_variables_data, and only asking for diagnostic variables. """ vars_to_test = ['@Diagnostics.step_time', '@Diagnostics.nbr_steps'] @@ -1875,6 +1876,7 @@ def test_get_variables_data_values3(self): np.testing.assert_array_almost_equal(test_data['@Diagnostics.step_time'].x, reference_data['@Diagnostics.step_time'][index]) np.testing.assert_array_almost_equal(test_data['@Diagnostics.nbr_steps'].x, reference_data['@Diagnostics.nbr_steps'][index]) + @testattr(stddist = True) def test_get_variables_data_values4(self): """ Verifing values from get_variables_data, partial trajectories and checking both time and diagnostic data.""" vars_to_test = ['time', '@Diagnostics.nbr_steps'] @@ -1901,6 +1903,33 @@ def test_get_variables_data_values4(self): np.testing.assert_array_almost_equal(test_data['time'].x, reference_data['time'][index]) np.testing.assert_array_almost_equal(test_data['@Diagnostics.nbr_steps'].x, reference_data['@Diagnostics.nbr_steps'][index]) + @testattr(stddist = True) + def test_stop_index_near_bounds(self): + """ Verify that we get expected results near the end of the result file, including + stop_index out of range. 
+ """ + fmu = Dummy_FMUModelME2([], os.path.join(file_path, "files", "FMUs", "XML", "ME2.0", "bouncingBall.fmu"), _connect_dll=False) + res = fmu.simulate() + assert len(res['h']) == 501 + + rdb = ResultDymolaBinary(fmu.get_last_result_file(), allow_file_updates = True) + np.testing.assert_array_almost_equal( + (rdb.get_variables_data(['h'], 495, 496)[0]['h'].x), + np.array([0.37268813])) + np.testing.assert_array_almost_equal( + (rdb.get_variables_data(['h'], 495, 500)[0]['h'].x), + np.array([0.37268813, 0.37194424, 0.37120184, 0.37046092, 0.36972148])) + + np.testing.assert_array_almost_equal( + (rdb.get_variables_data(['h'], 495, 499)[0]['h'].x), + np.array([0.37268813, 0.37194424, 0.37120184, 0.37046092])) + np.testing.assert_array_almost_equal( + (rdb.get_variables_data(['h'], 495, 501)[0]['h'].x), + np.array([0.37268813, 0.37194424, 0.37120184, 0.37046092, 0.36972148, 0.36898351])) + np.testing.assert_array_almost_equal( + (rdb.get_variables_data(['h'], 495, 502)[0]['h'].x), + np.array([0.37268813, 0.37194424, 0.37120184, 0.37046092, 0.36972148, 0.36898351])) + if assimulo_installed: class TestFileSizeLimit: From 3f42f5fee60afcf273e6701e7467d4d460097d96 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Tue, 5 Nov 2024 09:13:53 -0500 Subject: [PATCH 05/15] Removed print-statements --- src/common/io.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index ea38be7b..66e557ea 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1343,7 +1343,6 @@ def _get_trajectory(self, data_index, start_index = 0, stop_index = None): index_in_cache = data_index in self._data_2 partial_cache_ok = self._can_use_partial_cache(start_index, stop_index) if (index_in_cache and not self._allow_file_updates) or (index_in_cache and partial_cache_ok): - print(f"Doing an early return") return self._data_2[data_index] file_position = self._data_2_info["file_position"] @@ -1352,12 +1351,10 @@ def _get_trajectory(self, data_index, start_index = 
0, stop_index = None): nbr_variables = self._data_2_info["nbr_variables"] # Account for sub-sets of data - print(f"{start_index=}, {stop_index=}") start_index = max(0, start_index) stop_index = max(0, nbr_points if stop_index is None else min(nbr_points, stop_index)) new_file_position = file_position + start_index*sizeof_type*nbr_variables new_nbr_points = stop_index - start_index - print(f"{start_index=}, {stop_index=}, {new_nbr_points=}") self._data_2[data_index] = fmi_util.read_trajectory( encode(self._fname), From 7f50492226cfac80685883b1eb4904fe95b4b050 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Tue, 5 Nov 2024 09:59:22 -0500 Subject: [PATCH 06/15] Added the fix for handling cache also for diagnostics variable --- src/common/io.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 66e557ea..56f3b77b 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1332,8 +1332,8 @@ def _get_name_dict(self): return name_dict - def _can_use_partial_cache(self, start_index, stop_index): - """ Checks if start_index and stop_oindex are equal to the last cached indices. """ + def _can_use_partial_cache(self, start_index: int, stop_index: Union[int, None]): + """ Checks if start_index and stop_index are equal to the last cached indices. """ return self._allow_file_updates and (self._last_set_of_indices == (start_index, stop_index)) def _get_trajectory(self, data_index, start_index = 0, stop_index = None): @@ -1373,10 +1373,12 @@ def _get_diagnostics_trajectory(self, data_index, start_index = 0, stop_index = """ Returns trajectory for the diagnostics variable that corresponds to index 'data_index'. 
""" self._verify_file_data() - if data_index in self._data_3: + index_in_cache = data_index in self._data_3 + partial_cache_ok = self._can_use_partial_cache(start_index, stop_index) + if (index_in_cache and not self._allow_file_updates) or (index_in_cache and partial_cache_ok): return self._data_3[data_index] - self._data_3[data_index] = self._read_trajectory_data(data_index, True, start_index, stop_index) - return self._data_3[data_index][start_index:stop_index] + self._data_3[data_index] = self._read_trajectory_data(data_index, True, start_index, stop_index)[start_index:stop_index] + return self._data_3[data_index] def _read_trajectory_data(self, data_index, read_diag_data, start_index = 0, stop_index = None): """ Reads corresponding trajectory data for variable with index 'data_index', @@ -1413,11 +1415,13 @@ def _read_trajectory_data(self, data_index, read_diag_data, start_index = 0, sto return data - def _get_interpolated_trajectory(self, data_index: int, start_index: int = None, stop_index: int = None) -> Trajectory: + def _get_interpolated_trajectory(self, data_index: int, start_index: int = 0, stop_index: int = None) -> Trajectory: """ Returns an interpolated trajectory for variable of corresponding index 'data_index'. 
""" self._verify_file_data() - if data_index in self._data_2: + index_in_cache = data_index in self._data_2 + partial_cache_ok = self._can_use_partial_cache(start_index, stop_index) + if (index_in_cache and not self._allow_file_updates) or (index_in_cache and partial_cache_ok): return self._data_2[data_index] diag_time_vector = self._get_diagnostics_trajectory(0, start_index, stop_index) @@ -1426,8 +1430,9 @@ def _get_interpolated_trajectory(self, data_index: int, start_index: int = None, f = scipy.interpolate.interp1d(time_vector, data, fill_value="extrapolate") + # note that we dont need to slice here because diag_time_vector is already sliced accordingly self._data_2[data_index] = f(diag_time_vector) - return self._data_2[data_index][start_index:stop_index] + return self._data_2[data_index] def _get_description(self): if not self._description: From 61fd9e46d42d3291a2734f74cc69c09168d48da1 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Tue, 5 Nov 2024 10:01:13 -0500 Subject: [PATCH 07/15] Removed unused exception --- src/common/io.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 56f3b77b..2fb17928 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -2532,9 +2532,6 @@ class ResultSizeError(JIOError): Exception that is raised when a set maximum result size is exceeded. """ -class InvalidIndexError(JIOError): - """ Exception that is raised when indices for variable trajectories are invalid. """ - def robust_float(value): """ Function for robust handling of float values such as INF and NAN. 
From d55f064fd953e8ef86b650c548e0ad5ad370a0ed Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Wed, 6 Nov 2024 11:54:43 -0500 Subject: [PATCH 08/15] robuster solution for next start index --- src/common/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/io.py b/src/common/io.py index 2fb17928..2004149c 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1619,7 +1619,7 @@ def get_variables_data(self, for name in names: trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) - new_start_index = start_index + len(time) if len(trajectories) > 0 else None + new_start_index = stop_index if len(trajectories) > 0 else None self._last_set_of_indices = (start_index, stop_index) # update them before we exit return trajectories, new_start_index From 5738729b76f1ce8a0632a634091349e424a7ef43 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Fri, 8 Nov 2024 09:06:58 -0500 Subject: [PATCH 09/15] Fixed issue with stop_index and added more tests --- src/common/io.py | 6 +++++- tests/test_io.py | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/src/common/io.py b/src/common/io.py index 2004149c..71c31024 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1619,7 +1619,11 @@ def get_variables_data(self, for name in names: trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) - new_start_index = stop_index if len(trajectories) > 0 else None + largest_trajectory_length = -1 + for v, t in trajectories.items(): + largest_trajectory_length = max(largest_trajectory_length, len(t.x)) + new_start_index = start_index + largest_trajectory_length if len(trajectories) > 0 else None + self._last_set_of_indices = (start_index, stop_index) # update them before we exit return trajectories, new_start_index diff --git a/tests/test_io.py b/tests/test_io.py index 1298bf70..52963010 100644 --- a/tests/test_io.py +++ b/tests/test_io.py 
@@ -1930,6 +1930,30 @@ def test_stop_index_near_bounds(self): (rdb.get_variables_data(['h'], 495, 502)[0]['h'].x), np.array([0.37268813, 0.37194424, 0.37120184, 0.37046092, 0.36972148, 0.36898351])) + + @testattr(stddist = True) + def test_trajectory_lengths(self): + """ Verify lengths of trajectories are expected for a bunch of different inputs. """ + fmu = Dummy_FMUModelME2([], os.path.join(file_path, "files", "FMUs", "XML", "ME2.0", "bouncingBall.fmu"), _connect_dll=False) + res = fmu.simulate() + assert len(res['h']) == 501 + rdb = ResultDymolaBinary(fmu.get_last_result_file(), allow_file_updates = True) + assert len(rdb.get_variables_data(['h'], 495, 496)[0]['h'].x) == 1 + assert len(rdb.get_variables_data(['h'], 495, 500)[0]['h'].x) == 5 + assert len(rdb.get_variables_data(['h'], 495, 499)[0]['h'].x) == 4 + assert len(rdb.get_variables_data(['h'], 495, 501)[0]['h'].x) == 6 + assert len(rdb.get_variables_data(['h'], 495, 502)[0]['h'].x) == 6 + # a couple of repeated values to verify the cache is not being used + assert len(rdb.get_variables_data(['h'], 0, None)[0]['h'].x) == 501 + assert len(rdb.get_variables_data(['h'], 0, 5)[0]['h'].x) == 5 + assert len(rdb.get_variables_data(['h'], 0, None)[0]['h'].x) == 501 + assert len(rdb.get_variables_data(['h'], 0, 5)[0]['h'].x) == 5 + assert len(rdb.get_variables_data(['h'], 0, 5)[0]['h'].x) == 5 + + assert len(rdb.get_variables_data(['h'], 5, 15)[0]['h'].x) == 10 + assert len(rdb.get_variables_data(['h'], 0, 550)[0]['h'].x) == 501 + assert len(rdb.get_variables_data(['h'], 0, 10000)[0]['h'].x) == 501 + if assimulo_installed: class TestFileSizeLimit: From 2590bf60e3220cd0ddd10b4cee1d9eba99604559 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Fri, 8 Nov 2024 09:08:03 -0500 Subject: [PATCH 10/15] Changed if len(traj) > 0 to simply if traj --- src/common/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/io.py b/src/common/io.py index 71c31024..f5b1d79f 100644 --- a/src/common/io.py 
+++ b/src/common/io.py @@ -1622,7 +1622,7 @@ def get_variables_data(self, largest_trajectory_length = -1 for v, t in trajectories.items(): largest_trajectory_length = max(largest_trajectory_length, len(t.x)) - new_start_index = start_index + largest_trajectory_length if len(trajectories) > 0 else None + new_start_index = start_index + largest_trajectory_length if trajectories else None self._last_set_of_indices = (start_index, stop_index) # update them before we exit return trajectories, new_start_index From a880dc76abc75ab76d8b5f07808722d03d2e3ca5 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Mon, 11 Nov 2024 08:23:13 -0500 Subject: [PATCH 11/15] Fixed issue with start out of bounds --- src/common/io.py | 4 +++- tests/test_io.py | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/common/io.py b/src/common/io.py index f5b1d79f..78fac0c6 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1354,6 +1354,9 @@ def _get_trajectory(self, data_index, start_index = 0, stop_index = None): start_index = max(0, start_index) stop_index = max(0, nbr_points if stop_index is None else min(nbr_points, stop_index)) new_file_position = file_position + start_index*sizeof_type*nbr_variables + # Finally when stop_index = None, we can end up with start > stop, + # therefore we need to use min(start, stop) + start_index = min(start_index, stop_index) new_nbr_points = stop_index - start_index self._data_2[data_index] = fmi_util.read_trajectory( @@ -1602,7 +1605,6 @@ def get_variables_data(self, raise ValueError(f"Invalid values for {start_index=} and {stop_index=}, " + \ "'start_index' needs to be less than or equal to 'stop_index'.") - trajectories = {} # Get the corresponding time trajectory diff --git a/tests/test_io.py b/tests/test_io.py index 52963010..c2befc7b 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1954,6 +1954,12 @@ def test_trajectory_lengths(self): assert len(rdb.get_variables_data(['h'], 0, 550)[0]['h'].x) == 501 assert 
len(rdb.get_variables_data(['h'], 0, 10000)[0]['h'].x) == 501 + # test different scenarios of start_index out of bounds + assert len(rdb.get_variables_data(['h'], 501, 502)[0]['h'].x) == 0 + assert len(rdb.get_variables_data(['h'], 501, None)[0]['h'].x) == 0 + assert len(rdb.get_variables_data(['h'], 501)[0]['h'].x) == 0 + assert len(rdb.get_variables_data(['h'], 1234567)[0]['h'].x) == 0 + if assimulo_installed: class TestFileSizeLimit: From a510568cd98069cc9757ea71c30999efd1ed7b30 Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Mon, 11 Nov 2024 09:46:35 -0500 Subject: [PATCH 12/15] Added another fix for stop_index>data points, updated docstring --- src/common/io.py | 20 +++++++++++++------- tests/test_io.py | 5 +++++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 78fac0c6..86f6d828 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1569,9 +1569,15 @@ def get_variables_data(self, Improper values for start_index and stop_index that are out of bounds are automatically corrected, such that: Negative values are always adjusted to 0 or larger. - Out of bounds for stop_index is adjusted for the number of available data points, as an example - if you set start_index = 0, stop_index = 5 but there are only 3 data points available, - then this function returns 3 data_points. + Out of bounds for stop_index is adjusted for the number of available data points, example: + If start_index = 0, stop_index = 5 and there are only 3 data points available, + then returned trajectories are of length 3. + If start_index is larger than or equal to the number of available data points, empty trajectories + are returned, i.e. trajectories of length 0. + Note that trajectories for parameters are always of length 2 if indices 0 and 1 are + part of the requested trajectory since they reflect the values of before and after initialization. 
+ Therefore, if you request a trajectory for a parameter with start_index>=2, the returned trajectory is empty. + By default, start_index = 0 and stop_index = None, which implies that the full trajectory is returned. Parameters:: @@ -1614,9 +1620,9 @@ def get_variables_data(self, # Since we interpolate data if diagnostics is enabled time = self._get_diagnostics_trajectory(0, start_index, stop_index) - # Need to account for data that might be added while we are iterating over 'names' later - if stop_index is None: - stop_index = len(time) + start_index + # If stop_index > number of data points, and data gets added while we are iterating + # then we might get trajectories of unequal lengths. Therefore ensure we set stop_index here accordingly. + stop_index = min(len(time) + start_index, float('inf') if stop_index is None else stop_index) for name in names: trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) @@ -1624,7 +1630,7 @@ def get_variables_data(self, largest_trajectory_length = -1 for v, t in trajectories.items(): largest_trajectory_length = max(largest_trajectory_length, len(t.x)) - new_start_index = start_index + largest_trajectory_length if trajectories else None + new_start_index = (start_index + largest_trajectory_length) if trajectories else start_index self._last_set_of_indices = (start_index, stop_index) # update them before we exit return trajectories, new_start_index diff --git a/tests/test_io.py b/tests/test_io.py index c2befc7b..746117e0 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -1960,6 +1960,11 @@ def test_trajectory_lengths(self): assert len(rdb.get_variables_data(['h'], 501)[0]['h'].x) == 0 assert len(rdb.get_variables_data(['h'], 1234567)[0]['h'].x) == 0 + # Verify next_start_index also for no variables is equal to start_index + assert rdb.get_variables_data([], start_index = 0)[1] == 0 + assert rdb.get_variables_data([], start_index = 1)[1] == 1 + assert rdb.get_variables_data([], 
start_index = 5)[1] == 5 + if assimulo_installed: class TestFileSizeLimit: From 81fe25e1f8eca48f5aaf59db7dd5c4c14825315c Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Tue, 12 Nov 2024 09:38:35 -0500 Subject: [PATCH 13/15] Fixed if-statement for stop_index and removed use of inf --- src/common/io.py | 5 ++++- src/pyfmi/fmi.pyx | 14 +++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 86f6d828..d5966382 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1622,7 +1622,10 @@ def get_variables_data(self, # If stop_index > number of data points, and data gets added while we are iterating # then we might get trajectories of unequal lengths. Therefore ensure we set stop_index here accordingly. - stop_index = min(len(time) + start_index, float('inf') if stop_index is None else stop_index) + if stop_index is None: + stop_index = len(time) + start_index + else: + stop_index = min(len(time) + start_index, stop_index) for name in names: trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) diff --git a/src/pyfmi/fmi.pyx b/src/pyfmi/fmi.pyx index f6b3dd3e..3fb043ec 100644 --- a/src/pyfmi/fmi.pyx +++ b/src/pyfmi/fmi.pyx @@ -692,7 +692,7 @@ cdef class ModelBase: if self._additional_logger: self._additional_logger(module, log_level, message) - + if self._max_log_size_msg_sent: return @@ -4583,7 +4583,7 @@ cdef class FMUModelBase2(ModelBase): if nref == 0: ## get_string([]) return [] - + cdef FMIL.fmi2_string_t* output_value = FMIL.malloc(sizeof(FMIL.fmi2_string_t)*nref) self._log_handler.capi_start_callback(self._max_log_size_msg_sent, self._current_log_size) @@ -4977,7 +4977,7 @@ cdef class FMUModelBase2(ModelBase): def set_debug_logging(self, logging_on, categories = []): """ Specifies if the debugging should be turned on or off and calls fmi2SetDebugLogging - for the specified categories, after checking they are valid. 
+ for the specified categories, after checking they are valid. Parameters:: @@ -5827,7 +5827,7 @@ cdef class FMUModelBase2(ModelBase): relative_quantity = FMIL.fmi2_import_get_real_variable_relative_quantity(real_variable) return relative_quantity == FMI2_TRUE - + cpdef get_variable_unbounded(self, variable_name): """ Get the unbounded attribute of a real variable. @@ -7853,7 +7853,7 @@ cdef class FMUModelME2(FMUModelBase2): Deallocate memory allocated """ self._invoked_dealloc = 1 - + if self._initialized_fmu == 1: FMIL.fmi2_import_terminate(self._fmu) @@ -9067,8 +9067,8 @@ cdef class LogHandler: pass cdef class LogHandlerDefault(LogHandler): - """Default LogHandler that uses checkpoints around FMI CAPI calls to - ensure logs are truncated at checkpoints. For FMUs generating XML during + """Default LogHandler that uses checkpoints around FMI CAPI calls to + ensure logs are truncated at checkpoints. For FMUs generating XML during CAPI calls, this ensures valid XML. """ def __init__(self, max_log_size): super().__init__(max_log_size) From 51228a70c4b4c6dcc29f22ead12a0dcffc19e03b Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Tue, 12 Nov 2024 10:59:44 -0500 Subject: [PATCH 14/15] improved code and added test for next start index --- src/common/io.py | 24 +++++++---- tests/test_io.py | 103 ++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 114 insertions(+), 13 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index d5966382..7b20c208 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1630,14 +1630,24 @@ def get_variables_data(self, for name in names: trajectories[name] = self._get_variable_data_as_trajectory(name, time, start_index, stop_index) - largest_trajectory_length = -1 - for v, t in trajectories.items(): - largest_trajectory_length = max(largest_trajectory_length, len(t.x)) + largest_trajectory_length = self._find_max_trajectory_length(trajectories) new_start_index = (start_index + largest_trajectory_length) if 
trajectories else start_index self._last_set_of_indices = (start_index, stop_index) # update them before we exit return trajectories, new_start_index + def _find_max_trajectory_length(self, trajectories): + """ + Given a dict of trajectories, find the length of the largest trajectory + among the set of continuous variables. We disregard parameters/constants since they are not stored + with the same amount of data points as trajectories for continuous variables. + """ + length = 0 + for var_name, trajectory in trajectories.items(): + if self.is_variable(var_name): # since we only consider continuous variables + length = max(length, len(trajectory.x)) + return length + def _calculate_events_and_steps(self, name): if name in self._data_3: return self._data_3[name] @@ -1712,15 +1722,13 @@ def is_variable(self, name): return True elif '{}.'.format(DiagnosticsBase.calculated_diagnostics['nbr_state_limits_step']['name']) in name: return True + variable_index = self.get_variable_index(name) data_mat = self._dataInfo[0][variable_index] - if data_mat<1: + if data_mat < 1: data_mat = 1 - if data_mat == 1: - return False - else: - return True + return data_mat != 1 def is_negated(self, name): """ diff --git a/tests/test_io.py b/tests/test_io.py index 746117e0..c9e270ef 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -24,11 +24,30 @@ from collections import OrderedDict from pyfmi import testattr -from pyfmi.fmi import FMUException, FMUModelME2 -from pyfmi.common.io import (ResultHandler, ResultDymolaTextual, ResultDymolaBinary, JIOError, ResultSizeError, - ResultHandlerCSV, ResultCSVTextual, ResultHandlerBinaryFile, ResultHandlerFile) -from pyfmi.common.io import get_result_handler -from pyfmi.common.diagnostics import DIAGNOSTICS_PREFIX, setup_diagnostics_variables +from pyfmi.fmi import ( + FMUException, + FMUModelME2, + FMI2_PARAMETER, + FMI2_CONSTANT, + FMI2_LOCAL +) +from pyfmi.common.io import ( + ResultHandler, + ResultDymolaTextual, + ResultDymolaBinary, + 
JIOError, + ResultSizeError, + ResultHandlerCSV, + ResultCSVTextual, + ResultHandlerBinaryFile, + ResultHandlerFile, + Trajectory, + get_result_handler +) +from pyfmi.common.diagnostics import ( + DIAGNOSTICS_PREFIX, + setup_diagnostics_variables +) import pyfmi.fmi as fmi from pyfmi.tests.test_util import Dummy_FMUModelME1, Dummy_FMUModelME2, Dummy_FMUModelCS2 @@ -1695,6 +1714,80 @@ def test_csv_options_cs2(self): class TestResultDymolaBinary: + def test_next_start_index(self): + """ + Test that calculation of the next start index works as expected. + + This test sets up a dummy FMU and dummy trajectories since we need + trajectories of uneven lengths. + + """ + # Begin by setting up minimal required environment in order to perform the test + fmu = Dummy_FMUModelME2([], os.path.join(file_path, "files", "FMUs", "XML", "ME2.0", "CoupledClutches.fmu"), + _connect_dll=False) + + result_handler = ResultHandlerBinaryFile(fmu) + + opts = fmu.simulate_options() + opts["result_handling"] = "binary" + opts["result_handler"] = result_handler + + fmu.setup_experiment() + fmu.initialize() + opts["initialize"] = False + + result_handler.set_options(opts) # required in order to call simulation_start() + result_handler.initialize_complete() + result_handler.simulation_start() + + fmu.set('J4.phi', 1) # arbitrary + result_handler.integration_point() + rdb = ResultDymolaBinary(fmu.get_last_result_file(), allow_file_updates=True) + + # Actual test starts below + vars_to_test = [ + 'J1.J', # this is a parameter + 'clutch1.Backward' # this is a constant + ] + + # if this is not True, then the rest of test does not hold + assert vars_to_test[0] in result_handler.model.get_model_variables(causality = FMI2_PARAMETER).keys() + assert vars_to_test[1] in result_handler.model.get_model_variables(variability = FMI2_CONSTANT).keys() + assert 'J4.phi' in result_handler.model.states.keys() + + + for v in vars_to_test: + trajectories1 = { + 'J4.phi': Trajectory(np.array([]), np.array([])), + v: 
Trajectory(np.array([0]), np.array([1])) + } + + trajectories2 = { + 'J4.phi': Trajectory(np.array([0]), np.array([1])), + v: Trajectory(np.array([0, 1]), np.array([1, 1])) + } + + trajectories3 = { + 'J4.phi': Trajectory(np.array([0]), np.array([1])), + v: Trajectory(np.array([0]), np.array([1])) + } + + trajectories4 = { + 'J4.phi': Trajectory(np.array([0, 1]), np.array([1, 1])), + v: Trajectory(np.array([0]), np.array([1])) + } + + trajectories5 = { + 'J4.phi': Trajectory(np.array([0, 1, 2]), np.array([1, 1, 1])), + v: Trajectory(np.array([0]), np.array([1])) + } + + assert rdb._find_max_trajectory_length(trajectories1) == 0 + assert rdb._find_max_trajectory_length(trajectories2) == 1 + assert rdb._find_max_trajectory_length(trajectories3) == 1 + assert rdb._find_max_trajectory_length(trajectories4) == 2 + assert rdb._find_max_trajectory_length(trajectories5) == 3 + def _test_get_variables_data(self, dynamic_diagnostics: bool, nbr_of_calls: int, diag_data_ratio: int, vars_to_test: list, stop_index_function: callable, result_file_name: str) -> dict: """ From afba33205c24254e20aa59721c3155c1833d391a Mon Sep 17 00:00:00 2001 From: Robin Andersson Date: Wed, 13 Nov 2024 09:27:31 -0500 Subject: [PATCH 15/15] Changed for loop to another expression with better performance --- src/common/io.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/io.py b/src/common/io.py index 7b20c208..9e91ace0 100644 --- a/src/common/io.py +++ b/src/common/io.py @@ -1642,11 +1642,7 @@ def _find_max_trajectory_length(self, trajectories): among the set of continuous variables. We disregard parameters/constants since they are not stored with the same amount of data points as trajectories for continuous variables. 
""" - length = 0 - for var_name, trajectory in trajectories.items(): - if self.is_variable(var_name): # since we only consider continuous variables - length = max(length, len(trajectory.x)) - return length + return max([0] + [len(t.x) for v, t in trajectories.items() if self.is_variable(v)]) def _calculate_events_and_steps(self, name): if name in self._data_3: