diff --git a/.gitignore b/.gitignore
index b022a5988..1dfde2e1c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -118,3 +118,6 @@ tests/management/data/sdk_project_scraps/run/baseline_scenario/system_parameter.
 tests/management/data/sdk_project_scraps/run/baseline_scenario/DEU_Stuttgart.107380_IWEC*
 geojson_modelica_translator/modelica/buildingslibrary/
 not/
+tests/geojson_modelica_translator/data/modelica_5/modelica_5.Districts.DistrictEnergySystem_results/modelica_5.Districts.DistrictEnergySystem_result.csv
+tests/geojson_modelica_translator/data/modelica_model
+tests/geojson_modelica_translator/data/modelica_multiple/modelica_multiple.Districts.DistrictEnergySystem_results/modelica_multiple.Districts.DistrictEnergySystem_result.csv
diff --git a/geojson_modelica_translator/results_ghp.py b/geojson_modelica_translator/results_ghp.py
new file mode 100644
index 000000000..546c6ead1
--- /dev/null
+++ b/geojson_modelica_translator/results_ghp.py
@@ -0,0 +1,142 @@
+# :copyright (c) URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
+# See also https://github.com/urbanopt/geojson-modelica-translator/blob/develop/LICENSE.md
+import re
+from datetime import datetime, timezone
+from pathlib import Path
+
+import pandas as pd
+from buildingspy.io.outputfile import Reader
+
+
+class ResultsModelica:
+    """Results from Modelica Project Simulation"""
+
+    def __init__(self, modelica_project):
+        self._modelica_project = Path(modelica_project).resolve()
+
+    def calculate_results(self):
+        # Extract the project name from the modelica_project path
+        project_name = self._modelica_project.name
+
+        # Construct the path for the .mat file
+        result_mat_file = (
+            self._modelica_project
+            / f"{project_name}.Districts.DistrictEnergySystem_results"
+            / f"{project_name}.Districts.DistrictEnergySystem_res.mat"
+        )
+
+        # Print the resulting path for debugging purposes
+        print(f"Generated path: {result_mat_file}")
+
+        if result_mat_file.exists():
+            print(f"The path {result_mat_file} exists.")
+        else:
+            print(f"The path {result_mat_file} does not exist.")
+            return
+
+        # Initialize the Reader object
+        results = Reader(result_mat_file, "dymola")
+
+        # Define patterns and output variable names
+        patterns = {
+            "heating_electric_power": r"^TimeSerLoa_\w+\.PHea$",
+            "cooling_electric_power": r"^TimeSerLoa_\w+\.PCoo$",
+            "pump_power": r"^TimeSerLoa_\w+\.PPum$",
+            "ets_pump_power": r"^TimeSerLoa_\w+\.PPumETS$",
+            "Heating system capacity": r"^TimeSerLoa_\w+\.ets.QHeaWat_flow_nominal$",
+            "Cooling system capacity": r"^TimeSerLoa_\w+\.ets.QChiWat_flow_nominal$",
+            "electrical_power_consumed": "pumDis.P",
+        }
+
+        key_value_pairs = {}
+        time_values = None
+
+        for name, pattern in patterns.items():
+            for var in results.varNames(pattern):
+                time, values = results.values(var)  # Unpack the tuple
+                if time_values is None:
+                    time_values = time.tolist()  # Initialize time_values from the first variable
+                key_value_pairs[var] = values.tolist()
+
+        # Convert seconds to timezone-aware datetime and adjust year to 2017
+        def adjust_year(dt):
+            return dt.replace(year=2017)
+
+        # Convert timestamps to timezone-aware datetime objects in UTC
+        time_values = [datetime.fromtimestamp(t, tz=timezone.utc) for t in time_values]
+        adjusted_time_values = [adjust_year(dt) for dt in time_values]
+
+        data_for_df = {
+            "Datetime": adjusted_time_values,
+            "TimeInSeconds": [int(dt.timestamp()) for dt in adjusted_time_values],
+        }
+
+        for var, values in key_value_pairs.items():
+            if len(values) < len(adjusted_time_values):
+                data_for_df[var] = values + [None] * (len(adjusted_time_values) - len(values))
+            elif len(values) > len(adjusted_time_values):
+                trimmed_values = values[: len(adjusted_time_values)]
+                data_for_df[var] = trimmed_values
+            else:
+                data_for_df[var] = values
+
+        df_values = pd.DataFrame(data_for_df)
+
+        # Convert 'Datetime' to datetime and set it as index
+        df_values["Datetime"] = pd.to_datetime(df_values["Datetime"])
+        df_values = df_values.set_index("Datetime")
+
+        # Resample to 1 hour data, taking the first occurrence for each interval
+        df_resampled = df_values.resample("1H").first().reset_index()
+
+        # Format datetime to desired format
+        df_resampled["Datetime"] = df_resampled["Datetime"].dt.strftime("%m/%d/%Y %H:%M")
+
+        # Interpolate only numeric columns
+        numeric_columns = df_resampled.select_dtypes(include=["number"]).columns
+        df_resampled[numeric_columns] = df_resampled[numeric_columns].interpolate(method="linear", inplace=False)
+
+        # Warn if the number of rows is neither 8760 (hourly) nor 8760 * 4 (15-minute)
+        if df_resampled.shape[0] != 8760 and df_resampled.shape[0] != 8760 * 4:
+            print("Data length is incorrect. Expected 8760 (hourly) or 8760 * 4 (15-minute) entries.")
+
+        # Define patterns with placeholders
+        patterns = {
+            "heating_electric_power_#{building_id}": r"^TimeSerLoa_(\w+)\.PHea$",
+            "cooling_electric_power_#{building_id}": r"^TimeSerLoa_(\w+)\.PCoo$",
+            "pump_power_#{building_id}": r"^TimeSerLoa_(\w+)\.PPum$",
+            "ets_pump_power_#{building_id}": r"^TimeSerLoa_(\w+)\.PPumETS$",
+            "heating_system_capacity_#{building_id}": r"^TimeSerLoa_(\w+)\.ets.QHeaWat_flow_nominal$",
+            "cooling_system_capacity_#{building_id}": r"^TimeSerLoa_(\w+)\.ets.QChiWat_flow_nominal$",
+            "electrical_power_consumed": "pumDis.P",
+        }
+
+        # Function to rename columns based on patterns
+        def rename_column(col_name):
+            for key, pattern in patterns.items():
+                match = re.match(pattern, col_name)
+                if match:
+                    if key == "electrical_power_consumed":
+                        return key
+                    try:
+                        building_id = match.group(1)
+                        return key.replace("#{building_id}", building_id)
+                    except IndexError:
+                        print(f"Error: Column '{col_name}' does not match expected pattern.")
+                        return col_name
+            # If no pattern matches, return the original column name
+            return col_name
+
+        # Rename columns
+        df_resampled.columns = [rename_column(col) for col in df_resampled.columns]
+
+        # Define the path to save the CSV file
+        results_dir = self._modelica_project / f"{project_name}.Districts.DistrictEnergySystem_results"
+        csv_file_path = results_dir / f"{project_name}.Districts.DistrictEnergySystem_result.csv"
+
+        # Ensure the results directory exists
+        results_dir.mkdir(parents=True, exist_ok=True)
+
+        df_resampled.to_csv(csv_file_path, index=False)
+
+        print(f"Results saved at: {csv_file_path}")
diff --git a/management/uo_des.py b/management/uo_des.py
index 74862774a..17077b29c 100644
--- a/management/uo_des.py
+++ b/management/uo_des.py
@@ -8,6 +8,7 @@
 
 from geojson_modelica_translator.geojson_modelica_translator import GeoJsonModelicaTranslator
 from geojson_modelica_translator.modelica.modelica_runner import ModelicaRunner
+from geojson_modelica_translator.results_ghp import ResultsModelica
 from geojson_modelica_translator.system_parameters.system_parameters import SystemParameters
 
 CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
@@ -192,7 +193,7 @@ def create_model(sys_param_file: Path, geojson_feature_file: Path, project_path:
 @click.option(
     "-i",
     "--intervals",
-    default=100,
+    default=144,
     help="Number of intervals to divide the simulation into (alternative to step_size)",
     type=int,
 )
@@ -208,6 +209,7 @@ def run_model(modelica_project: Path, start_time: int, stop_time: int, step_size
     default = ./model_from_sdk
 
     \f
+    :param sys_param_file: Path, location and name of file created with this cli
     :param modelica_project: Path, name & location of modelica project, possibly created with this cli
     :param start_time (int): start time of the simulation (seconds of a year)
     :param stop_time (int): stop time of the simulation (seconds of a year)
@@ -241,3 +243,28 @@ def run_model(modelica_project: Path, start_time: int, stop_time: int, step_size
         print(f"\nModelica model {project_name} ran successfully and can be found in {run_location}")
     else:
         raise SystemExit(f"\n{project_name} failed. Check the error log at {run_location}/stdout.log for more info.")
+
+
+@cli.command(short_help="Process Modelica model")
+@click.argument(
+    "modelica_project",
+    default="./model_from_sdk",
+    required=True,
+    type=click.Path(exists=True, file_okay=False, dir_okay=True),
+)
+def des_process(modelica_project: Path):
+    """Post-process the model
+
+    \b
+    Post-process results from a previously run Modelica project, for GHP LCCA analysis
+
+    \b
+    MODELICA_PROJECT: Path to the Modelica project, possibly created by this cli
+    default = ./model_from_sdk
+
+    \f
+    :param modelica_project: Path, name & location of modelica project, possibly created with this cli
+    """
+    modelica_path = Path(modelica_project).resolve()
+    result = ResultsModelica(modelica_path)
+    result.calculate_results()
diff --git a/tests/geojson_modelica_translator/data/modelica_5/modelica_5.Districts.DistrictEnergySystem_results/modelica_5.Districts.DistrictEnergySystem_res.mat b/tests/geojson_modelica_translator/data/modelica_5/modelica_5.Districts.DistrictEnergySystem_results/modelica_5.Districts.DistrictEnergySystem_res.mat
new file mode 100644
index 000000000..3f914caa1
Binary files /dev/null and b/tests/geojson_modelica_translator/data/modelica_5/modelica_5.Districts.DistrictEnergySystem_results/modelica_5.Districts.DistrictEnergySystem_res.mat differ
diff --git a/tests/geojson_modelica_translator/data/modelica_multiple/modelica_multiple.Districts.DistrictEnergySystem_results/modelica_multiple.Districts.DistrictEnergySystem_res.mat b/tests/geojson_modelica_translator/data/modelica_multiple/modelica_multiple.Districts.DistrictEnergySystem_results/modelica_multiple.Districts.DistrictEnergySystem_res.mat
new file mode 100644
index 000000000..cf53c97ea
Binary files /dev/null and b/tests/geojson_modelica_translator/data/modelica_multiple/modelica_multiple.Districts.DistrictEnergySystem_results/modelica_multiple.Districts.DistrictEnergySystem_res.mat differ
diff --git a/tests/geojson_modelica_translator/test_results_ghp.py b/tests/geojson_modelica_translator/test_results_ghp.py
new file mode 100644
index 000000000..0fa4b2416
--- /dev/null
+++ b/tests/geojson_modelica_translator/test_results_ghp.py
@@ -0,0 +1,99 @@
+# :copyright (c) URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
+# See also https://github.com/urbanopt/geojson-modelica-translator/blob/develop/LICENSE.md
+
+
+from pathlib import Path
+
+import pandas as pd
+
+from geojson_modelica_translator.results_ghp import ResultsModelica
+from tests.base_test_case import TestCaseBase
+
+
+class ResultsTest(TestCaseBase):
+    def setUp(self):
+        self.data_dir = Path(__file__).parent / "data"
+
+    def test_result(self):
+        # Construct the path to the Modelica project directory
+        modelica_path = Path(self.data_dir) / "modelica_5"
+        modelica_path = modelica_path.resolve()
+
+        # Construct the path to the CSV file
+        csv_file_path = (
+            modelica_path
+            / "modelica_5.Districts.DistrictEnergySystem_results"
+            / "modelica_5.Districts.DistrictEnergySystem_result.csv"
+        )
+
+        # Delete csv path if it exists
+        if csv_file_path.exists():
+            csv_file_path.unlink()
+
+        result = ResultsModelica(modelica_path)
+        result.calculate_results()
+
+        # Check if the CSV file exists
+        assert csv_file_path.exists(), f"File does not exist at path: {csv_file_path}"
+
+        # Read the CSV file into a DataFrame
+        csv_data = pd.read_csv(csv_file_path)
+
+        assert "Datetime" in csv_data.columns, "The 'Datetime' column is missing from the CSV file."
+
+        assert (
+            "heating_electric_power_d55aa383" in csv_data.columns
+        ), "The heating_electric_power column is missing from the CSV file."
+
+        assert "pump_power_3da62a1d" in csv_data.columns, "The pump_power column is missing from the CSV file."
+
+        assert (
+            "electrical_power_consumed" in csv_data.columns
+        ), "The electrical_power_consumed column is missing from the CSV file."
+
+    def test_result_multiple_ghp(self):
+        # Construct the path to the Modelica project directory
+        modelica_path = Path(self.data_dir) / "modelica_multiple"
+        modelica_path = modelica_path.resolve()
+
+        # Construct the path to the CSV file
+        csv_file_path = (
+            modelica_path
+            / "modelica_multiple.Districts.DistrictEnergySystem_results"
+            / "modelica_multiple.Districts.DistrictEnergySystem_result.csv"
+        )
+
+        # Delete csv path if it exists
+        if csv_file_path.exists():
+            csv_file_path.unlink()
+
+        result = ResultsModelica(modelica_path)
+        result.calculate_results()
+
+        # Check if the CSV file exists
+        assert csv_file_path.exists(), f"File does not exist at path: {csv_file_path}"
+
+        # Read the CSV file into a DataFrame
+        csv_data_multiple = pd.read_csv(csv_file_path)
+
+        assert "Datetime" in csv_data_multiple.columns, "The 'Datetime' column is missing from the CSV file."
+
+        # Check if any columns contain the "heating_electric_power_" substring
+        heating_electric_power = [col for col in csv_data_multiple.columns if "heating_electric_power_" in col]
+
+        assert heating_electric_power, "No columns with 'heating_electric_power' found in the CSV file."
+
+        assert (
+            len(heating_electric_power) == 13
+        ), f"Expected 13 columns with 'heating_electric_power_' but found {len(heating_electric_power)}."
+
+        pump_power = [col for col in csv_data_multiple.columns if "pump_power_" in col]
+
+        # Assert that there is at least one column with the substring
+        assert pump_power, "No columns with 'pump_power' found in the CSV file."
+
+        assert len(pump_power) == 26, f"Expected 26 columns with 'pump_power' but found {len(pump_power)}."
+
+        assert (
+            "electrical_power_consumed" in csv_data_multiple.columns
+        ), "The electrical_power_consumed column is missing from the CSV file."
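
A minimal usage sketch of the new post-processing step. The ./model_from_sdk path is only the CLI default, used here for illustration, and the "uo_des des-process" invocation assumes the uo_des console script exposed by this repo's packaging; the snippet simply mirrors what the new des_process command does internally.

    from pathlib import Path

    import pandas as pd

    from geojson_modelica_translator.results_ghp import ResultsModelica

    # Post-process a Modelica project that was previously created and simulated with this CLI.
    # This is the same call the des_process CLI command makes.
    project = Path("./model_from_sdk").resolve()
    ResultsModelica(project).calculate_results()

    # The hourly CSV is written into the *_results directory next to the .mat file.
    csv_path = (
        project
        / f"{project.name}.Districts.DistrictEnergySystem_results"
        / f"{project.name}.Districts.DistrictEnergySystem_result.csv"
    )
    print(pd.read_csv(csv_path).columns)  # Datetime, heating_electric_power_<building_id>, ...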