From 72f344254df2a0ffebdb74838e1d1aad7467a00b Mon Sep 17 00:00:00 2001 From: David HERNANDEZ Date: Thu, 12 May 2022 00:51:27 +0200 Subject: [PATCH] Imp - Finished all improvements for new release --- CHANGELOG.md | 2 + Dockerfile | 6 +-- README.md | 49 ++++++++++++++++++++----- config_emhass.json | 55 ---------------------------- docs/conf.py | 2 +- docs/intro.md | 63 ++++++++++++++++++++++++++++---- requirements_webserver.txt | 2 +- setup.py | 2 +- src/emhass/command_line.py | 6 +-- src/emhass/optimization.py | 43 +++++++++++++--------- src/emhass/retrieve_hass.py | 5 ++- src/emhass/utils.py | 12 +++--- src/emhass/web_server.py | 51 ++++++++++++++------------ templates/index.html | 51 ++++++++++++++++++++++++-- tests/test_command_line_utils.py | 9 ++--- tests/test_forecast.py | 9 ++--- tests/test_optimization.py | 10 ++--- tests/test_retrieve_hass.py | 9 ++--- 18 files changed, 229 insertions(+), 157 deletions(-) delete mode 100644 config_emhass.json diff --git a/CHANGELOG.md b/CHANGELOG.md index bc9a6d1e..bbdc6797 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added add-on paramter to command line to define if launching emhass from add-on or in standalone mode. - Added new testing file for command_line. - Added a webserver. Moved the webserver from the add-on to the core emhass module. +- Added a WSGI production server for flask using waitress. - Added a Dockerfile and procedure to run emhass in standalone mode. +- Updated the documentation. ## [0.2.14] - 2022-05-05 ### Improvement diff --git a/Dockerfile b/Dockerfile index a438f605..8c1127c3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,11 +19,9 @@ COPY src/emhass/optimization.py /app/src/emhass/optimization.py COPY src/emhass/retrieve_hass.py /app/src/emhass/retrieve_hass.py COPY src/emhass/utils.py /app/src/emhass/utils.py COPY src/emhass/web_server.py /app/src/emhass/web_server.py -COPY config_emhass.json /app/config_emhass.json -COPY secrets_emhass.yaml /app/secrets_emhass.yaml COPY data/opt_res_dayahead_latest.csv /app/data/opt_res_dayahead_latest.csv -COPY templates/index.html /app/templates/index.html -COPY static/style.css /app/static/style.css +COPY templates/index.html /app/src/emhass/templates/index.html +COPY static/style.css /app/src/emhass/static/style.css RUN python3 setup.py install diff --git a/README.md b/README.md index ec9b9b75..ce901e64 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,7 @@ python3 -m pip install --upgrade emhass ### Using Docker -To install using docker you will need to build your image locally. For this use the rovided make file with this command: +To install using docker you will need to build your image locally. For this clone this repository, setup your `config_emhass.yaml` file and use the provided make file with this command: ``` make -f deploy_docker.mk clean_deploy ``` @@ -81,7 +81,7 @@ docker load -i .tar ``` Finally launch the docker itself: ``` -docker run -it --restart always -p 5000:5000 -e "LOCAL_COSTFUN=profit" --name DockerEMHASS +docker run -it --restart always -p 5000:5000 -e "LOCAL_COSTFUN=profit" -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS ``` ### The EMHASS add-on @@ -98,11 +98,12 @@ These architectures are supported: `amd64`, `armv7` and `aarch64`. To run a command simply use the `emhass` command followed by the needed arguments. 
The available arguments are:
-- `--action`: That is used to set the desired action, options are: `perfect-optim`, `dayahead-optim` and `publish-data`
+- `--action`: That is used to set the desired action, options are: `perfect-optim`, `dayahead-optim`, `naive-mpc-optim` and `publish-data`
- `--config`: Define path to the config.yaml file (including the yaml file itself)
- `--costfun`: Define the type of cost function, this is optional and the options are: `profit` (default), `cost`, `self-consumption`
- `--log2file`: Define if we should log to a file or not, this is optional and the options are: `True` or `False` (default)
-- `--params`: Configuration and data passed as JSON. This can be used to pass you own forecast data to EMHASS.
+- `--params`: Configuration as JSON.
+- `--runtimeparams`: Data passed at runtime. This can be used to pass your own forecast data to EMHASS.
- `--version`: Show the current version of EMHASS.

For example, the following command can be used to perform a day-ahead optimization task:
@@ -111,14 +112,16 @@
emhass --action 'dayahead-optim' --config '/home/user/emhass/config_emhass.yaml'
```
Before running any valuable command you need to modify the `config_emhass.yaml` and `secrets_emhass.yaml` files. These files should contain the information adapted to your own system. To do this take a look at the special section for this in the [documentation](https://emhass.readthedocs.io/en/latest/config.html).

-If using the add-on, it exposes a simple webserver on port 5000. You can access it directly using your brower, ex: http://localhost:5000.
+If using the add-on or the standalone docker installation, it exposes a simple webserver on port 5000. You can access it directly using your browser, ex: http://localhost:5000.

With this web server you can perform RESTful POST commands on one ENDPOINT called `action` with the following main options:

-- A POST call to `action/dayahead-optim` to perform a day-ahead optimization task of your home energy
-- A POST call to `action/publish-data ` to publish the optimization results data.
+- A POST call to `action/perfect-optim` to perform a perfect optimization task on the historical data.
+- A POST call to `action/dayahead-optim` to perform a day-ahead optimization task of your home energy.
+- A POST call to `action/naive-mpc-optim` to perform a naive Model Predictive Controller optimization task. If using this option you will need to define the correct `runtimeparams` (see further below).
+- A POST call to `action/publish-data` to publish the optimization results data for the current timestamp.

-A `curl` command can the be used to launch an optimization task like this: `curl -i -H "Content-Type: application/json" -X POST -d '{}' http://localhost:5000/action/dayahead-optim`.
+A `curl` command can then be used to launch an optimization task like this: `curl -i -H "Content-Type: application/json" -X POST -d '{}' http://localhost:5000/action/dayahead-optim`.

## Home Assistant integration

@@ -152,7 +155,7 @@
And in `automations.yaml`:
    action:
    - service: shell_command.publish_data
```
-In these automations the optimization is performed everyday at 5:30am and the data is published every 5 minutes.
+In these automations the day-ahead optimization is performed every day at 5:30am and the data is published every 5 minutes.

Create the file `dayahead_optim.sh` with the following content:
```
@@ -237,6 +240,34 @@
The possible dictionary keys to pass data are:
- `prod_price_forecast` for the PV production selling price forecast.
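As a companion to the `curl` calls shown in this section, the same runtime data can be posted from Python. The snippet below is only an illustrative sketch, not part of EMHASS: it assumes the webserver is reachable on `http://localhost:5000`, a 30 min optimization time step (so 48 values per day), and made-up forecast values that you would replace with data from your own forecast provider.

```python
import requests

# Illustrative forecasts at 30 min resolution covering one day (48 values each).
runtime_data = {
    "pv_power_forecast": [0.0] * 16 + [500.0] * 16 + [0.0] * 16,   # W, placeholder values
    "load_cost_forecast": [0.1419] * 24 + [0.1907] * 24,           # currency/kWh, placeholder values
}

# Equivalent to: curl -H "Content-Type: application/json" -X POST -d '{...}' .../action/dayahead-optim
response = requests.post(
    "http://localhost:5000/action/dayahead-optim",
    json=runtime_data,
    timeout=60,
)
response.raise_for_status()
print(response.status_code, response.text)
```

Each passed list only needs to contain at least as many values as there are forecast time steps; extra values are ignored.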
+### A naive Model Predictive Controller
+
+An MPC controller was introduced in v0.3.0. This is an informal/naive representation of an MPC controller.
+
+An MPC controller performs the following actions:
+
+- Set the prediction horizon and receding horizon parameters.
+- Perform an optimization on the prediction horizon.
+- Apply the first element of the obtained optimized control variables.
+- Repeat at a relatively high frequency, ex: 5 min.
+
+This is the receding horizon principle.
+
+When applying this controller, the following `runtimeparams` should be defined:
+
+- `prediction_horizon` for the MPC prediction horizon. Fix this to at least 5 times the optimization time step.
+
+- `soc_init` for the initial value of the battery SOC for the current iteration of the MPC.
+
+- `soc_final` for the final value of the battery SOC for the current iteration of the MPC.
+
+- `def_total_hours` for the list of deferrable load functioning hours. These values can decrease as the day advances to take into account receding-horizon daily energy objectives for each deferrable load.
+
+A correct call for an MPC optimization should look like:
+
+```
+curl -i -H "Content-Type: application/json" -X POST -d '{"pv_power_forecast":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93, 1164.33, 1046.68, 1559.1, 2091.26, 1556.76, 1166.73, 1516.63, 1391.13, 1720.13, 820.75, 804.41, 251.63, 79.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6,"def_total_hours":[1,3]}' http://localhost:5000/action/naive-mpc-optim
+```

## Development

diff --git a/config_emhass.json b/config_emhass.json
deleted file mode 100644
index 22628a26..00000000
--- a/config_emhass.json
+++ /dev/null
@@ -1,55 +0,0 @@
-{
- "retrieve_hass_conf":
- [
- {"freq": 30},
- {"days_to_retrieve": 2},
- {"var_PV": "sensor.power_photovoltaics"},
- {"var_load": "sensor.power_load_no_var_loads"},
- {"load_negative": false},
- {"set_zero_min": true},
- {"var_replace_zero": ["sensor.power_photovoltaics"]},
- {"var_interp": ["sensor.power_photovoltaics", "sensor.power_load_no_var_loads"]}
- ],
- "optim_conf":
- [
- {"set_use_battery": false},
- {"delta_forecast": 1},
- {"num_def_loads": 2},
- {"P_deferrable_nom": [3000.0, 750.0]},
- {"def_total_hours": [5, 8]},
- {"treat_def_as_semi_cont": [true, true]},
- {"set_def_constant": [false, false]},
- {"weather_forecast_method": "scrapper"},
- {"load_forecast_method": "naive"},
- {"load_cost_forecast_method": "hp_hc_periods"},
- {"list_hp_periods":
- [
- {"period_hp_1": [{"start": "02:54"}, {"end": "15:24"}]},
- {"period_hp_2": [{"start": "17:24"}, {"end": "20:24"}]}
- ]
- },
- {"load_cost_hp": 0.1907},
- {"load_cost_hc": 0.1419},
- {"prod_price_forecast_method": "constant"},
- {"prod_sell_price": 0.065},
- {"set_total_pv_sell": false}
- ],
- "plant_conf":
- [
- {"P_grid_max": 9000},
- {"module_model": "CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M"},
- {"inverter_model": "Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_"},
- {"surface_tilt": 30},
- {"surface_azimuth": 205},
- {"modules_per_string": 16},
- {"strings_per_inverter": 1},
- {"Pd_max": 1000},
- {"Pc_max": 1000},
- {"eta_disch": 0.95},
- {"eta_ch": 0.95},
- {"Enom": 5000},
- {"SOCmin": 0.3},
- {"SOCmax": 0.9},
- {"SOCtarget": 0.6}
- ]
-}
diff --git a/docs/conf.py b/docs/conf.py
index 714d51fc..17ef1a2f 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -22,7 +22,7 @@ author = 'David HERNANDEZ'
# The full version, including alpha/beta/rc tags
-release = '0.2.14'
+release = '0.3.0'

# -- General configuration ---------------------------------------------------

diff --git a/docs/intro.md b/docs/intro.md
index d50200db..5a9a3a37 100644
--- a/docs/intro.md
+++ b/docs/intro.md
@@ -39,6 +39,21 @@
To upgrade the installation in the future just use:
python3 -m pip install --upgrade emhass
```
+### Using Docker
+
+To install using docker you will need to build your image locally. For this, clone this repository, set up your `config_emhass.yaml` file and use the provided make file with this command:
+```
+make -f deploy_docker.mk clean_deploy
+```
+Then load the image in the .tar file:
+```
+docker load -i .tar
+```
+Finally launch the docker itself:
+```
+docker run -it --restart always -p 5000:5000 -e "LOCAL_COSTFUN=profit" -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS
+```
+
### The EMHASS add-on

For Home Assistant OS and HA Supervised users, I've developed an add-on that will help you use EMHASS. The add-on is more user friendly as the configuration can be modified directly in the add-on options pane and also it exposes a web ui that can be used to inspect the optimization results and manually trigger a new optimization.

@@ -53,11 +68,12 @@
These architectures are supported: `amd64`, `armv7` and `aarch64`.

To run a command simply use the `emhass` command followed by the needed arguments.

The available arguments are:
-- `--action`: That is used to set the desired action, options are: `perfect-optim`, `dayahead-optim` and `publish-data`
-- `--config`: Define path to the config_emhass.yaml file (including the yaml file itself)
+- `--action`: That is used to set the desired action, options are: `perfect-optim`, `dayahead-optim`, `naive-mpc-optim` and `publish-data`
+- `--config`: Define path to the config.yaml file (including the yaml file itself)
- `--costfun`: Define the type of cost function, this is optional and the options are: `profit` (default), `cost`, `self-consumption`
- `--log2file`: Define if we should log to a file or not, this is optional and the options are: `True` or `False` (default)
-- `--params`: Configuration and data passed as JSON. This can be used to pass you own forecast data to EMHASS.
+- `--params`: Configuration as JSON.
+- `--runtimeparams`: Data passed at runtime. This can be used to pass your own forecast data to EMHASS.
- `--version`: Show the current version of EMHASS.

For example, the following command can be used to perform a day-ahead optimization task:
@@ -66,14 +82,16 @@
emhass --action 'dayahead-optim' --config '/home/user/emhass/config_emhass.yaml'
```
Before running any valuable command you need to modify the `config_emhass.yaml` and `secrets_emhass.yaml` files. These files should contain the information adapted to your own system. To do this take a look at the special section for this in the [documentation](https://emhass.readthedocs.io/en/latest/config.html).

-If using the add-on, it exposes a simple webserver on port 5000. You can access it directly using your brower, ex: http://localhost:5000.
+If using the add-on or the standalone docker installation, it exposes a simple webserver on port 5000. You can access it directly using your browser, ex: http://localhost:5000.
With this web server you can perform RESTful POST commands on one ENDPOINT called `action` with the following main options:

-- A POST call to `action/dayahead-optim` to perform a day-ahead optimization task of your home energy
-- A POST call to `action/publish-data ` to publish the optimization results data.
+- A POST call to `action/perfect-optim` to perform a perfect optimization task on the historical data.
+- A POST call to `action/dayahead-optim` to perform a day-ahead optimization task of your home energy.
+- A POST call to `action/naive-mpc-optim` to perform a naive Model Predictive Controller optimization task. If using this option you will need to define the correct `runtimeparams` (see further below).
+- A POST call to `action/publish-data` to publish the optimization results data for the current timestamp.

-A `curl` command can the be used to launch an optimization task like this: `curl -i -H "Content-Type: application/json" -X POST -d '{}' http://localhost:5000/action/dayahead-optim`.
+A `curl` command can then be used to launch an optimization task like this: `curl -i -H "Content-Type: application/json" -X POST -d '{}' http://localhost:5000/action/dayahead-optim`.

## Home Assistant integration

@@ -107,7 +125,7 @@
And in `automations.yaml`:
    action:
    - service: shell_command.publish_data
```
-In these automations the optimization is performed everyday at 5:30am and the data is published every 5 minutes.
+In these automations the day-ahead optimization is performed every day at 5:30am and the data is published every 5 minutes.

Create the file `dayahead_optim.sh` with the following content:
```
@@ -191,3 +209,32 @@
The possible dictionary keys to pass data are:
- `load_cost_forecast` for the Load cost forecast.
- `prod_price_forecast` for the PV production selling price forecast.
+
+### A naive Model Predictive Controller
+
+An MPC controller was introduced in v0.3.0. This is an informal/naive representation of an MPC controller.
+
+An MPC controller performs the following actions:
+
+- Set the prediction horizon and receding horizon parameters.
+- Perform an optimization on the prediction horizon.
+- Apply the first element of the obtained optimized control variables.
+- Repeat at a relatively high frequency, ex: 5 min.
+
+This is the receding horizon principle.
+
+When applying this controller, the following `runtimeparams` should be defined:
+
+- `prediction_horizon` for the MPC prediction horizon. Fix this to at least 5 times the optimization time step.
+
+- `soc_init` for the initial value of the battery SOC for the current iteration of the MPC.
+
+- `soc_final` for the final value of the battery SOC for the current iteration of the MPC.
+
+- `def_total_hours` for the list of deferrable load functioning hours. These values can decrease as the day advances to take into account receding-horizon daily energy objectives for each deferrable load.
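The receding-horizon loop itself is driven from outside EMHASS, for example by a Home Assistant automation or a small script. The hypothetical Python sketch below only illustrates the idea: the two helper functions are placeholders you would replace with your own SOC reading and bookkeeping, and the URL assumes the webserver from the previous section running on localhost.

```python
import time
import requests

BASE_URL = "http://localhost:5000/action"  # EMHASS webserver; adjust host/port if needed

def read_battery_soc() -> float:
    """Placeholder: read the current SOC from your own source (e.g. a HA sensor)."""
    return 0.5

def remaining_deferrable_hours() -> list:
    """Placeholder: functioning hours still required today for each deferrable load."""
    return [1, 3]

while True:
    runtimeparams = {
        "prediction_horizon": 10,                  # value taken from the curl example in this section
        "soc_init": read_battery_soc(),            # SOC at the start of this iteration
        "soc_final": 0.6,                          # SOC targeted at the end of the horizon
        "def_total_hours": remaining_deferrable_hours(),
        # Forecast data such as "pv_power_forecast" can also be added here, as in the curl example.
    }
    requests.post(f"{BASE_URL}/naive-mpc-optim", json=runtimeparams, timeout=120)
    requests.post(f"{BASE_URL}/publish-data", json={}, timeout=60)
    time.sleep(5 * 60)  # receding horizon: repeat at a relatively high frequency, e.g. every 5 min
```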
+ +A correct call for a MPC optimization should look like: + +``` +curl -i -H "Content-Type: application/json" -X POST -d '{"pv_power_forecast":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93, 1164.33, 1046.68, 1559.1, 2091.26, 1556.76, 1166.73, 1516.63, 1391.13, 1720.13, 820.75, 804.41, 251.63, 79.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6,"def_total_hours":[1,3]}' http://localhost:5000/action/naive-mpc-optim +``` \ No newline at end of file diff --git a/requirements_webserver.txt b/requirements_webserver.txt index 0c826dac..5e6c0b86 100644 --- a/requirements_webserver.txt +++ b/requirements_webserver.txt @@ -11,6 +11,6 @@ pyyaml>=5.4.1 netcdf4>=1.5.3 tables>=3.6.1 flask>=2.0.3 -#waitress>=2.1.1 +waitress>=2.1.1 #Paste>=3.5.0 plotly>=5.6.0 \ No newline at end of file diff --git a/setup.py b/setup.py index f8023455..33b54f43 100644 --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ setup( name='emhass', # Required - version='0.2.14', # Required + version='0.3.0', # Required description='An Energy Management System for Home Assistant', # Optional long_description=long_description, # Optional long_description_content_type='text/markdown', # Optional (see note above) diff --git a/src/emhass/command_line.py b/src/emhass/command_line.py index a9f10f61..cb11460f 100644 --- a/src/emhass/command_line.py +++ b/src/emhass/command_line.py @@ -184,10 +184,10 @@ def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger, prediction_horizon = input_data_dict['params']['passed_data']['prediction_horizon'] soc_init = input_data_dict['params']['passed_data']['soc_init'] soc_final = input_data_dict['params']['passed_data']['soc_final'] - past_def_load_energies = input_data_dict['params']['passed_data']['past_def_load_energies'] + def_total_hours = input_data_dict['params']['passed_data']['def_total_hours'] opt_res_naive_mpc = input_data_dict['opt'].perform_naive_mpc_optim( df_input_data_dayahead, input_data_dict['P_PV_forecast'], input_data_dict['P_load_forecast'], - prediction_horizon, soc_init, soc_final, past_def_load_energies) + prediction_horizon, soc_init, soc_final, def_total_hours) # Save CSV file for publish_data if save_data_to_file: today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0) @@ -260,7 +260,7 @@ def main(): """Define the main command line entry function.""" # Parsing arguments parser = argparse.ArgumentParser() - parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim and publish-data') + parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim, naive-mpc-optim and publish-data') parser.add_argument('--config', type=str, help='Define path to the config.yaml file') parser.add_argument('--costfun', type=str, default='profit', help='Define the type of cost function, options are: profit, cost, self-consumption') parser.add_argument('--log2file', type=bool, default=False, help='Define if we should log to a file or not') diff --git a/src/emhass/optimization.py b/src/emhass/optimization.py index 477a2655..98fd9072 100644 --- a/src/emhass/optimization.py +++ b/src/emhass/optimization.py @@ -78,7 +78,7 @@ def __init__(self, retrieve_hass_conf: dict, optim_conf: dict, plant_conf: dict, def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: np.array, unit_load_cost: np.array, unit_prod_price: np.array, 
soc_init: Optional[float] = None, soc_final: Optional[float] = None, - past_def_load_energies: Optional[list] = None) -> pd.DataFrame: + def_total_hours: Optional[list] = None) -> pd.DataFrame: """ Perform the actual optimization using linear programming (LP). @@ -104,9 +104,8 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n :param soc_final: The final battery SOC for the optimization. This parameter \ is optional, if not given soc_init = soc_final = soc_target from the configuration file. :type soc_final: - :param past_def_load_energies: The past already distributed values of the objective energy \ - for each deferrable load. - :type past_def_load_energies: list + :param def_total_hours: The functioning hours for this iteration for each deferrable load. + :type def_total_hours: list :return: The input DataFrame with all the different results from the \ optimization appended :rtype: pd.DataFrame @@ -123,8 +122,8 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n soc_final = soc_init else: soc_final = self.plant_conf['SOCtarget'] - if past_def_load_energies is None: - past_def_load_energies = [0*i for i in range(self.optim_conf['num_def_loads'])] + if def_total_hours is None: + def_total_hours = self.optim_conf['def_total_hours'] #### The LP problem using Pulp #### opt_model = plp.LpProblem("LP_Model", plp.LpMaximize) @@ -243,13 +242,22 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n for i in set_I}) # Total time of deferrable load - for k in range(self.optim_conf['num_def_loads']): - constraints.update({"constraint_defload{}_energy".format(k) : - plp.LpConstraint( - e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I), - sense = plp.LpConstraintEQ, - rhs = self.optim_conf['def_total_hours'][k]*self.optim_conf['P_deferrable_nom'][k] - past_def_load_energies[k]) - }) + if def_total_hours is None: + for k in range(self.optim_conf['num_def_loads']): + constraints.update({"constraint_defload{}_energy".format(k) : + plp.LpConstraint( + e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I), + sense = plp.LpConstraintEQ, + rhs = self.optim_conf['def_total_hours'][k]*self.optim_conf['P_deferrable_nom'][k]) + }) + else: + for k in range(self.optim_conf['num_def_loads']): + constraints.update({"constraint_defload{}_energy".format(k) : + plp.LpConstraint( + e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I), + sense = plp.LpConstraintEQ, + rhs = def_total_hours[k]*self.optim_conf['P_deferrable_nom'][k]) + }) # Treat deferrable load as a semi-continuous variable for k in range(self.optim_conf['num_def_loads']): @@ -464,7 +472,7 @@ def perform_dayahead_forecast_optim(self, df_input_data: pd.DataFrame, def perform_naive_mpc_optim(self, df_input_data: pd.DataFrame, P_PV: pd.Series, P_load: pd.Series, prediction_horizon: int, soc_init: Optional[float] = None, soc_final: Optional[float] = None, - past_def_load_energies: Optional[list] = None) -> pd.DataFrame: + def_total_hours: Optional[list] = None) -> pd.DataFrame: """ Perform a naive approach to a Model Predictive Control (MPC). \ This implementaion is naive because we are not using the formal formulation \ @@ -487,9 +495,8 @@ def perform_naive_mpc_optim(self, df_input_data: pd.DataFrame, P_PV: pd.Series, :param soc_final: The final battery SOC for the optimization. This parameter \ is optional, if not given soc_init = soc_final = soc_target from the configuration file. 
:type soc_final: - :param past_def_load_energies: The past already distributed values of the objective energy \ - for each deferrable load. - :type past_def_load_energies: list + :param def_total_hours: The functioning hours for this iteration for each deferrable load. + :type def_total_hours: list :return: opt_res: A DataFrame containing the optimization results :rtype: pandas.DataFrame @@ -509,6 +516,6 @@ def perform_naive_mpc_optim(self, df_input_data: pd.DataFrame, P_PV: pd.Series, # Call optimization function self.opt_res = self.perform_optimization(df_input_data, P_PV.values.ravel(), P_load.values.ravel(), unit_load_cost, unit_prod_price, soc_init=soc_init, - soc_final=soc_final, past_def_load_energies=past_def_load_energies) + soc_final=soc_final, def_total_hours=def_total_hours) return self.opt_res \ No newline at end of file diff --git a/src/emhass/retrieve_hass.py b/src/emhass/retrieve_hass.py index f50cfc7d..49f80ae6 100644 --- a/src/emhass/retrieve_hass.py +++ b/src/emhass/retrieve_hass.py @@ -91,7 +91,7 @@ def get_data(self, days_list: pd.date_range, var_list: list, minimal_response: O for i, var in enumerate(var_list): - if self.params is not None: # If this is the case we suppose that we are using the supervisor API + if self.hass_url == "http://supervisor/core/api": # If we are using the supervisor API url = self.hass_url+"/history/period/"+day.isoformat()+"?filter_entity_id="+var else: # Otherwise the Home Assistant Core API it is url = self.hass_url+"api/history/period/"+day.isoformat()+"?filter_entity_id="+var @@ -187,6 +187,7 @@ def prepare_data(self, var_load: str, load_negative: Optional[bool] = False, set if new_var_interp is not None: self.df_final[new_var_interp] = self.df_final[new_var_interp].interpolate( method='linear', axis=0, limit=None) + self.df_final[new_var_interp] = self.df_final[new_var_interp].fillna(0.0) # Setting the correct time zone on DF index if self.time_zone is not None: self.df_final.index = self.df_final.index.tz_convert(self.time_zone) @@ -212,7 +213,7 @@ def post_data(self, data_df: pd.DataFrame, idx: int, entity_id: str, :type friendly_name: str """ - if self.params is not None: # If this is the case we suppose that we are using the supervisor API + if self.hass_url == "http://supervisor/core/api": # If we are using the supervisor API url = self.hass_url+"/states/"+entity_id else: # Otherwise the Home Assistant Core API it is url = self.hass_url+"api/states/"+entity_id diff --git a/src/emhass/utils.py b/src/emhass/utils.py index 7f202ca4..8326252e 100644 --- a/src/emhass/utils.py +++ b/src/emhass/utils.py @@ -76,13 +76,13 @@ def treat_runtimeparams(runtimeparams: str, params:str, retrieve_hass_conf: dict if runtimeparams is not None: if params is None: params = {'passed_data':{'pv_power_forecast':None,'load_power_forecast':None,'load_cost_forecast':None,'prod_price_forecast':None, - 'prediction_horizon':None,'soc_init':None,'soc_final':None,'past_def_load_energies':None}} + 'prediction_horizon':None,'soc_init':None,'soc_final':None,'def_total_hours':None}} freq = int(retrieve_hass_conf['freq'].seconds/60.0) delta_forecast = int(optim_conf['delta_forecast'].days) forecast_dates = get_forecast_dates(freq, delta_forecast) if set_type == 'naive-mpc-optim': if 'prediction_horizon' not in runtimeparams.keys(): - prediction_horizon = int(20*retrieve_hass_conf['freq'].seconds/60) # 20 time steps by default + prediction_horizon = int(10*retrieve_hass_conf['freq'].seconds/60) # 10 time steps by default else: prediction_horizon = 
runtimeparams['prediction_horizon'] params['passed_data']['prediction_horizon'] = prediction_horizon @@ -96,11 +96,11 @@ def treat_runtimeparams(runtimeparams: str, params:str, retrieve_hass_conf: dict else: soc_final = runtimeparams['soc_final'] params['passed_data']['soc_final'] = soc_final - if 'past_def_load_energies' not in runtimeparams.keys(): - past_def_load_energies = [0*i for i in range(optim_conf['num_def_loads'])] + if 'def_total_hours' not in runtimeparams.keys(): + def_total_hours = optim_conf['def_total_hours'] else: - past_def_load_energies = runtimeparams['past_def_load_energies'] - params['passed_data']['past_def_load_energies'] = past_def_load_energies + def_total_hours = runtimeparams['def_total_hours'] + params['passed_data']['def_total_hours'] = def_total_hours forecast_dates = copy.deepcopy(forecast_dates)[0:int(pd.Timedelta(prediction_horizon, unit='minutes')/retrieve_hass_conf['freq'])] if 'pv_power_forecast' in runtimeparams.keys(): if type(runtimeparams['pv_power_forecast']) == list and len(runtimeparams['pv_power_forecast']) >= len(forecast_dates): diff --git a/src/emhass/web_server.py b/src/emhass/web_server.py index a3797682..880537ad 100644 --- a/src/emhass/web_server.py +++ b/src/emhass/web_server.py @@ -4,10 +4,10 @@ from flask import Flask, make_response, request from jinja2 import Environment, FileSystemLoader from requests import get -# from waitress import serve -# from paste.translogger import TransLogger +from waitress import serve +from importlib.metadata import version from pathlib import Path -import os, json, argparse, pickle, yaml +import os, json, argparse, pickle, yaml, logging import pandas as pd import plotly.express as px from emhass.command_line import set_input_data_dict @@ -17,6 +17,7 @@ # Define the Flask instance app = Flask(__name__, static_url_path='/static') +app.logger.setLevel(logging.INFO) def get_injection_dict(df, plot_size = 1366): # Create plots @@ -80,14 +81,14 @@ def build_params(params, options, add_on): # The params dict params['params_secrets'] = params_secrets params['passed_data'] = {'pv_power_forecast':None,'load_power_forecast':None,'load_cost_forecast':None,'prod_price_forecast':None, - 'prediction_horizon':None,'soc_init':None,'soc_final':None,'past_def_load_energies':None} + 'prediction_horizon':None,'soc_init':None,'soc_final':None,'def_total_hours':None} return params @app.route('/') def index(): app.logger.info("EMHASS server online, serving index.html...") # Load HTML template - file_loader = FileSystemLoader('/app/templates') + file_loader = FileSystemLoader('/app/src/emhass/templates') env = Environment(loader=file_loader) template = env.get_template('index.html') # Load cache dict @@ -108,12 +109,12 @@ def action_call(action_name): input_data_dict = set_input_data_dict(config_path, str(config_path.parent), costfun, params, runtimeparams, action_name, app.logger) if action_name == 'publish-data': - app.logger.info("Publishing data...") + app.logger.info(" >> Publishing data...") _ = publish_data(input_data_dict, app.logger) msg = f'EMHASS >> Action publish-data executed... 
\n' return make_response(msg, 201) elif action_name == 'perfect-optim': - app.logger.info("Performing perfect optimization...") + app.logger.info(" >> Performing perfect optimization...") opt_res = perfect_forecast_optim(input_data_dict, app.logger) injection_dict = get_injection_dict(opt_res) with open('/app/data/injection_dict.pkl', "wb") as fid: @@ -121,7 +122,7 @@ def action_call(action_name): msg = f'EMHASS >> Action perfect-optim executed... \n' return make_response(msg, 201) elif action_name == 'dayahead-optim': - app.logger.info("Performing dayahead optimization...") + app.logger.info(" >> Performing dayahead optimization...") opt_res = dayahead_forecast_optim(input_data_dict, app.logger) injection_dict = get_injection_dict(opt_res) with open('/app/data/injection_dict.pkl', "wb") as fid: @@ -129,7 +130,7 @@ def action_call(action_name): msg = f'EMHASS >> Action dayahead-optim executed... \n' return make_response(msg, 201) elif action_name == 'naive-mpc-optim': - app.logger.info("Performing naive MPC optimization...") + app.logger.info(" >> Performing naive MPC optimization...") opt_res = naive_mpc_optim(input_data_dict, app.logger) injection_dict = get_injection_dict(opt_res) with open('/app/data/injection_dict.pkl', "wb") as fid: @@ -153,27 +154,26 @@ def action_call(action_name): if args.add_on: OPTIONS_PATH = "/data/options.json" options_json = Path(OPTIONS_PATH) - CONFIG_PATH = "/usr/src/config_emhass.json" + CONFIG_PATH = "/usr/src/config_emhass.yaml" config_path = Path(CONFIG_PATH) hass_url = args.url key = args.key + # Read options info + if options_json.exists(): + with options_json.open('r') as data: + options = json.load(data) + else: + app.logger.error("options.json does not exists") else: - OPTIONS_PATH = "/app/config_emhass.json" - options_json = Path(OPTIONS_PATH) - CONFIG_PATH = "/app/config_emhass.json" + CONFIG_PATH = "/app/config_emhass.yaml" config_path = Path(CONFIG_PATH) - - # Read options info - if options_json.exists(): - with options_json.open('r') as data: - options = json.load(data) - else: - app.logger.error("options.json does not exists") + options = None + # Read example config file if config_path.exists(): - with config_path.open('r') as data: - config = json.load(data) + with open(config_path, 'r') as file: + config = yaml.load(file, Loader=yaml.FullLoader) retrieve_hass_conf = config['retrieve_hass_conf'] optim_conf = config['optim_conf'] plant_conf = config['plant_conf'] @@ -226,6 +226,7 @@ def action_call(action_name): web_ui_url = '0.0.0.0' with open('/app/secrets_emhass.yaml', 'r') as file: params_secrets = yaml.load(file, Loader=yaml.FullLoader) + hass_url = params_secrets['hass_url'] # Build params params = build_params(params, options, args.add_on) @@ -234,5 +235,7 @@ def action_call(action_name): # Launch server port = int(os.environ.get('PORT', 5000)) - app.run(debug=False, host=web_ui_url, port=port) - #serve(TransLogger(app, setup_console_handler=True), host=web_ui_url, port=port) \ No newline at end of file + app.logger.info("Launching the emhass webserver at: http://"+web_ui_url+":"+str(port)) + app.logger.info("Home Assistant data fetch will be performed using url: "+hass_url) + app.logger.info("Using core emhass version: "+version('emhass')) + serve(app, host=web_ui_url, port=port) \ No newline at end of file diff --git a/templates/index.html b/templates/index.html index 4e1f893e..4f13503e 100644 --- a/templates/index.html +++ b/templates/index.html @@ -8,13 +8,26 @@

EMHASS: Energy Management Optimization for Home Assistant

[The remainder of the templates/index.html diff lost its HTML markup during extraction. The recoverable text shows that the single "Basic optimization control" heading is removed and replaced by two captions: "Use the buttons below to manually launch different optimization tasks" and "Use the button below to publish the optimized variables at the current timestamp". A second hunk (@@ -32,6 +45,22 @@) around the original "Basic optimization control" block is likewise not recoverable.]
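For reference, the move from Flask's development server to a waitress WSGI server in `src/emhass/web_server.py` (`from waitress import serve` and `serve(app, host=web_ui_url, port=port)` in this diff) follows the pattern sketched below. This is a minimal stand-alone illustration, not the actual EMHASS server module.

```python
from flask import Flask, make_response
from waitress import serve  # WSGI production server introduced in this release

# Minimal stand-alone example of the pattern: the Flask app is no longer run
# with app.run() but handed over to waitress.
app = Flask(__name__)

@app.route("/")
def index():
    return make_response("EMHASS-style webserver is up\n", 200)

if __name__ == "__main__":
    # web_server.py uses host '0.0.0.0' and the PORT environment variable (default 5000).
    serve(app, host="0.0.0.0", port=5000)
```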