diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 337f928d..00000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM mcr.microsoft.com/devcontainers/python:0-3.11 -# EXPOSE 5000:5000 -COPY .devcontainer/setup.sh ./ -COPY requirements.txt ./ -RUN ./setup.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 7b4662b8..41c3b13e 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -3,31 +3,39 @@ { "name": "EMHASS", "build": { - "dockerfile": "Dockerfile", - "context": ".." + "dockerfile": "../Dockerfile", + "context": "../", + "args": { "TARGETARCH": "amd64"} }, "features": { - "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} + "ghcr.io/devcontainers/features/common-utils:2": { + "installZsh": "true", + "configureZshAsDefaultShell": "true", + "installOhMyZsh": "false", + "installOhMyZshConfig": "false" + } + // "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} }, //"appPort": ["5000:5000"] //used to access app from external device (User discretion advised) + + // Security issue, may be needed for Podman + // "runArgs": [ + // "--userns=keep-id", + // "--pid=host" + // ], + // "remoteUser": "root", + // "containerUser": "root", + + "customizations": { // Configure properties specific to VS Code. "vscode": { // Add the IDs of extensions you want installed when the container is created. "extensions": ["ms-python.debugpy", "ms-python.python"] } - } - - - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - // Use 'postCreateCommand' to run commands after the container is created. - // "postCreateCommand": "pip3 install --user -r requirements.txt", - // Configure tool-specific properties. - // "customizations": {}, - // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
- // "remoteUser": "root" -} \ No newline at end of file + }, + + "postCreateCommand": ["pip3", "install", "requests-mock", "--break-system-packages"] + +} diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh deleted file mode 100755 index 2939a230..00000000 --- a/.devcontainer/setup.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -main() { - set -x - apt-get update - apt-get install -y --no-install-recommends \ - coinor-cbc \ - coinor-libcbc-dev \ - gcc \ - gfortran \ - libhdf5-dev \ - libhdf5-serial-dev \ - libnetcdf-dev \ - netcdf-bin - - ln -s /usr/include/hdf5/serial /usr/include/hdf5/include - export HDF5_DIR=/usr/include/hdf5 - pip install netCDF4 - - pip install -r requirements.txt - pip install requests-mock - - rm -rf "$0" -} - -main diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 34bd304f..ea24c0a9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -29,7 +29,7 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'python' ] + language: [ 'python' , 'javascript' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index 1317344a..785b6d2a 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -34,10 +34,9 @@ jobs: context: . platforms: ${{ matrix.platform.buildx }} build-args: | - build_version=standalone TARGETARCH=${{ matrix.platform.target_arch }} os_version=${{ matrix.platform.os_version }} - tags: emhass/standalone-test + tags: emhass load: true - - name: Test #assume docker fail with FileNotFound secrets_emhass.yaml error - run: docker run --rm -it emhass/standalone-test | grep -q secrets_emhass.yaml && echo 0 || echo 1 + - name: Test # Run docker for 2min, if the output contains waitress INFO "Serving on" http://0.0.0.0:5000 assume it worked + run: timeout 2 docker run --rm -it emhass | grep -q "Serving on" && echo 0 || echo 1 diff --git a/.github/workflows/publish_docker.yaml b/.github/workflows/publish_docker.yaml index a64cbe3b..f518cd99 100644 --- a/.github/workflows/publish_docker.yaml +++ b/.github/workflows/publish_docker.yaml @@ -1,4 +1,4 @@ -#template modified from: https://docs.docker.com/build/ci/github-actions/multi-platform/ +#template modified from: https://docs.docker.com/build/ci/github-actions/multi-platform/ & https://docs.github.com/en/actions/use-cases-and-examples/publishing-packages/publishing-docker-images name: "Publish Docker" on: @@ -6,48 +6,61 @@ on: types: [published] workflow_dispatch: +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + jobs: build: runs-on: ubuntu-latest + permissions: + contents: read + packages: write + attestations: write + id-token: write strategy: fail-fast: false matrix: platform: [ - {buildx: linux/amd64, target_arch: amd64, os_version: debian}, - {buildx: linux/arm/v7, target_arch: armv7, os_version: debian}, - {buildx: linux/arm/v7, target_arch: armhf, os_version: raspbian}, - {buildx: linux/arm64, target_arch: aarch64, os_version: debian} + {target_arch: amd64, os_version: debian}, + {target_arch: armv7, os_version: debian}, + {target_arch: armhf, os_version: raspbian}, + {target_arch: aarch64, os_version: debian} ] steps: + # Pull git repo and build each architecture image separately (with QEMU and Buildx) + - name: lowercase repo + run: | 
+ echo "IMAGE_NAME_LOWER=${GITHUB_REPOSITORY,,}" >>${GITHUB_ENV} - name: Checkout the repository uses: actions/checkout@v4 - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: | - ${{ secrets.DOCKERHUB_USERNAME }}/emhass-docker-standalone - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - - name: Login to Docker Hub + - name: Log in to the Container registry uses: docker/login-action@v3 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LOWER }} - name: Build and push by digest id: build uses: docker/build-push-action@v5 with: context: . - platforms: ${{ matrix.platform.buildx }} + platforms: linux/${{ matrix.platform.target_arch }} build-args: | - build_version=standalone TARGETARCH=${{ matrix.platform.target_arch }} os_version=${{ matrix.platform.os_version }} labels: ${{ steps.meta.outputs.labels }} - outputs: type=image,name=${{ secrets.DOCKERHUB_USERNAME }}/emhass-docker-standalone,push-by-digest=true,name-canonical=true,push=true + outputs: type=image,name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LOWER }},push-by-digest=true,name-canonical=true,push=true + # Export the build images as artifact for the next job of merging - name: Export digest run: | mkdir -p /tmp/digests @@ -60,38 +73,45 @@ jobs: path: /tmp/digests/* if-no-files-found: error retention-days: 1 + # Merge platforms into images into a multi-platform image merge: if: always() runs-on: ubuntu-latest + permissions: + contents: read + packages: write + attestations: write + id-token: write needs: - build steps: + - name: lowercase repo + run: | + echo "IMAGE_NAME_LOWER=${GITHUB_REPOSITORY,,}" >>${GITHUB_ENV} - name: Download digests uses: actions/download-artifact@v4 with: path: /tmp/digests pattern: digests-* - merge-multiple: true + merge-multiple: true - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Docker meta + uses: docker/setup-buildx-action@v3 + - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v5 with: - images: ${{ secrets.DOCKERHUB_USERNAME }}/emhass-docker-standalone - tags: | - type=semver,pattern={{version}} - type=raw,value=latest - - name: Login to Docker Hub + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LOWER }} + - name: Log in to the Container registry uses: docker/login-action@v3 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Create manifest list and push working-directory: /tmp/digests run: | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ secrets.DOCKERHUB_USERNAME }}/emhass-docker-standalone@sha256:%s ' *) + $(printf '${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LOWER }}@sha256:%s ' *) - name: Inspect image run: | - docker buildx imagetools inspect ${{ secrets.DOCKERHUB_USERNAME }}/emhass-docker-standalone:${{ steps.meta.outputs.version }} + docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME_LOWER }}:${{ steps.meta.outputs.version }} diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml index fa17858d..4b0e1428 100644 --- a/.github/workflows/python-test.yml +++ b/.github/workflows/python-test.yml @@ -35,9 +35,8 @@ jobs: - name: Install dependencies run: | - python -m pip install --upgrade pip - python -m pip install pytest requests-mock - python setup.py install + pip install pytest requests-mock + pip install . - name: Test with pytest run: | diff --git a/.gitignore b/.gitignore index 2e8d38d6..c79a4eee 100644 --- a/.gitignore +++ b/.gitignore @@ -2,12 +2,13 @@ test.py # Secret yaml file secrets_emhass.yaml +# config json file +config.json *.tar .vscode/* .vscode/launch.json .vscode/settings.json .vscode/tasks.json -*.html *.pkl **/app diff --git a/.vscode/launch.json b/.vscode/launch.json index 1800d75c..abcbe8ec 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -19,41 +19,29 @@ ], "justMyCode": true, "env": { - "CONFIG_PATH": "/workspaces/emhass/config_emhass.yaml", - "OPTIONS_PATH": "/workspaces/emhass/options.json", + //"LEGACY_CONFIG_PATH": "/workspaces/emhass/tests/config_emhass.yaml", + "CONFIG_PATH": "/workspaces/emhass/config.json", "SECRETS_PATH": "/workspaces/emhass/secrets_emhass.yaml", "DATA_PATH": "/workspaces/emhass/data/", - "LOGGING_LEVEL": "DEBUG" - } + "LOGGING_LEVEL": "DEBUG" + }, }, { - "name": "EMHASS run ADDON", + "name": "EMHASS with options.json", "type": "debugpy", "request": "launch", "module": "emhass.web_server", "console": "integratedTerminal", - "args": [ - "--addon", - "true", - "--no_response", - "true" - ], "purpose": [ "debug-in-terminal" ], "justMyCode": true, "env": { - "CONFIG_PATH": "/workspaces/emhass/config_emhass.yaml", + "CONFIG_PATH": "/workspaces/emhass/config.json", "OPTIONS_PATH": "/workspaces/emhass/options.json", - "SECRETS_PATH": "/workspaces/emhass/secrets_emhass.yaml", "DATA_PATH": "/workspaces/emhass/data/", - "EMHASS_URL": "http://HAIPHERE:8123/", //change - "EMHASS_KEY": "PLACEKEYHERE", //change - "TIME_ZONE": "Europe/Paris", //optional change - "LAT": "45.83", //optional change - "LON": "6.86", //optional change - "ALT": "4807.8", //optional change - "LOGGING_LEVEL": "DEBUG" //optional change + "LOGGING_LEVEL": "DEBUG", + //"SUPERVISOR_TOKEN": "test" //test }, } ] diff --git a/.vscode/settings.json b/.vscode/settings.json index 2947cfe8..a3b632e7 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -9,5 +9,8 @@ "test_*.py" ], "python.testing.pytestEnabled": false, - "python.testing.unittestEnabled": true + "python.testing.unittestEnabled": true, + "cSpell.words": [ + "automations" + ] } \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json index ee23d121..9188cd10 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -12,6 +12,7 @@ "install", "--no-deps", "--force-reinstall", + "--break-system-packages", "--editable", "." 
], @@ -19,7 +20,7 @@ "echo": true, "panel": "shared", "focus": true - } + }, }, { "label": "EMHASS install with dependencies", @@ -29,7 +30,7 @@ "isDefault": true }, "args": [ - "install", "--force-reinstall", "." + "install", "--break-system-packages", "--force-reinstall", "." ], "presentation": { "echo": true, diff --git a/Dockerfile b/Dockerfile old mode 100644 new mode 100755 index 5873a51a..80fde80d --- a/Dockerfile +++ b/Dockerfile @@ -1,28 +1,24 @@ ## EMHASS Docker ## Docker run addon testing example: - ## docker build -t emhass/docker --build-arg build_version=addon-local . - ## docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE -## Docker run standalone example: - ## docker build -t emhass/docker --build-arg build_version=standalone . - ## docker run -it -p 5000:5000 --name emhass-container -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml emhass/docker + ## docker build -t emhass . + ## OR docker build --build-arg TARGETARCH=amd64 -t emhass . + ## docker run --rm -it -p 5000:5000 --name emhass-container -v ./config.json:/share/config.json -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml emhass -#build_version options are: addon, addon-pip, addon-git, addon-local, standalone (default) -ARG build_version=standalone - - -#armhf=raspbian, amd64,armv7,aarch64=debian +# armhf,amd64,armv7,aarch64 +ARG TARGETARCH +# armhf=raspbian, amd64,armv7,aarch64=debian ARG os_version=debian FROM ghcr.io/home-assistant/$TARGETARCH-base-$os_version:bookworm AS base -#check if TARGETARCH was passed by build-arg +# check if TARGETARCH was passed by build-arg ARG TARGETARCH ENV TARGETARCH=${TARGETARCH:?} WORKDIR /app COPY requirements.txt /app/ -#apt package install +# apt package install RUN apt-get update \ && apt-get install -y --no-install-recommends \ libffi-dev \ @@ -48,19 +44,19 @@ RUN apt-get update \ glpk-utils \ libatlas-base-dev \ libopenblas-dev -#specify hdf5 +# specify hdf5 RUN ln -s /usr/include/hdf5/serial /usr/include/hdf5/include && export HDF5_DIR=/usr/include/hdf5 -#install packages from pip, use piwheels if arm 32bit +# install packages from pip, use piwheels if arm 32bit RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && pip3 install --index-url=https://www.piwheels.org/simple --no-cache-dir --break-system-packages -r requirements.txt || pip3 install --no-cache-dir --break-system-packages -r requirements.txt -#try, symlink apt cbc, to pulp cbc, in python directory (for 32bit) +# try to symlink the apt cbc solver to pulp's cbc path in the python directory (for 32bit) RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && ln -sf /usr/bin/cbc /usr/local/lib/python3.11/dist-packages/pulp/solverdir/cbc/linux/32/cbc || echo "cbc symlink didnt work/not required" -#if armv7, try install libatomic1 to fix scipy issue +# if armv7, try to install libatomic1 to fix a scipy issue RUN [[ "${TARGETARCH}" == "armv7" ]] && apt-get update && apt-get install libatomic1 || echo "libatomic1 cant be installed" -#remove build only packages +# remove build only packages RUN apt-get purge -y --auto-remove \ gcc \ patchelf \ @@ -74,98 +70,50 @@ RUN apt-get purge -y --auto-remove \ libnetcdf-dev \ && rm -rf /var/lib/apt/lists/* -#copy config file -COPY config_emhass.yaml /app/ - -#make sure data directory exists +# make sure data directory exists RUN mkdir -p /app/data/ -#------------------------- -##EMHASS-Add-on
default (this has no emhass package) -FROM base as addon +# make sure emhass share directory exists +RUN mkdir -p /share/emhass/ -LABEL \ - io.hass.name="emhass" \ - io.hass.description="EMHASS: Energy Management for Home Assistant" \ - io.hass.version=${BUILD_VERSION} \ - io.hass.type="addon" \ - io.hass.arch="aarch64|amd64|armhf|armv7" - -#----------- -#EMHASS-ADD-ON testing with pip emhass (EMHASS-Add-on testing reference) -FROM addon as addon-pip -#set build arg for pip version -ARG build_pip_version="" -RUN pip3 install --no-cache-dir --break-system-packages --upgrade --force-reinstall --no-deps --upgrade-strategy=only-if-needed -U emhass${build_pip_version} - -COPY options.json /app/ - -ENTRYPOINT [ "python3", "-m", "emhass.web_server","--addon", "True", "--no_response", "True"] - -#----------- -#EMHASS-Add-on testing from local files -FROM addon as addon-local +# copy required EMHASS files COPY src/emhass/ /app/src/emhass/ -COPY src/emhass/templates/ /app/src/emhass/templates/ -COPY src/emhass/static/ /app/src/emhass/static/ -COPY src/emhass/static/img/ /app/src/emhass/static/img/ -COPY src/emhass/data/ /app/src/emhass/data/ -COPY data/opt_res_latest.csv /app/data/ -#add options.json, this otherwise would be generated via HA -COPY options.json /app/ -COPY README.md /app/ -COPY setup.py /app/ -#compile EMHASS locally -RUN pip3 install --no-cache-dir --break-system-packages --no-deps --force-reinstall . -ENTRYPOINT [ "python3", "-m", "emhass.web_server","--addon", "True" , "--no_response", "True"] - - -#----------- -#EMHASS-Add-on testing with git -FROM addon as addon-git -ARG build_repo=https://github.com/davidusb-geek/emhass.git -ARG build_branch=master -WORKDIR /tmp/ -#Repo -RUN git clone $build_repo -WORKDIR /tmp/emhass -#Branch -RUN git checkout $build_branch -RUN mkdir -p /app/src/emhass/data/ -RUN cp -r /tmp/emhass/src/emhass/. /app/src/emhass/ -RUN cp /tmp/emhass/src/emhass/data/* /app/src/emhass/data/ -RUN cp /tmp/emhass/data/opt_res_latest.csv /app/data/ -RUN cp /tmp/emhass/setup.py /app/ -RUN cp /tmp/emhass/README.md /app/ -#add options.json, this otherwise would be generated via HA -RUN cp /tmp/emhass/options.json /app/ -WORKDIR /app -RUN pip3 install --no-cache-dir --break-system-packages --no-deps --force-reinstall . 
-ENTRYPOINT [ "python3", "-m", "emhass.web_server","--addon", "True" , "--no_response", "True"] -#------------------------- -#EMHASS standalone -FROM base as standalone - -COPY src/emhass/ /app/src/emhass/ +# webserver files COPY src/emhass/templates/ /app/src/emhass/templates/ COPY src/emhass/static/ /app/src/emhass/static/ +COPY src/emhass/static/data/ /app/src/emhass/static/data/ COPY src/emhass/static/img/ /app/src/emhass/static/img/ + +# emhass extra packadge data COPY src/emhass/data/ /app/src/emhass/data/ + +# pre generated optimization results COPY data/opt_res_latest.csv /app/data/ COPY README.md /app/ COPY setup.py /app/ -#secrets file can be copied manually at docker run + +# secrets file (secrets_emhass.yaml) can be copied into the container with volume mounts with docker run +# options.json file will be automatically generated and passed from Home Assistant using the addon #set python env variables ENV PYTHONDONTWRITEBYTECODE 1 ENV PYTHONUNBUFFERED 1 -#build EMHASS +# Docker Labels for hass +LABEL \ + io.hass.name="emhass" \ + io.hass.description="EMHASS: Energy Management for Home Assistant" \ + io.hass.version=${BUILD_VERSION} \ + io.hass.type="addon" \ + io.hass.arch="aarch64|amd64|armhf|armv7" + +# build EMHASS RUN pip3 install --no-cache-dir --break-system-packages --no-deps --force-reinstall . ENTRYPOINT [ "python3", "-m", "emhass.web_server"] -#------------------------- - -#check build arguments and build -FROM ${build_version} AS final \ No newline at end of file +# for running Unittest +#COPY tests/ /app/tests +#RUN apt-get update && apt-get install python3-requests-mock -y +#COPY data/ /app/data/ +#ENTRYPOINT ["python3","-m","unittest","discover","-s","./tests","-p","test_*.py"] diff --git a/README.md b/README.md index 32e04c48..8cd4f94e 100644 --- a/README.md +++ b/README.md @@ -90,90 +90,122 @@ Installation instructions and example Home Assistant automation configurations a You must follow these steps to make EMHASS work properly: -1) Define all the parameters in the configuration file according to your installation method. For the add-on method, you need to use the configuration pane directly on the add-on page. For other installation methods, it should be needed to set the variables using the `config_emhass.yaml` file. See below for details on the installation methods. See the description for each parameter in the **configuration** section. If you have a PV installation then this dedicated web app can be useful for finding your inverter and solar panel models: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/) +1) Install and run EMHASS. + - There are multiple methods of installing and Running EMHASS. See [Installation Method](##Installation-Methods) below to pick a method that best suits your use case. -2) You most notably will need to define the main data entering EMHASS. This will be the `sensor.power_photovoltaics` for the name of your hass variable containing the PV produced power and the variable `sensor.power_load_no_var_loads` for the load power of your household excluding the power of the deferrable loads that you want to optimize. +2) Define all the parameters in the configuration file *(`config.json`)* or configuration page *(`YOURIP:5000/configuration`)*. + - See the description for each parameter in the [configuration](https://emhass.readthedocs.io/en/latest/config.html) docs. + - You will most notably need to define the main data entering EMHASS. 
This will be the Home Assistant sensor/variable `sensor.power_photovoltaics` for the name of your Home Assistant variable containing the PV produced power, and the sensor/variable `sensor.power_load_no_var_loads`, for the load power of your household excluding the power of the deferrable loads that you want to optimize. + - If you have a PV installation then this dedicated web app can be useful for finding your inverter and solar panel models: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/) -3) Launch the actual optimization and check the results. This can be done manually using the buttons in the web UI or with a `curl` command like this: `curl -i -H 'Content-Type:application/json' -X POST -d '{}' http://localhost:5000/action/dayahead-optim`. +3) Launch the optimization and check the results. + - This can be done manually using the buttons in the web UI + - Or with a `curl` command like this: `curl -i -H 'Content-Type:application/json' -X POST -d '{}' http://localhost:5000/action/dayahead-optim`. -4) If you’re satisfied with the optimization results then you can set the optimization and data publish task commands in an automation. You can read more about this in the **usage** section below. +4) If you’re satisfied with the optimization results then you can set the optimization and data publish task commands in an automation. + - You can read more about this in the [usage](#usage) section below. -5) The final step is to link the deferrable loads variables to real switches on your installation. An example code for this using automations and the shell command integration is presented below in the **usage** section. +5) The final step is to link the deferrable loads variables to real switches on your installation. + - An example code for this using automations and the shell command integration is presented below in the [usage](#usage) section. A more detailed workflow is given below: -![](https://raw.githubusercontent.com/davidusb-geek/emhass/master/docs/images/workflow.png) +![workflow.png](https://raw.githubusercontent.com/davidusb-geek/emhass/master/docs/images/workflow.png) + +## Installation Methods ### Method 1) The EMHASS add-on for Home Assistant OS and supervised users -For Home Assistant OS and HA Supervised users, I've developed an add-on that will help you use EMHASS. The add-on is more user-friendly as the configuration can be modified directly in the add-on options pane and as with the standalone docker it exposes a web UI that can be used to inspect the optimization results and manually trigger a new optimization. +For Home Assistant OS and HA Supervised users, an [EMHASS add-on repository](https://github.com/davidusb-geek/emhass-add-on) has been developed to allow the EMHASS Docker container to run as a [Home Assistant Addon](https://www.home-assistant.io/addons/). The add-on is more user-friendly as the Home Assistant secrets (URL and API key) are automatically placed inside of the EMHASS container, and the web server port *(default 5000)* is already opened. You can find the add-on with the installation instructions here: [https://github.com/davidusb-geek/emhass-add-on](https://github.com/davidusb-geek/emhass-add-on) -The add-on usage instructions can be found on the documentation pane of the add-on once installed or directly here: [EMHASS Add-on documentation](https://github.com/davidusb-geek/emhass-add-on/blob/main/emhass/DOCS.md) - These architectures are supported: `amd64`, `armv7`, `armhf` and `aarch64`.
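+As a quick sketch of the install flow *(the add-on repository's own instructions are authoritative here)*: in Home Assistant, navigate to Settings → Add-ons → Add-on Store → Repositories, add the repository URL below, and then install the EMHASS add-on from the store.
+```
+https://github.com/davidusb-geek/emhass-add-on
+```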
-### Method 2) Using Docker in standalone mode +_Note: Both EMHASS via Docker and the EMHASS-Add-on contain the same Docker image. The EMHASS-Add-on repository, however, stores the Home Assistant add-on-specific configuration information and maintains EMHASS image version control._ + +### Method 2) Running EMHASS in Docker -You can also install EMHASS using docker. This can be in the same machine as Home Assistant (if using the supervised install method) or in a different distant machine. To install first pull the latest image from docker hub: +You can also install EMHASS as a Docker container. This can be on the same machine as Home Assistant (if you're running Home Assistant as a Docker container) or on a different, distant machine. To install, first pull the latest image: ```bash -docker pull davidusb/emhass-docker-standalone +# pull Docker image +docker pull ghcr.io/davidusb-geek/emhass:latest +# run Docker image, mounting config.json and secrets_emhass.yaml from host +docker run --rm -it -p 5000:5000 --name emhass-container -v ./config.json:/share/config.json -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml ghcr.io/davidusb-geek/emhass:latest ``` +*Note: it is not recommended to install the EMHASS image with the `:latest` tag, as you will likely want to control when you update the EMHASS version. Instead, find the [latest version tag](https://github.com/davidusb-geek/emhass/pkgs/container/emhass) (e.g. `v0.2.1`) and replace `latest`.* -You can also build your image locally. For this clone this repository, setup your `config_emhass.yaml` file and use the provided make file with this command: +You can also build your image locally. For this, clone this repository and build the image from the Dockerfile: ```bash -make -f deploy_docker.mk clean_deploy -``` -Then load the image in the .tar file: +# git clone EMHASS repo +git clone https://github.com/davidusb-geek/emhass.git +# move to EMHASS directory +cd emhass +# build Docker image +# may need to set architecture tag (docker build --build-arg TARGETARCH=amd64 -t emhass-local .) +docker build -t emhass-local . +# run built Docker image, mounting config.json and secrets_emhass.yaml from host +docker run --rm -it -p 5000:5000 --name emhass-container -v ./config.json:/share/config.json -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml emhass-local +``` + +Before running the docker container, make sure you have a designated folder for emhass on your host device and a `secrets_emhass.yaml` file. You can get an example of the secrets file from the [`secrets_emhass(example).yaml`](https://github.com/davidusb-geek/emhass/blob/master/secrets_emhass(example).yaml) file in this repository. ```bash -docker load -i .tar -``` -Finally, check your image tag with `docker images` and launch the docker itself: -```bash -docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS ``` - - If you wish to keep a local, persistent copy of the EMHASS-generated data, create a local folder on your device, then mount said folder inside the container.
- ```bash - mkdir -p $(pwd)/data #linux: create data folder on local device - - docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/data:/app/data -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS - ``` +# cli example of creating an emhass directory and appending a secrets_emhass.yaml file inside +mkdir ~/emhass +cd ~/emhass +cat << EOT >> ~/emhass/secrets_emhass.yaml +hass_url: https://myhass.duckdns.org/ +long_lived_token: thatverylongtokenhere +time_zone: Europe/Paris +Latitude: 45.83 +Longitude: 6.86 +Altitude: 4807.8 +EOT +docker run --rm -it -p 5000:5000 --name emhass-container -v ./config.json:/share/config.json -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml ghcr.io/davidusb-geek/emhass:latest +``` + +#### Docker: things to note + +- You can create a `config.json` file prior to running emhass *(obtain an example from [config_defaults.json](https://github.com/davidusb-geek/emhass/blob/enhass-standalone-addon-merge/src/emhass/data/config_defaults.json))*. Alternatively, you can insert your parameters into the configuration page on the EMHASS web server (EMHASS will then auto-create a config.json). With either option, the volume mount `-v ./config.json:/share/config.json` should be applied to make sure your config is stored on the host device *(so it is not deleted when the EMHASS container gets removed or the image updated)*. + +- If you wish to keep a local, semi-persistent copy of the EMHASS-generated data, create a local folder on your device, then mount said folder inside the container. + ```bash + #create data folder + mkdir -p ~/emhass/data + docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v ~/emhass/config.json:/share/config.json -v ~/emhass/data:/app/data -v ~/emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS ghcr.io/davidusb-geek/emhass:latest + ``` -If you wish to set the web_server's diagrams to a timezone other than UTC, set `TZ` environment variable on: -```bash -docker run -it --restart always -p 5000:5000 -e TZ="Europe/Paris" -e LOCAL_COSTFUN="profit" -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS -``` -### Method 3) Legacy method using a Python virtual environment +- If you wish to set the web_server's homepage optimization diagrams to a timezone other than UTC, set the `TZ` environment variable on docker run: + ```bash + docker run -it --restart always -p 5000:5000 -e TZ="Europe/Paris" -v ~/emhass/config.json:/share/config.json -v ~/emhass/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS ghcr.io/davidusb-geek/emhass:latest + ``` +### Method 3) Legacy method using a Python virtual environment *(Legacy CLI)* +If you wish to run EMHASS optimizations with CLI commands *(no persistent web server session)*, you can run EMHASS via the Python package alone *(not wrapped in a Docker container)*. With this method it is recommended to install in a virtual environment. -Create and activate a virtual environment: -```bash -python3 -m venv emhassenv -cd emhassenv -source bin/activate -``` -Install using the distribution files: -```bash -python3 -m pip install emhass -``` -Clone this repository to obtain the example configuration files.
-We will suppose that this repository is cloned to: -``` -/home/user/emhass -``` -This will be the root path containing the yaml configuration files (`config_emhass.yaml` and `secrets_emhass.yaml`) and the different needed folders (a `data` folder to store the optimizations results and a `scripts` folder containing the bash scripts described further below). - -To upgrade the installation in the future just use: -```bash -python3 -m pip install --upgrade emhass -``` +- Create and activate a virtual environment: + ```bash + python3 -m venv ~/emhassenv + cd ~/emhassenv + source bin/activate + ``` +- Install using the distribution files: + ```bash + python3 -m pip install emhass + ``` +- Create and store the configuration (config.json), secrets (secrets_emhass.yaml) and data (/data) files in the emhass directory (`~/emhassenv`). +Note: you may wish to copy the `config.json` (config_defaults.json), `secrets_emhass.yaml` (secrets_emhass(example).yaml) and/or `/scripts/` files from this repository to the `~/emhassenv` folder as a starting point and/or to run the bash scripts described below. + +- To upgrade the installation in the future just use: + ```bash + python3 -m pip install --upgrade emhass + ``` ## Usage -### Method 1) Add-on and docker standalone +### Method 1) Add-on and Docker -If using the add-on or the standalone docker installation, it exposes a simple webserver on port 5000. You can access it directly using your browser, ex: http://localhost:5000. +If using the add-on or the Docker installation, it exposes a simple webserver on port 5000. You can access it directly using your browser (e.g. http://localhost:5000). With this web server, you can perform RESTful POST commands on multiple ENDPOINTS with the prefix `action/*`: @@ -192,25 +224,28 @@ A `curl` command can then be used to launch an optimization task like this: `cur To run a command simply use the `emhass` CLI command followed by the needed arguments. The available arguments are: - `--action`: This is used to set the desired action, options are: `perfect-optim`, `dayahead-optim`, `naive-mpc-optim`, `publish-data`, `forecast-model-fit`, `forecast-model-predict` and `forecast-model-tune`. -- `--config`: Define the path to the config.yaml file (including the yaml file itself) +- `--config`: Define the path to the config.json file (including the json file itself) +- `--secrets`: Define the path to the secrets file (secrets_emhass.yaml) - `--costfun`: Define the type of cost function, this is optional and the options are: `profit` (default), `cost`, `self-consumption` - `--log2file`: Define if we should log to a file or not, this is optional and the options are: `True` or `False` (default) - `--params`: Configuration as JSON. - `--runtimeparams`: Data passed at runtime. This can be used to pass your own forecast data to EMHASS. - `--debug`: Use `True` for testing purposes. - `--version`: Show the current version of EMHASS. +- `--root`: Define the path to the emhass root (e.g. ~/emhass) +- `--data`: Define the path to the data files (.csv & .pkl) (e.g. ~/emhass/data/) For example, the following line command can be used to perform a day-ahead optimization task: ```bash -emhass --action 'dayahead-optim' --config '/home/user/emhass/config_emhass.yaml' --costfun 'profit' +emhass --action 'dayahead-optim' --config ~/emhass/config.json --costfun 'profit' ``` -Before running any valuable command you need to modify the `config_emhass.yaml` and `secrets_emhass.yaml` files. These files should contain the information adapted to your own system. +Before running any valuable command you need to modify the `config.json` and `secrets_emhass.yaml` files. These files should contain the information adapted to your own system.
To do this take a look at the special section for this in the [documentation](https://emhass.readthedocs.io/en/latest/config.html). -## Home Assistant integration +## Home Assistant Automation -To integrate with Home Assistant we will need to define some shell commands in the `configuration.yaml` file and some basic automations in the `automations.yaml` file. -In the next few paragraphs, we are going to consider the `dayahead-optim` optimization strategy, which is also the first that was implemented, and we will also cover how to publish the results. -Then additional optimization strategies were developed, that can be used in combination with/replace the `dayahead-optim` strategy, such as MPC, or to expand the funcitonalities such as the Machine Learning method to predict your household consumption. Each of them has some specificities and features and will be considered in dedicated sections. +To automate EMHASS with Home Assistant, we will need to define some shell commands in the Home Assistant `configuration.yaml` file and some basic automations in the `automations.yaml` file. +In the next few paragraphs, we are going to consider the `dayahead-optim` optimization strategy, which is also the first that was implemented, and we will also cover how to publish the optimization results. +Additional optimization strategies were developed later; these can be used in combination with, or instead of, the `dayahead-optim` strategy (such as MPC), or to expand the functionalities, such as the Machine Learning method to predict your household consumption. Each of them has some specificities and features and will be considered in dedicated sections. ### Dayahead Optimization - Method 1) Add-on and docker standalone @@ -225,27 +260,27 @@ shell_command: In `configuration.yaml`: ```yaml shell_command: - dayahead_optim: /home/user/emhass/scripts/dayahead_optim.sh - publish_data: /home/user/emhass/scripts/publish_data.sh + dayahead_optim: ~/emhass/scripts/dayahead_optim.sh + publish_data: ~/emhass/scripts/publish_data.sh ``` Create the file `dayahead_optim.sh` with the following content: ```bash #!/bin/bash -. /home/user/emhassenv/bin/activate -emhass --action 'dayahead-optim' --config '/home/user/emhass/config_emhass.yaml' +. ~/emhassenv/bin/activate +emhass --action 'dayahead-optim' --config ~/emhass/config.json ``` And the file `publish_data.sh` with the following content: ```bash #!/bin/bash -. /home/user/emhassenv/bin/activate -emhass --action 'publish-data' --config '/home/user/emhass/config_emhass.yaml' +.
~/emhassenv/bin/activate +emhass --action 'publish-data' --config ~/emhass/config.json ``` Then specify user rights and make the files executable: ```bash -sudo chmod -R 755 /home/user/emhass/scripts/dayahead_optim.sh -sudo chmod -R 755 /home/user/emhass/scripts/publish_data.sh -sudo chmod +x /home/user/emhass/scripts/dayahead_optim.sh -sudo chmod +x /home/user/emhass/scripts/publish_data.sh +sudo chmod -R 755 ~/emhass/scripts/dayahead_optim.sh +sudo chmod -R 755 ~/emhass/scripts/publish_data.sh +sudo chmod +x ~/emhass/scripts/dayahead_optim.sh +sudo chmod +x ~/emhass/scripts/publish_data.sh ``` ### Common for any installation method @@ -280,15 +315,15 @@ In `automations.yaml`: - service: shell_command.dayahead_optim - service: shell_command.publish_data ``` -in configuration page/`config_emhass.yaml` +in configuration page/`config.json` ```json "method_ts_round": "first" "continual_publish": true ``` In this automation, the day-ahead optimization is performed once a day, every day at 5:30am. -If the `freq` parameter is set to `30` *(default)* in the configuration, the results of the day-ahead optimization will generate 48 values *(for each entity)*, a value for every 30 minutes in a day *(i.e. 24 hrs x 2)*. +If the `optimization_time_step` parameter is set to `30` *(default)* in the configuration, the results of the day-ahead optimization will generate 48 values *(for each entity)*, a value for every 30 minutes in a day *(i.e. 24 hrs x 2)*. -Setting the parameter `continual_publish` to `true` in the configuration page will allow EMHASS to store the optimization results as entities/sensors into separate json files. `continual_publish` will periodically (every `freq` amount of minutes) run a publish, and publish the optimization results of each generated entities/sensors to Home Assistant. The current state of the sensor/entity being updated every time publish runs, selecting one of the 48 stored values, by comparing the stored values' timestamps, the current timestamp and [`"method_ts_round": "first"`](#the-publish-data-specificities) to select the optimal stored value for the current state. +Setting the parameter `continual_publish` to `true` in the configuration page will allow EMHASS to store the optimization results as entities/sensors into separate json files. `continual_publish` will periodically (every `optimization_time_step` minutes) run a publish, and publish the optimization results of each generated entity/sensor to Home Assistant. The current state of the sensor/entity is updated every time publish runs, selecting one of the 48 stored values, by comparing the stored values' timestamps, the current timestamp and [`"method_ts_round": "first"`](#the-publish-data-specificities) to select the optimal stored value for the current state. Options 1 and 2 are very similar; however, option 2 (`continual_publish`) will require a CPU thread to constantly be run inside of EMHASS, lowering efficiency. The reason why you may pick one over the other is explained in more detail below in [continual_publish](#continual_publish-emhass-automation). @@ -325,7 +360,7 @@ automation: - service: homeassistant.turn_off entity_id: switch.water_heater_switch ``` -These automations will turn on and off the Home Assistant entity `switch.water_heater_switch` using the current state from the EMHASS entity `sensor.p_deferrable0`.
`sensor.p_deferrable0` being the entity generated from the EMHASS day-ahead optimization and published by examples above. The `sensor.p_deferrable0` entity's current state is updated every 30 minutes (or `freq` minutes) via an automated publish option 1 or 2. *(selecting one of the 48 stored data values)* +These automations will turn on and off the Home Assistant entity `switch.water_heater_switch` using the current state from the EMHASS entity `sensor.p_deferrable0`. `sensor.p_deferrable0` is the entity generated from the EMHASS day-ahead optimization and published by the examples above. The `sensor.p_deferrable0` entity's current state is updated every 30 minutes (or every `optimization_time_step` minutes) via automated publish option 1 or 2 *(selecting one of the 48 stored data values)*. ## The publish-data specificities @@ -356,7 +391,7 @@ Below you can find a list of the variables resulting from EMHASS computation, sh | --------------- | ---------- | --------------------------------| | P_PV | Forecasted power generation from your solar panels (Watts). This helps you predict how much solar energy you will produce during the forecast period. | sensor.p_pv_forecast | | P_Load | Forecasted household power consumption (Watts). This gives you an idea of how much energy your appliances are expected to use. | sensor.p_load_forecast | -| P_deferrableX
[X = 0, 1, 2, ...] | Forecasted power consumption of deferrable loads (Watts). Deferable loads are appliances that can be managed by EMHASS. EMHASS helps you optimise energy usage by prioritising solar self-consumption and minimizing reliance on the grid or by taking advantage or supply and feed-in tariff volatility. You can have multiple deferable loads and you use this sensor in HA to control these loads via smart switch or other IoT means at your disposal. | sensor.p_deferrableX | +| P_deferrableX
[X = 0, 1, 2, ...] | Forecasted power consumption of deferrable loads (Watts). Deferrable loads are appliances that can be managed by EMHASS. EMHASS helps you optimize energy usage by prioritizing solar self-consumption and minimizing reliance on the grid or by taking advantage of supply and feed-in tariff volatility. You can have multiple deferrable loads and you use this sensor in HA to control these loads via a smart switch or other IoT means at your disposal. | sensor.p_deferrableX | | P_grid_pos | Forecasted power imported from the grid (Watts). This indicates the amount of energy you are expected to draw from the grid when your solar production is insufficient to meet your needs or it is advantageous to consume from the grid. | - | | P_grid_neg | Forecasted power exported to the grid (Watts). This indicates the amount of excess solar energy you are expected to send back to the grid during the forecast period. | - | | P_batt | Forecasted (dis)charge power load (Watts) for the battery (if installed). If negative it indicates the battery is charging, if positive that the battery is discharging. | sensor.p_batt_forecast | @@ -381,7 +416,7 @@ In EMHASS we have 4 forecasts to deal with: - PV production selling price forecast: at what price are you selling your excess PV production in the next 24 hours. This is given in EUR/kWh. -The sensor containing the load data should be specified in the parameter `var_load` in the configuration file. As we want to optimize household energy, we need to forecast the load power consumption. The default method for this is a naive approach using 1-day persistence. The load data variable should not contain the data from the deferrable loads themselves. For example, let's say that you set your deferrable load to be the washing machine. The variables that you should enter in EMHASS will be: `var_load: 'sensor.power_load_no_var_loads'` and `sensor.power_load_no_var_loads = sensor.power_load - sensor.power_washing_machine`. This is supposing that the overall load of your house is contained in the variable: `sensor.power_load`. The sensor `sensor.power_load_no_var_loads` can be easily created with a new template sensor in Home Assistant. +The sensor containing the load data should be specified in the parameter `sensor_power_load_no_var_loads` in the configuration file. As we want to optimize household energy, we need to forecast the load power consumption. The default method for this is a naive approach using 1-day persistence. The load data variable should not contain the data from the deferrable loads themselves. For example, let's say that you set your deferrable load to be the washing machine. The variables that you should enter in EMHASS will be: `sensor_power_load_no_var_loads: 'sensor.power_load_no_var_loads'` and `sensor.power_load_no_var_loads = sensor.power_load - sensor.power_washing_machine`. This is supposing that the overall load of your house is contained in the variable: `sensor.power_load`. The sensor `sensor.power_load_no_var_loads` can be easily created with a new template sensor in Home Assistant *(see the sketch just below)*. If you are implementing an MPC controller, then you will also need to provide some data at the optimization runtime using the key `runtimeparams`.
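+As a minimal sketch *(assuming, as in the example above, that your total household load is `sensor.power_load` and the washing machine is measured by `sensor.power_washing_machine`; both names are illustrative)*, such a template sensor could be defined in Home Assistant's `configuration.yaml`:
+```yaml
+# illustrative template sensor: total household load minus the deferrable washing machine load
+template:
+  - sensor:
+      - name: "power_load_no_var_loads"
+        unit_of_measurement: "W"
+        device_class: power
+        state: >
+          {{ (states('sensor.power_load') | float(0))
+             - (states('sensor.power_washing_machine') | float(0)) }}
+```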
@@ -398,20 +433,20 @@ curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/a # Then publish teh results of dayahead curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data ``` -*Note, the published entities from the publish-data action will not automatically update the entities' current state (current state being used to check when to turn on and off appliances via Home Assistant automatons). To update the EMHASS entities state, another publish would have to be re-run later when the current time matches the next value's timestamp (e.g. every 30 minutes). See examples below for methods to automate the publish-action.* +*Note, the published entities from the publish-data action will not automatically update the entities' current state (current state being used to check when to turn on and off appliances via Home Assistant automations). To update the EMHASS entities state, another publish would have to be re-run later when the current time matches the next value's timestamp (e.g. every 30 minutes). See examples below for methods to automate the publish-action.* #### continual_publish *(EMHASS Automation)* -As discussed in [Common for any installation method - option 2](#option-2-emhass-automate-publish), setting `continual_publish` to `true` in the configuration saves the output of the optimization into the `data_path/entities` folder *(a .json file for each sensor/entity)*. A constant loop (in `freq` minutes) will run, observe the .json files in that folder, and publish the saved files periodically (updating the current state of the entity by comparing date.now with the saved data value timestamps). +As discussed in [Common for any installation method - option 2](#option-2-emhass-automate-publish), setting `continual_publish` to `true` in the configuration saves the output of the optimization into the `data_path/entities` folder *(a .json file for each sensor/entity)*. A constant loop (in `optimization_time_step` minutes) will run, observe the .json files in that folder, and publish the saved files periodically (updating the current state of the entity by comparing date.now with the saved data value timestamps). -For users that wish to run multiple different optimizations, you can set the runtime parameter: `publish_prefix` to something like: `"mpc_"` or `"dh_"`. This will generate unique entity_id names per optimization and save these unique entities as separate files in the folder. All the entity files will then be updated when the next loop iteration runs. If a different `freq` integer was passed as a runtime parameter in an optimization, the `continual_publish` loop will be based on the lowest `freq` saved. An example: +For users that wish to run multiple different optimizations, you can set the runtime parameter: `publish_prefix` to something like: `"mpc_"` or `"dh_"`. This will generate unique entity_id names per optimization and save these unique entities as separate files in the folder. All the entity files will then be updated when the next loop iteration runs. If a different `optimization_time_step` integer was passed as a runtime parameter in an optimization, the `continual_publish` loop will be based on the lowest `optimization_time_step` saved. 
An example: ```bash -# RUN dayahead, with freq=30 (default), prefix=dh_ +# RUN dayahead, with optimization_time_step=30 (default), prefix=dh_ curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim -# RUN MPC, with freq=5, prefix=mpc_ -curl -i -H 'Content-Type:application/json' -X POST -d '{"freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim +# RUN MPC, with optimization_time_step=5, prefix=mpc_ +curl -i -H 'Content-Type:application/json' -X POST -d '{"optimization_time_step":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim ``` -This will tell continual_publish to loop every 5 minutes based on the freq passed in MPC. All entities from the output of dayahead "dh_" and MPC "mpc_" will be published every 5 minutes. +This will tell continual_publish to loop every 5 minutes based on the optimization_time_step passed in MPC. All entities from the output of dayahead "dh_" and MPC "mpc_" will be published every 5 minutes.
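+For reference, a hypothetical listing of the `data_path/entities` folder after the two runs above *(the exact file names depend on your published sensors and prefixes)*:
+```bash
+ls data/entities
+# dh_sensor.p_deferrable0.json   dh_sensor.p_pv_forecast.json
+# mpc_sensor.p_deferrable0.json  mpc_sensor.p_pv_forecast.json  ...
+```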
@@ -419,13 +454,13 @@ This will tell continual_publish to loop every 5 minutes based on the freq passe #### Mixture of continual_publish and manual *(Home Assistant Automation for Publish)* -You can choose to save one optimization for continual_publish and bypass another optimization by setting `"continual_publish":false` runtime parameter: +You can choose to save one optimization for continual_publish and bypass another optimization by setting `'continual_publish':false` runtime parameter: ```bash -# RUN dayahead, with freq=30 (default), prefix=dh_, included into continual_publish +# RUN dayahead, with optimization_time_step=30 (default), prefix=dh_, included into continual_publish curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim -# RUN MPC, with freq=5, prefix=mpc_, Manually publish, excluded from continual_publish loop -curl -i -H 'Content-Type:application/json' -X POST -d '{"continual_publish":false,"freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim +# RUN MPC, with optimization_time_step=5, prefix=mpc_, Manually publish, excluded from continual_publish loop +curl -i -H 'Content-Type:application/json' -X POST -d '{'continual_publish':false,'optimization_time_step':5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim # Publish MPC output curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data ``` @@ -435,16 +470,16 @@ This example saves the dayahead optimization into `data_path/entities` as .json For users who wish to have full control of exactly when they would like to run a publish and have the ability to save multiple different optimizations. The `entity_save` runtime parameter has been created to save the optimization output entities to .json files whilst `continual_publish` is set to `false` in the configuration. Allowing the user to reference the saved .json files manually via a publish: -in configuration page/`config_emhass.yaml` : +in configuration page/`config.json` : ```json -"continual_publish": false +'continual_publish': false ``` POST action : ```bash -# RUN dayahead, with freq=30 (default), prefix=dh_, save entity +# RUN dayahead, with optimization_time_step=30 (default), prefix=dh_, save entity curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true, "publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim -# RUN MPC, with freq=5, prefix=mpc_, save entity -curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true", "freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim +# RUN MPC, with optimization_time_step=5, prefix=mpc_, save entity +curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true", 'optimization_time_step':5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim ``` You can then reference these .json saved entities via their `publish_prefix`. 
Include the same `publish_prefix` in the `publish_data` action: ```bash @@ -470,7 +505,7 @@ curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, ``` Or if using the legacy method using a Python virtual environment: ```bash -emhass --action 'dayahead-optim' --config '/home/user/emhass/config_emhass.yaml' --runtimeparams '{"pv_power_forecast":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93, 1164.33, 1046.68, 1559.1, 2091.26, 1556.76, 1166.73, 1516.63, 1391.13, 1720.13, 820.75, 804.41, 251.63, 79.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}' +emhass --action 'dayahead-optim' --config ~/emhass/config.json --runtimeparams '{"pv_power_forecast":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93, 1164.33, 1046.68, 1559.1, 2091.26, 1556.76, 1166.73, 1516.63, 1391.13, 1720.13, 820.75, 804.41, 251.63, 79.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}' ``` The possible dictionary keys to pass data are: @@ -485,25 +520,25 @@ The possible dictionary keys to pass data are: ### Passing other data at runtime -It is possible to also pass other data during runtime to automate energy management. For example, it could be useful to dynamically update the total number of hours for each deferrable load (`def_total_hours`) using for instance a correlation with the outdoor temperature (useful for water heater for example). +It is possible to also pass other data during runtime to automate energy management. For example, it could be useful to dynamically update the total number of hours for each deferrable load (`operating_hours_of_each_deferrable_load`) using for instance a correlation with the outdoor temperature (useful for water heater for example). Here is the list of the other additional dictionary keys that can be passed at runtime: -- `num_def_loads` for the number of deferrable loads to consider. +- `number_of_deferrable_loads` for the number of deferrable loads to consider. -- `P_deferrable_nom` for the nominal power for each deferrable load in Watts. +- `nominal_power_of_deferrable_loads` for the nominal power for each deferrable load in Watts. -- `def_total_hours` for the total number of hours that each deferrable load should operate. +- `operating_hours_of_each_deferrable_load` for the total number of hours that each deferrable load should operate. -- `def_start_timestep` for the timestep from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization timewindow). +- `start_timesteps_of_each_deferrable_load` for the timestep from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization timewindow). -- `def_end_timestep` for the timestep before which each deferrable load should operate (if you don't want the deferrable load to use the whole optimization timewindow). +- `end_timesteps_of_each_deferrable_load` for the timestep before which each deferrable load should operate (if you don't want the deferrable load to use the whole optimization timewindow). - `def_current_state` Pass this as a list of booleans (True/False) to indicate the current deferrable load state. This is used internally to avoid incorrectly penalizing a deferrable load start if a forecast is run when that load is already running. -- `treat_def_as_semi_cont` to define if we should treat each deferrable load as a semi-continuous variable. 
+- `treat_deferrable_load_as_semi_cont` to define if we should treat each deferrable load as a semi-continuous variable. -- `set_def_constant` to define if we should set each deferrable load as a constant fixed value variable with just one startup for each optimization task. +- `set_deferrable_load_single_constant` to define if we should set each deferrable load as a constant fixed value variable with just one startup for each optimization task. - `solcast_api_key` for the SolCast API key if you want to use this service for PV power production forecast. @@ -511,15 +546,15 @@ Here is the list of the other additional dictionary keys that can be passed at r - `solar_forecast_kwp` for the PV peak installed power in kW used for the solar.forecast API call. -- `SOCmin` the minimum possible SOC. +- `battery_minimum_state_of_charge` the minimum possible SOC. -- `SOCmax` the maximum possible SOC. +- `battery_maximum_state_of_charge` the maximum possible SOC. -- `SOCtarget` for the desired target value of the initial and final SOC. +- `battery_target_state_of_charge` for the desired target value of the initial and final SOC. -- `Pd_max` for the maximum battery discharge power. +- `battery_discharge_power_max` for the maximum battery discharge power. -- `Pc_max` for the maximum battery charge power. +- `battery_charge_power_max` for the maximum battery charge power. - `publish_prefix` use this key to pass a common prefix to all published data. This will add a prefix to the sensor name but also the forecast attribute keys within the sensor. @@ -544,20 +579,20 @@ When applying this controller, the following `runtimeparams` should be defined: - `soc_final` for the final value of the battery SOC for the current iteration of the MPC. -- `def_total_hours` for the list of deferrable loads functioning hours. These values can decrease as the day advances to take into account receding horizon daily energy objectives for each deferrable load. +- `operating_hours_of_each_deferrable_load` for the list of deferrable loads functioning hours. These values can decrease as the day advances to take into account receding horizon daily energy objectives for each deferrable load. -- `def_start_timestep` for the timestep from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization timewindow). If you specify a value of 0 (or negative), the deferrable load will be optimized as from the beginning of the complete prediction horizon window. +- `start_timesteps_of_each_deferrable_load` for the timestep from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization timewindow). If you specify a value of 0 (or negative), the deferrable load will be optimized as from the beginning of the complete prediction horizon window. -- `def_end_timestep` for the timestep before which each deferrable load should operate (if you don't want the deferrable load to use the whole optimization timewindow). If you specify a value of 0 (or negative), the deferrable load optimization window will extend up to the end of the prediction horizon window. +- `end_timesteps_of_each_deferrable_load` for the timestep before which each deferrable load should operate (if you don't want the deferrable load to use the whole optimization timewindow). If you specify a value of 0 (or negative), the deferrable load optimization window will extend up to the end of the prediction horizon window. 
A correct call for an MPC optimization should look like this:

```bash
curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6}' http://192.168.3.159:5000/action/naive-mpc-optim
```
-*Example with :`def_total_hours`, `def_start_timestep`, `def_end_timestep`.*
+*Example with: `operating_hours_of_each_deferrable_load`, `start_timesteps_of_each_deferrable_load`, `end_timesteps_of_each_deferrable_load`.*
```bash
-curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6,"def_total_hours":[1,3],"def_start_timestep":[0,3],"def_end_timestep":[0,6]}' http://localhost:5000/action/naive-mpc-optim
+curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6,"operating_hours_of_each_deferrable_load":[1,3],"start_timesteps_of_each_deferrable_load":[0,3],"end_timesteps_of_each_deferrable_load":[0,6]}' http://localhost:5000/action/naive-mpc-optim
```

## A machine learning forecaster
diff --git a/config.json b/config.json
new file mode 100644
index 00000000..02cedd0e
--- /dev/null
+++ b/config.json
@@ -0,0 +1,117 @@
+{
+ "logging_level": "INFO",
+ "costfun": "profit",
+ "optimization_time_step": 30,
+ "historic_days_to_retrieve": 2,
+ "method_ts_round": "nearest",
+ "continual_publish": false,
+ "data_path": "default",
+ "set_total_pv_sell": false,
+ "lp_solver": "default",
+ "lp_solver_path": "empty",
+ "set_nocharge_from_grid": false,
+ "set_nodischarge_to_grid": true,
+ "set_battery_dynamic": false,
+ "battery_dynamic_max": 0.9,
+ "battery_dynamic_min": -0.9,
+ "weight_battery_discharge": 1.0,
+ "weight_battery_charge": 1.0,
+ "sensor_power_photovoltaics": "sensor.power_photovoltaics",
+ "sensor_power_load_no_var_loads": "sensor.power_load_no_var_loads",
+ "sensor_replace_zero": [
+ "sensor.power_photovoltaics",
+ "sensor.power_load_no_var_loads"
+ ],
+ "sensor_linear_interp": [
+ "sensor.power_photovoltaics",
+ "sensor.power_load_no_var_loads"
+ ],
+ "load_negative": false,
+ "set_zero_min": true,
+ "number_of_deferrable_loads": 2,
+ "nominal_power_of_deferrable_loads": [
+ 3000.0,
+ 750.0
+ ],
+ "operating_hours_of_each_deferrable_load": [
+ 4,
+ 0
+ ],
+ "weather_forecast_method": "scrapper",
+ "load_forecast_method": "naive",
+ "delta_forecast_daily": 1,
+ "load_cost_forecast_method": "hp_hc_periods",
+ "start_timesteps_of_each_deferrable_load": [
+ 0,
+ 0
+ ],
+ "end_timesteps_of_each_deferrable_load": [
+ 0,
+ 0
+ ],
+ "load_peak_hour_periods": {
+ "period_hp_1": [
+ {
+ "start": "02:54"
+ },
+ {
+ "end": "15:24"
+ }
+ ],
+ "period_hp_2": [
+ {
+ "start": "17:24"
+ },
+ {
+ "end": "20:24"
+ }
+ ]
+ },
+ "treat_deferrable_load_as_semi_cont": [
+ true,
+ true
+ ],
+ "set_deferrable_load_single_constant": [
+ false,
+ false
+ ],
+ "set_deferrable_startup_penalty": [
+ 0.0,
+ 0.0
+ ],
+ "load_peak_hours_cost": 0.1907,
+ "load_offpeak_hours_cost": 0.1419,
+ "production_price_forecast_method": "constant",
+ "photovoltaic_production_sell_price": 0.1419,
+ "maximum_power_from_grid": 9000,
+ "maximum_power_to_grid": 9000,
+ "pv_module_model": [
+ "CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M"
+ ],
+ "pv_inverter_model": [
"Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_" + ], + "surface_tilt": [ + 30 + ], + "surface_azimuth": [ + 205 + ], + "modules_per_string": [ + 16 + ], + "strings_per_inverter": [ + 1 + ], + "inverter_is_hybrid": false, + "compute_curtailment": false, + "set_use_battery": false, + "battery_discharge_power_max": 1000, + "battery_charge_power_max": 1000, + "battery_discharge_efficiency": 0.95, + "battery_charge_efficiency": 0.95, + "battery_nominal_energy_capacity": 5000, + "battery_minimum_state_of_charge": 0.3, + "battery_maximum_state_of_charge": 0.9, + "battery_target_state_of_charge": 0.6 +} \ No newline at end of file diff --git a/docs/config.md b/docs/config.md index e3cd9cd2..d56d40c2 100644 --- a/docs/config.md +++ b/docs/config.md @@ -1,30 +1,29 @@ # Configuration file -In this section, we will explain all the parts of the `config_emhass.yaml` needed to properly run EMHASS. +In this section, we will explain all parameters used by EMHASS. -We will find three main parts in the configuration file: - -- The parameters needed to retrieve data from Home Assistant (retrieve_hass_conf) -- The parameters to define the optimization problem (optim_conf) -- The parameters used to model the system (plant_conf) +Note: For some context, the parameters bellow are grouped in configuration catagories. EMHASS will receive the and secrets parameters from `config.json` file and secret locations, then sort and format the parameters into their retrospective categories when migrating from the `config` dictionary to the `params` dictionary. +- The parameters needed to retrieve data from Home Assistant (`retrieve_hass_conf`) +- The parameters to define the optimization problem (`optim_conf`) +- The parameters used to model the system (`plant_conf`) ## Retrieve HASS data configuration We will need to define these parameters to retrieve data from Home Assistant. There are no optional parameters. In the case of a list, an empty list is a valid entry. -- `freq`: The time step to resample retrieved data from hass. This parameter is given in minutes. It should not be defined too low or you will run into memory problems when defining the Linear Programming optimization. Defaults to 30. -- `days_to_retrieve`: We will retrieve data from now to days_to_retrieve days. Defaults to 2. -- `var_PV`: This is the name of the photovoltaic power-produced sensor in Watts from Home Assistant. For example: 'sensor.power_photovoltaics'. -- `var_load`: The name of the household power consumption sensor in Watts from Home Assistant. The deferrable loads that we will want to include in the optimization problem should be subtracted from this sensor in HASS. For example: 'sensor.power_load_no_var_loads' +- `optimization_time_step`: The time step to resample retrieved data from hass. This parameter is given in minutes. It should not be defined too low or you will run into memory problems when defining the Linear Programming optimization. Defaults to 30. +- `historic_days_to_retrieve`: We will retrieve data from now to historic_days_to_retrieve days. Defaults to 2. +- `sensor_power_photovoltaics`: This is the name of the photovoltaic power-produced sensor in Watts from Home Assistant. For example: 'sensor.power_photovoltaics'. +- `sensor_power_load_no_var_loads`: The name of the household power consumption sensor in Watts from Home Assistant. The deferrable loads that we will want to include in the optimization problem should be subtracted from this sensor in HASS. 
For example: 'sensor.power_load_no_var_loads'
- `load_negative`: Set this parameter to True if the retrieved load variable is negative by convention. Defaults to False.
- `set_zero_min`: Set this parameter to True to give a special treatment for a minimum value saturation to zero for power consumption data. Values below zero are replaced by nans. Defaults to True.
-- `var_replace_zero`: The list of retrieved variables that we would want to replace nans (if they exist) with zeros. For example:
+- `sensor_replace_zero`: The list of retrieved variables that we would want to replace nan values (if they exist) with zeros. For example:
  - 'sensor.power_photovoltaics'
-- `var_interp`: The list of retrieved variables that we would want to interpolate nans values using linear interpolation. For example:
+- `sensor_linear_interp`: The list of retrieved variables that we would want to interpolate nan values using linear interpolation. For example:
  - 'sensor.power_photovoltaics'
  - 'sensor.power_load_no_var_loads'
- `method_ts_round`: Set the method for timestamp rounding, options are: first, last and nearest.
-- `continual_publish`: set to True to save entities to .json after an optimization run. Then automatically republish the saved entities *(with updated current state value)* every freq minutes. *entity data saved to data_path/entities.*
+- `continual_publish`: set to True to save entities to .json after an optimization run. Then automatically republish the saved entities *(with updated current state value)* every `optimization_time_step` minutes. *entity data saved to data_path/entities.*

A second part of this section is given by some privacy-sensitive parameters that should be included in a `secrets_emhass.yaml` file alongside the `config.json` file.

@@ -42,43 +41,43 @@ The parameters in the `secrets_emhass.yaml` file are:

These are the parameters needed to properly define the optimization problem.

- `set_use_battery`: Set to True if we should consider an energy storage device such as a Li-Ion battery. Defaults to False.
-- `delta_forecast`: The number of days for forecasted data. Defaults to 1.
-- `num_def_loads`: Define the number of deferrable loads to consider. Defaults to 2.
-- `P_deferrable_nom`: The nominal power for each deferrable load in Watts. This is a list with a number of elements consistent with the number of deferrable loads defined before. For example:
+- `delta_forecast_daily`: The number of days for forecasted data. Defaults to 1.
+- `number_of_deferrable_loads`: Define the number of deferrable loads to consider. Defaults to 2.
+- `nominal_power_of_deferrable_loads`: The nominal power for each deferrable load in Watts. This is a list with a number of elements consistent with the number of deferrable loads defined before. For example:
  - 3000
  - 750
-- `def_total_hours`: The total number of hours that each deferrable load should operate. For example:
+- `operating_hours_of_each_deferrable_load`: The total number of hours that each deferrable load should operate. For example:
  - 5
  - 8
-- `def_start_timestep`: The timestep as from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization time window). If you specify a value of 0 (or negative), the deferrable load will be optimized as from the beginning of the complete prediction horizon window. For example:
+- `start_timesteps_of_each_deferrable_load`: The timestep as from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization time window).
If you specify a value of 0 (or negative), the deferrable load will be optimized as from the beginning of the complete prediction horizon window. For example:
  - 0
  - 1
-- `def_end_timestep`: The timestep before which each deferrable load should operate. The deferrable load is not allowed to operate after the specified time step. If a value of 0 (or negative) is provided, the deferrable load is allowed to operate in the complete optimization window). For example:
+- `end_timesteps_of_each_deferrable_load`: The timestep before which each deferrable load should operate. The deferrable load is not allowed to operate after the specified time step. If a value of 0 (or negative) is provided, the deferrable load is allowed to operate in the complete optimization window. For example:
  - 0
  - 3
-- `treat_def_as_semi_cont`: Define if we should treat each deferrable load as a semi-continuous variable. Semi-continuous variables (`True`) are variables that must take a value that can be either their maximum or minimum/zero (for example On = Maximum load, Off = 0 W). Non semi-continuous (which means continuous) variables (`False`) can take any values between their maximum and minimum. For example:
+- `treat_deferrable_load_as_semi_cont`: Define if we should treat each deferrable load as a semi-continuous variable. Semi-continuous variables (`True`) are variables that must take a value that can be either their maximum or minimum/zero (for example On = Maximum load, Off = 0 W). Non semi-continuous (which means continuous) variables (`False`) can take any values between their maximum and minimum. For example:
  - True
  - True
-- `set_def_constant`: Define if we should set each deferrable load as a constant fixed value variable with just one startup for each optimization task. For example:
+- `set_deferrable_load_single_constant`: Define if we should set each deferrable load as a constant fixed value variable with just one startup for each optimization task. For example:
  - False
  - False
-- `def_start_penalty`: Set to a list of floats. For each deferrable load with a penalty `P`, each time the deferrable load turns on will incur an additional cost of `P * P_deferrable_nom * cost_of_electricity` at that time.
+- `set_deferrable_startup_penalty`: Set to a list of floats. For each deferrable load with a penalty `P`, each time the deferrable load turns on, it will incur an additional cost of `P * nominal_power_of_deferrable_loads * cost_of_electricity` at that time.

- `weather_forecast_method`: This will define the weather forecast method that will be used. The options are 'scrapper' for a web scraping method for weather forecasts from clearoutside.com and 'csv' to load a CSV file. When loading a CSV file this will be directly considered as the PV power forecast in Watts. The default CSV file path that will be used is '/data/data_weather_forecast.csv'. Defaults to the 'scrapper' method.
- `load_forecast_method`: The load forecast method that will be used. The options are 'csv' to load a CSV file or 'naive' for a simple 1-day persistence model. The default CSV file path that will be used is '/data/data_load_forecast.csv'. Defaults to 'naive'.
- `load_cost_forecast_method`: Define the method that will be used for load cost forecast. The options are 'hp_hc_periods' for peak and non-peak hours contracts and 'csv' to load custom cost from a CSV file. The default CSV file path that will be used is '/data/data_load_cost_forecast.csv'.
The following parameters and definitions are only needed if load_cost_forecast_method='hp_hc_periods':
-  - `list_hp_periods`: Define a list of peak hour periods for load consumption from the grid. This is useful if you have a contract with peak and non-peak hours. For example for two peak hour periods:
+  - `load_peak_hour_periods`: Define a list of peak hour periods for load consumption from the grid. This is useful if you have a contract with peak and non-peak hours. For example, for two peak hour periods:
    - period_hp_1:
      - start: '02:54'
      - end: '15:24'
    - period_hp_2:
      - start: '17:24'
      - end: '20:24'
-  - `load_cost_hp`: The cost of the electrical energy from the grid during peak hours in €/kWh. Defaults to 0.1907.
-  - `load_cost_hc`: The cost of the electrical energy from the grid during non-peak hours in €/kWh. Defaults to 0.1419.
+  - `load_peak_hours_cost`: The cost of the electrical energy from the grid during peak hours in €/kWh. Defaults to 0.1907.
+  - `load_offpeak_hours_cost`: The cost of the electrical energy from the grid during non-peak hours in €/kWh. Defaults to 0.1419.

-- `prod_price_forecast_method`: Define the method that will be used for PV power production price forecast. This is the price that is paid by the utility for energy injected into the grid. The options are 'constant' for a constant fixed value or 'csv' to load custom price forecasts from a CSV file. The default CSV file path that will be used is '/data/data_prod_price_forecast.csv'.
-- `prod_sell_price`: The paid price for energy injected to the grid from excedent PV production in €/kWh. Defaults to 0.065. This parameter is only needed if prod_price_forecast_method='constant'.
+- `production_price_forecast_method`: Define the method that will be used for PV power production price forecast. This is the price that is paid by the utility for energy injected into the grid. The options are 'constant' for a constant fixed value or 'csv' to load custom price forecasts from a CSV file. The default CSV file path that will be used is '/data/data_prod_price_forecast.csv'.
+- `photovoltaic_production_sell_price`: The paid price for energy injected to the grid from excess PV production in €/kWh. Defaults to 0.065. This parameter is only needed if production_price_forecast_method='constant'.

- `set_total_pv_sell`: Set this parameter to true to consider that all the PV power produced is injected to the grid. No direct self-consumption. The default is false, for a system with direct self-consumption.
- `lp_solver`: Set the name of the linear programming solver that will be used. Defaults to 'COIN_CMD'. The options are 'PULP_CBC_CMD', 'GLPK_CMD' and 'COIN_CMD'.
- `lp_solver_path`: Set the path to the LP solver. Defaults to '/usr/bin/cbc'.

@@ -94,16 +93,16 @@ The following parameters and definitions are only needed if load_cost_forecast_m

These are the technical parameters of the energy system of the household.

-- `P_from_grid_max`: The maximum power that can be supplied by the utility grid in Watts (consumption). Defaults to 9000.
-- `P_to_grid_max`: The maximum power that can be supplied to the utility grid in Watts (injection). Defaults to 9000.
+- `maximum_power_from_grid`: The maximum power that can be supplied by the utility grid in Watts (consumption). Defaults to 9000.
+- `maximum_power_to_grid`: The maximum power that can be supplied to the utility grid in Watts (injection). Defaults to 9000.

We will define the technical parameters of the PV installation. For the modeling task we rely on the PVLib Python package.
For more information see: [https://pvlib-python.readthedocs.io/en/stable/](https://pvlib-python.readthedocs.io/en/stable/) A dedicated web app will help you search for your correct PV module and inverter names: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/) If your specific model is not found in these lists then solution (1) is to pick another model as close as possible as yours in terms of the nominal power. Solution (2) would be to use SolCast and pass that data directly to emhass as a list of values from a template. Take a look at this example here: [https://emhass.readthedocs.io/en/latest/forecasts.html#example-using-solcast-forecast-amber-prices](https://emhass.readthedocs.io/en/latest/forecasts.html#example-using-solcast-forecast-amber-prices) -- `module_model`: The PV module model. For example: 'CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M'. This parameter can be a list of items to enable the simulation of mixed orientation systems, for example one east-facing array (azimuth=90) and one west-facing array (azimuth=270). When finding the correct model for your installation remember to replace all the special characters in the model name with '_'. The name of the table column for your device on the webapp will already have the correct naming convention. -- `inverter_model`: The PV inverter model. For example: 'Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_'. This parameter can be a list of items to enable the simulation of mixed orientation systems, for example, one east-facing array (azimuth=90) and one west-facing array (azimuth=270). When finding the correct model for your installation remember to replace all the special characters in the model name with '_'. The name of the table column for your device on the web app will already have the correct naming convention. +- `pv_module_model`: The PV module model. For example: 'CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M'. This parameter can be a list of items to enable the simulation of mixed orientation systems, for example one east-facing array (azimuth=90) and one west-facing array (azimuth=270). When finding the correct model for your installation remember to replace all the special characters in the model name with '_'. The name of the table column for your device on the webapp will already have the correct naming convention. +- `pv_inverter_model`: The PV inverter model. For example: 'Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_'. This parameter can be a list of items to enable the simulation of mixed orientation systems, for example, one east-facing array (azimuth=90) and one west-facing array (azimuth=270). When finding the correct model for your installation remember to replace all the special characters in the model name with '_'. The name of the table column for your device on the web app will already have the correct naming convention. - `surface_tilt`: The tilt angle of your solar panels. Defaults to 30. This parameter can be a list of items to enable the simulation of mixed orientation systems, for example, one east-facing array (azimuth=90) and one west-facing array (azimuth=270). - `surface_azimuth`: The azimuth of your PV installation. Defaults to 205. This parameter can be a list of items to enable the simulation of mixed orientation systems, for example, one east-facing array (azimuth=90) and one west-facing array (azimuth=270). - `modules_per_string`: The number of modules per string. Defaults to 16. 
This parameter can be a list of items to enable the simulation of mixed orientation systems, for example, one east-facing array (azimuth=90) and one west-facing array (azimuth=270).
@@ -113,11 +112,11 @@ If your system has a battery (set_use_battery=True), then you should define the following parameters:

-- `Pd_max`: The maximum discharge power in Watts. Defaults to 1000.
-- `Pc_max`: The maximum charge power in Watts. Defaults to 1000.
-- `eta_disch`: The discharge efficiency. Defaults to 0.95.
-- `eta_ch`: The charge efficiency. Defaults to 0.95.
-- `Enom`: The total capacity of the battery stack in Wh. Defaults to 5000.
-- `SOCmin`: The minimum allowable battery state of charge. Defaults to 0.3.
-- `SOCmax`: The maximum allowable battery state of charge. Defaults to 0.9.
-- `SOCtarget`: The desired battery state of charge at the end of each optimization cycle. Defaults to 0.6.
+- `battery_discharge_power_max`: The maximum discharge power in Watts. Defaults to 1000.
+- `battery_charge_power_max`: The maximum charge power in Watts. Defaults to 1000.
+- `battery_discharge_efficiency`: The discharge efficiency. Defaults to 0.95.
+- `battery_charge_efficiency`: The charge efficiency. Defaults to 0.95.
+- `battery_nominal_energy_capacity`: The total capacity of the battery stack in Wh. Defaults to 5000.
+- `battery_minimum_state_of_charge`: The minimum allowable battery state of charge. Defaults to 0.3.
+- `battery_maximum_state_of_charge`: The maximum allowable battery state of charge. Defaults to 0.9.
+- `battery_target_state_of_charge`: The desired battery state of charge at the end of each optimization cycle. Defaults to 0.6.
diff --git a/docs/develop.md b/docs/develop.md
index 5c30cb0f..6cd9ca4c 100644
--- a/docs/develop.md
+++ b/docs/develop.md
@@ -1,9 +1,9 @@
# EMHASS Development

-There are multiple different approaches to developing EMHASS.
-The choice depends on EMHASS mode (standalone/add-on) and preference (Python venv/DevContainer/Docker).
+There are multiple different approaches to developing for EMHASS.
+The choice depends on your preference (Python venv/Dev Container/Docker).
Below are some development workflow examples:

-_Note: It is preferred to run both addon mode, standalone mode and unittest once before submitting and pull request._
+_Note: It is preferred to run the actions and unittests once before submitting a pull request._

## Step 1 - Fork

@@ -27,13 +27,12 @@ To develop and test code choose one of the following methods:

### Method 1 - Python Virtual Environment

We can use python virtual environments to build, develop and test/unittest the code.
-This method works well with standalone mode.

_confirm terminal is in the root `emhass` directory before starting_

**Install requirements**

```bash
-python3 -m pip install -r requirements.txt #if arm try setting --extra-index-url=https://www.piwheels.org/simple
+python3 -m pip install -r requirements.txt #if on ARM, try setting --extra-index-url=https://www.piwheels.org/simple
```

**Create a developer environment:**

@@ -69,16 +68,16 @@ python3 -m pip install -e .
- Linux
```bash
export OPTIONS_PATH="${PWD}/options.json" && export USE_OPTIONS="True" ##optional to test options.json
- export CONFIG_PATH="${PWD}/config_emhass.yaml"
- export SECRETS_PATH="${PWD}/secrets_emhass.yaml"
+ export CONFIG_PATH="${PWD}/config.json"
+ export SECRETS_PATH="${PWD}/secrets_emhass.yaml" ##optional to test secrets_emhass.yaml
export DATA_PATH="${PWD}/data/"
```
- windows
- ```cmd
+ ```batch
set "OPTIONS_PATH=%cd%/options.json" & :: optional to test options.json
set "USE_OPTIONS=True" & :: optional to test options.json
- set "CONFIG_PATH=%cd%/config_emhass.yaml"
- set "SECRETS_PATH=%cd%/secrets_emhass.yaml"
+ set "CONFIG_PATH=%cd%/config.json"
+ set "SECRETS_PATH=%cd%/secrets_emhass.yaml" & :: optional to test secrets_emhass.yaml
set "DATA_PATH=%cd%/data/"
```

@@ -86,21 +85,24 @@ _Make sure `secrets_emhass.yaml` has been created and set. Copy `secrets_emhass(

**Run EMHASS**

-```
+```bash
python3 src/emhass/web_server.py
```
+or
+```bash
+emhass --action 'dayahead-optim' --config ./config.json --root ./src/emhass --costfun 'profit' --data ./data
+```

**Run unittests**

-```
+```bash
python3 -m unittest discover -s ./tests -p 'test_*.py'
```
+_Note: unittest will need to be installed prior._

-_unittest will need to be installed prior_

-### Method 2: VS-Code Debug and Run via DevContainer
+### Method 2: VS-Code Debug and Run via Dev Container

-In VS-Code, you can run a Docker DevContainer to set up a virtual environment. There you can edit and test EMHASS.
+In VS-Code, you can run a Docker Dev Container to set up a virtual environment. The Dev Container will be almost identical to the container built for EMHASS (Docker/Add-on). There you can edit and test EMHASS.

The recommended steps to run are:

@@ -109,149 +111,70 @@ The recommended steps to run are:

- Edit some code...
- Compile emhass by pressing `control+shift+p` > `Tasks: Run Task` > `EMHASS Install`. This has been set up in the [tasks.json](https://github.com/davidusb-geek/emhass/blob/master/.vscode/tasks.json) file.
- Before _run & debug_, re-run `EMHASS Install` task every time a change has been made to emhass.
+- Launch and debug the program via the [`Run and Debug`](https://code.visualstudio.com/docs/editor/debugging) tab /`Ctrl+Shift+D` > `EMHASS run`. This has been set up in the [Launch.json](https://github.com/davidusb-geek/emhass/blob/master/.vscode/launch.json).

-#### Standalone Mode
-- Launch and debug the application via selecting the [`Run and Debug`](https://code.visualstudio.com/docs/editor/debugging) tab /`Ctrl+Shift+D` > `EMHASS run` (standalone). This has been set up in the [Launch.json](https://github.com/davidusb-geek/emhass/blob/master/.vscode/launch.json) .
-- you will need input your HomeAssistant URL and HomeAssistant KEY inside of secrets_emhass.yaml
-  - Both Add-On and Standalone mods can also accept secrets via environment variables, see [Docker section bellow](method-3---docker-virtual-environment) for examples of environment variole secrets in use.
-- to change your parameters, you can edit emhass_config.yaml file before launch.
-
-#### Add-On Mode
-- Launch and debug the application via selecting the [`Run and Debug`](https://code.visualstudio.com/docs/editor/debugging) tab /`Ctrl+Shift+D` > `EMHASS run Addon` (Add-on). This has been set up in the [Launch.json](https://github.com/davidusb-geek/emhass/blob/master/.vscode/launch.json) .
-- You will need to modify the `EMHASS_URL` _(http://HAIPHERE:8123/)_ and `EMHASS_KEY` _(PLACEKEYHERE)_ inside of Launch.json that matches your HA environment before running.
-- to change your parameters, you can edit options.json file before launch.
+#### Simulate Docker Method or Add-on Method
+Since the main difference between the two methods is how secrets are passed, you can switch between the two methods as follows:
+
+**Docker**:
+  - Create a `secrets_emhass.yaml` file and append your secret parameters
+
+**Add-on**:
+  - Modify the `options.json` file to contain your secret parameters

#### Unittests

-You can run all the unittests by heading to the [`Testing`](https://code.visualstudio.com/docs/python/testing) tab on the left hand side.
- This is recommended before creating a pull request.
+Lastly, you can run all the unittests by heading to the [`Testing`](https://code.visualstudio.com/docs/python/testing) tab on the left hand side. This is recommended before creating a pull request.

### Method 3 - Docker Virtual Environment

-With Docker, you can test the production EMHASS environment in both standalone and add-on mode via modifying the build argument: `build_version` with values: `standalone`, `addon-pip`, `addon-git`, `addon-local`.
+With Docker, you can test the production EMHASS environment for both the Docker and Add-on methods.

-Depending on your choice of running standalone or addon, `docker run` will require different passed variables/arguments to function. See following examples:
-Depending on your siltation, you may wish to build EMHASS using a version from a particular git/branch or pip PyPI version. There are examples bellow for these alternative builds.
+Depending on the method you wish to test, the `docker run` command will require different passed arguments to function. See the following examples:

-_Note: Make sure your terminal is in the root `emhass` directory before running the docker build._
+_Note: Make sure your terminal is in the root `emhass` repository directory before running the docker build._

-#### Docker run add-on via with local files:
-
-**addon-local** copies the local emhass files (from your device) to compile and run in addon mode.
+#### Docker run Add-on Method:

```bash
-docker build -t emhass/docker --build-arg build_version=addon-local .
+docker build -t emhass/test .

-docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE
+# pass secrets via options.json (similar to what Home Assistant automatically creates from the addon configuration page)
+docker run -it -p 5000:5000 --name emhass-test -v ./options.json:/data/options.json emhass/test
```

**Note:**
+- To apply a file change in the local EMHASS repository, you will need to re-build and re-run the Docker image/container for the change to take effect (excluding volume-mounted (`-v`) files/folders).
+- If you are planning to modify the configs `options.json`, `secrets_emhass.yaml` or `config.json`, you can [volume mount](https://docs.docker.com/engine/storage/bind-mounts/) them with `-v`. This syncs the host file to the file inside the container.
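+For example, a minimal sketch of volume mounting a local `config.json` (assuming, as with `secrets_emhass.yaml`, that the container reads it from `/app/`; the exact in-container path is an assumption here):
+```bash
+# hypothetical sketch: sync the host config.json into the container, so host-side edits
+# only need a container restart (not an image re-build) to take effect
+docker run -it -p 5000:5000 --name emhass-test -v ./config.json:/app/config.json emhass/test
+```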
+*If running inside of Podman, add `:z` at the end of the volume mount, e.g. `-v ./options.json:/data/options.json:z`*
-
-- `addon` mode can have secret parameters passed in at run via variables `-e`, arguments (`--key`,`--url`) or via `secrets_emhass.yaml` with a volume mount
-- on file change, you will need to re-build and re-run the Docker image/container in order for the change to take effect. (excluding volume mounted configs)
-- if you are planning to modify the configs: options.json, secrets_emhass.yaml or config_emhass.yaml, you can volume mount them with `-v`:
-
-  ```bash
-  docker build -t emhass/docker --build-arg build_version=addon-local .
-
-  docker run -it -p 5000:5000 --name emhass-container -v $(pwd)/options.json:/app/options.json -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE
-  ```
-
-  This allows the editing of config files without re-building the Docker Image. On config change, restart the container to take effect:
-
-  ```bash
-  docker stop emhass-container
-
-  docker start emhass-container
-  ```
-
-#### Docker run Standalone with local files:
-
-**standalone** copies the local emhass files (from your device) to compile and run in standalone mode.
+#### Docker run for Docker Method:

```bash
-docker build -t emhass/docker --build-arg build_version=standalone .
-
-docker run -it -p 5000:5000 --name emhass-container -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml emhass/docker
-```
-
-_Standalone mode can use `secrets_emhass.yaml` to pass secret parameters (overriding secrets provided by ARG/ENV's). Copy `secrets_emhass(example).yaml` for an example._
-
-#### Docker run add-on with Git or pip:
-
-If you would like to test with the current production/master versions of emhass, you can do so via pip or Git. With Git, you can also specify other repos/branches outside of `davidusb-geek/emhass:master`.
-
-**addon-pip** will be the closest environment to the production emhass-add-on.
-However, both come with the disadvantage of not easily being able to edit the emhass package itself.
-
-**Docker run add-on git**
-
-```bash
-docker build -t emhass/docker --build-arg build_version=addon-git .
-
-docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" -v $(pwd)/options.json:/app/options.json emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE
-```
-
-To test a repo and branch outside of `davidusb-geek/emhass:master`:
-_(Utilizing build args `build_repo` and `build_branch`)_
-_Linux:_
-```bash
-repo=https://github.com/davidusb-geek/emhass.git
-branch=master
-
-docker build -t emhass/docker --build-arg build_version=addon-git --build-arg build_repo=$repo --build-arg build_branch=$branch .
-
-docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" -v $(pwd)/options.json:/app/options.json emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE
-```
-
-**Docker run add-on pip:**
-
-```bash
-docker build -t emhass/docker --build-arg build_version=addon-pip .
- -docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" -v $(pwd)/options.json:/app/options.json emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE -``` -To build with a specific pip version, set with build arg: `build_pip_version`: -```bash -docker build -t emhass/docker --build-arg build_version=addon-pip --build-arg build_pip_version='==0.7.7' . - -docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" -v $(pwd)/options.json:/app/options.json emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE -``` -
-
-_You can add or remove file volume mounts with the `-v` tag, this should override the file in the container (ex. options.json)_
-
-#### EMHASS older then **0.7.9**
-For older versions of EMHASS, you may wish to specify the _config_, _data_ and _options_ paths to avoid errors:
-```bash
-docker run ... -e OPTIONS_PATH='/app/options.json' -e CONFIG_PATH='/app/config_emhass.yaml' -e DATA_PATH='/app/data/' ...
-```
-For example pip:
-```bash
-docker build -t emhass/docker --build-arg build_version=addon-pip .
-
-docker run -it -p 5000:5000 --name emhass-container -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" -e CONFIG_PATH='/app/config_emhass.yaml' -e DATA_PATH='/app/data/' -e OPTIONS_PATH='/app/options.json' -v $(pwd)/options.json:/app/options.json emhass/docker --url YOURHAURLHERE --key YOURHAKEYHERE
+# pass the secrets_emhass.yaml file
+docker run -it -p 5000:5000 --name emhass-test -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml emhass/test
```

#### Sync with local data folder
-For those who wish to mount/sync the local `data` folder with the data folder from the docker container, volume mount the data folder with `-v` .
+For those who wish to mount/sync the local `data` folder with the data folder from inside the docker container, volume mount the data folder with `-v`.

```bash
-docker run ... -v $(pwd)/data/:/app/data ...
+docker run ... -v ./data/:/app/data ...
```

-You can also mount data (ex .csv) files separately
+You can also mount data files (e.g. .csv) separately:

```bash
-docker run... -v $(pwd)/data/heating_prediction.csv:/app/data/ ...
+docker run ... -v ./data/heating_prediction.csv:/app/data/heating_prediction.csv ...
```

#### Issue with TARGETARCH

If your docker build fails with an error related to `TARGETARCH`, it may be best to add your device's architecture manually:

-Example with armhf architecture
+Example with the `armhf` architecture:
```bash
docker build ... --build-arg TARGETARCH=armhf --build-arg os_version=raspbian ...
```
-*For `armhf` only, create a build-arg for `os_version=raspbian`*
+*For `armhf` only, also pass a build-arg for `os_version=raspbian`*

#### Delete built Docker image

We can delete the Docker image and container via:

```bash
-docker rm -f emhass-container #force delete Docker container
+# force delete Docker container
+docker rm -f emhass-test

-docker rmi emhass/docker #delete Docker image
+# delete Docker image
+docker rmi emhass/test
```

#### Other Docker Options

@@ -272,20 +197,18 @@ As editing and testing EMHASS via docker may be repetitive (rebuilding image and

**For rapid Docker testing, try a command chain:**
_Linux:_
```bash
-docker build -t emhass/docker --build-arg build_version=addon-local . && docker run --rm -it -p 5000:5000 -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name emhass-container emhass/docker
`--rm` has been added to the `docker run` to delete the container once ended to avoid manual deletion every time._ -_This use case may not require any volume mounts (unless you use secrets_emhass.yaml) as the Docker build process will pull the latest versions of the configs as it builds._ - +_The example command chain rebuilds the Docker image, and runs a new container with the newly built image. The `--rm` has been added to the `docker run` to delete the container once ended to avoid manual deletion every time._ +_This use case may not require any volume mounts (unless you use secrets_emhass.yaml) as the Docker build process will pull the latest configs as it builds._ **Environment Variables** -Running addon mode, you can also pass location, key and url secret parameters via environment variables. + you can also pass location, key and url secret parameters via environment variables. ```bash -docker build -t emhass/docker --build-arg build_version=addon-local . +docker build -t emhass/test --build-arg build_version=addon-local . -docker run -it -p 5000:5000 --name emhass-container -e URL="YOURHAURLHERE" -e KEY="YOURHAKEYHERE" -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" emhass/docker +docker run -it -p 5000:5000 --name emhass-test -e URL="YOURHAURLHERE" -e KEY="YOURHAKEYHERE" -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" emhass/test ``` This allows the user to set variables before the build @@ -299,13 +222,13 @@ export LAT="45.83" export LON="6.86" export ALT="4807.8" -docker build -t emhass/docker --build-arg build_version=addon-local . +docker build -t emhass/test --build-arg build_version=addon-local . -docker run -it -p 5000:5000 --name emhass-container -e EMHASS_KEY -e EMHASS_URL -e TIME_ZONE -e LAT -e LON -e ALT emhass/docker +docker run -it -p 5000:5000 --name emhass-test -e EMHASS_KEY -e EMHASS_URL -e TIME_ZONE -e LAT -e LON -e ALT emhass/test ``` ### Example Docker testing pipeline -The following pipeline will run unittest and most of the EMHASS actions in both Standalone and Add-on mode. This may be a good options for those who wish to test their changes against the production EMHASS environment. +The following pipeline will run unittest and most of the EMHASS actions. This may be a good option for those who wish to test their changes against the production EMHASS environment. *Linux:* *Assuming docker and git installed* @@ -323,12 +246,12 @@ git checkout $branch ``` ```bash -#testing addon (build and run) -docker build -t emhass/docker --build-arg build_version=addon-local . -docker run --rm -it -p 5000:5000 --name emhass-container -v $(pwd)/data/heating_prediction.csv:/app/data/heating_prediction.csv -v $(pwd)/options.json:/app/options.json -e LAT="45.83" -e LON="6.86" -e ALT="4807.8" -e TIME_ZONE="Europe/Paris" emhass/docker --url $HAURL --key $HAKEY +# testing with option.json (replace -v options.json with secrets_emhass.yaml to test both secret files) +docker build -t emhass/test . 
+docker run --rm -it -p 5000:5000 --name emhass-test -v $(pwd)/data/heating_prediction.csv:/app/data/heating_prediction.csv -v $(pwd)/options.json:/app/options.json emhass/test ``` ```bash -#run actions on a separate terminal +# run actions one-by-one, on a separate terminal curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6}' http://localhost:5000/action/naive-mpc-optim curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/perfect-optim curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/dayahead-optim @@ -341,49 +264,22 @@ curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/a ``` ```bash -#testing standalone (build and run) -docker build -t emhass/docker --build-arg build_version=standalone . -#make secrets_emhass -cat < secrets_emhass.yaml -hass_url: $HAURL -long_lived_token: $HAKEY -time_zone: Europe/Paris -lat: 45.83 -lon: 6.86 -alt: 4807.8 -EOT -docker run --rm -it -p 5000:5000 --name emhass-container -v $(pwd)/data/heating_prediction.csv:/app/data/heating_prediction.csv -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml emhass/docker +# testing unittest (add extra necessary files via volume mount) +docker run --rm -it -p 5000:5000 --name emhass-test -v $(pwd)/tests/:/app/tests/ -v $(pwd)/data/:/app/data/ -v $(pwd)/"secrets_emhass(example).yaml":/app/"secrets_emhass(example).yaml" -v $(pwd)/options.json:/app/options.json -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml emhass/test ``` ```bash -#run actions on a separate terminal -curl -i -H 'Content-Type:application/json' -X POST -d '{"pv_power_forecast":[0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93], "prediction_horizon":10, "soc_init":0.5,"soc_final":0.6}' http://localhost:5000/action/naive-mpc-optim -curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/perfect-optim -curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/dayahead-optim -curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/forecast-model-fit -curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/forecast-model-predict -curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/forecast-model-tune -curl -i -H "Content-Type:application/json" -X POST -d '{"csv_file": "heating_prediction.csv", "features": ["degreeday", "solar"], "target": "hour", "regression_model": "RandomForestRegression", "model_type": "heating_hours_degreeday", "timestamp": "timestamp", "date_features": ["month", "day_of_week"], "new_values": [12.79, 4.766, 1, 2] }' http://localhost:5000/action/regressor-model-fit -curl -i -H "Content-Type:application/json" -X POST -d '{"mlr_predict_entity_id": "sensor.mlr_predict", "mlr_predict_unit_of_measurement": "h", "mlr_predict_friendly_name": "mlr predictor", "new_values": [8.2, 7.23, 2, 6], "model_type": "heating_hours_degreeday" }' http://localhost:5000/action/regressor-model-predict -curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data -``` - -```bash -#testing unittest (run standalone with extra files) -docker run --rm -it -p 5000:5000 --name emhass-container -v 
$(pwd)/tests/:/app/tests/ -v $(pwd)/data/:/app/data/ -v $(pwd)/"secrets_emhass(example).yaml":/app/"secrets_emhass(example).yaml" -v $(pwd)/options.json:/app/options.json -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml emhass/docker
-```
```bash
-#run unittest's on separate terminal
-docker exec emhass-container apt-get update
-docker exec emhass-container apt-get install python3-requests-mock -y
-docker exec emhass-container python3 -m unittest discover -s ./tests -p 'test_*.py' | grep error
+# run unittests on a separate terminal after installing requests-mock
+docker exec emhass-test apt-get update
+docker exec emhass-test apt-get install python3-requests-mock -y
+docker exec emhass-test python3 -m unittest discover -s ./tests -p 'test_*.py' | grep error
```
+*Note: may need to set `--build-arg TARGETARCH=YOUR-ARCH` in docker build*

-User may wish to re-test with tweaked parameters such as `lp_solver`, `weather_forecast_method` and `load_forecast_method`, in `config_emhass.yaml` *(standalone)* or `options.json` *(addon)*, to broaden the testing scope.
-*see [EMHASS & EMHASS-Add-on differences](https://emhass.readthedocs.io/en/latest/differences.html) for more information on how these config_emhass & options files differ*
+Users may wish to re-test with tweaked parameters such as `lp_solver`, `weather_forecast_method` and `load_forecast_method` in `config.json` to broaden the testing scope.
+*See [Differences](https://emhass.readthedocs.io/en/latest/differences.html) for more information on how the different methods of running EMHASS differ.*

-*Note: may need to set `--build-arg TARGETARCH=YOUR-ARCH` in docker build*

## Step 3 - Pull request

-Once developed, commit your code, and push to your fork.
-Then submit a pull request with your fork to the [davidusb-geek/emhass@master](https://github.com/davidusb-geek/emhass) repository.
+Once developed, commit your code, and push the commit to your fork on GitHub.
+Once ready, submit a pull request with your fork to the [davidusb-geek/emhass@master](https://github.com/davidusb-geek/emhass) repository.
diff --git a/docs/differences.md b/docs/differences.md
index 837a22e2..50e349c0 100644
--- a/docs/differences.md
+++ b/docs/differences.md
@@ -1,19 +1,20 @@
# EMHASS & EMHASS-Add-on differences

-Users will pass parameters into EMHASS differently, based on running *Standalone* mode or *addon* Mode.
-This page tries to help to resolve the common confusion between the two.
-_It's best to see EMHASS-Add-on as a Home Assistant Docker wrapper for EMHASS. However, because of this containerization, certain changes are made between the two modes._
+Users will pass parameters and secrets into EMHASS differently, based on which method you're running EMHASS with (Add-on, Docker, Python).
+This page tries to help resolve the common confusion between the different methods.

-## Configuration & parameter differences
-Both EMHASS & EMHASS-Add-on utilize `config_emhass.yaml` for receiving parameters.
-Where they diverge is EMHASS-Add-ons additional use of `options.json`, generated by Home Assistants `Configuration Page`.
-Any passed parameters given in `options.json` will overwrite the parameters hidden in the `config_emhass.yaml` file in EMHASS.
_(this results in `config_emhass.yaml` used for parameter default fallback if certain required parameters were missing in `options.json`)_

+## Legacy Parameter definitions
+After EMHASS version `0.10.6`, EMHASS has merged the parameter configuration from the legacy modes (`config_emhass.yaml` & `options.json`) into a central `config.json`.

-The parameters naming convention has also been changed in `options.json`, designed to make it easier for the user to understand.
+The resulting change saw a migration of the parameter naming conventions.
+*Many, but not all, of the new parameter definitions seen in `config.json` are copied from the Add-on.*

-See below for a list of associations between the parameters from `config_emhass.yaml` and `options.json`:
-*You can view the current parameter differences in the [`Utils.py`](https://github.com/davidusb-geek/emhass/blob/master/src/emhass/utils.py) file under the `build_params` function.*
+To convert from the legacy method to the new `config.json` method (EMHASS >= 0.10.6), see this video guide:
+- Standalone Mode: https://youtu.be/T85DAdXnGFY?feature=shared&t=938
+- Addon Mode: https://youtu.be/T85DAdXnGFY?feature=shared&t=1341

-| config | config_emhass.yaml | options.json | options.json list dictionary key |
+See below for a list of associations between the parameters from `config_emhass.yaml` *(Legacy Standalone mode)*, `options.json` *(Legacy Add-on mode)* and the `config.json` parameter definitions:
+
+| config categories | config_emhass.yaml *(Legacy)* | config.json | options.json list dictionary key *(Legacy)* |
| ------ | ------------------ | ------------ | -------------------------------- |
| retrieve_hass_conf | freq | optimization_time_step | |
| retrieve_hass_conf | days_to_retrieve | historic_days_to_retrieve | |
@@ -74,35 +75,46 @@ See below for a list of associations between the parameters from `config_emhass.
| plant_conf | SOCtarget | battery_target_state_of_charge | |

Descriptions of each parameter can be found at:
-- [`Configuration file`](https://emhass.readthedocs.io/en/latest/config.html) on EMHASS
-- [`en.yaml`](https://github.com/davidusb-geek/emhass-add-on/blob/main/emhass/translations/en.yaml) on EMHASS-Add-on
+- [`Configuration Documentation`](https://emhass.readthedocs.io/en/latest/config.html)
+- Configuration page on the EMHASS web server (e.g. http://localhost:5000/configuration)

## Passing in secret parameters

-Secret parameters get passed differently, depending on which mode you choose. Alternative options are also present for passing secrets if running EMHASS separately from Home Assistant. _(I.e. not via EMHASS-Add-on)_
+Secret parameters are passed differently, depending on which method you choose. Alternative options are also present for passing secrets if you are running EMHASS separately from Home Assistant. _(i.e. not via EMHASS-Add-on)_
+
+### EMHASS with Docker or Python
+Running EMHASS in Docker or Python by default retrieves all secret parameters via a passed `secrets_emhass.yaml` file. An example template has been provided under the name `secrets_emhass(example).yaml` on the EMHASS repo.

-### EMHASS (with standalone mode)
-Running EMHASS in standalone mode's default workflow retrieves all secret parameters via a passed `secrets_emhass.yaml` file. An example template has been provided under the name `secrets_emhass(example).yaml`.
+To pass the secrets file:
+- On Docker: *(via volume mount)*
+```bash
+docker run ... -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml ...
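+# note: under Podman, append :z to the volume mount, e.g.
+# docker run ... -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml:z ...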
+```
+- On Python: *(optional: specify the path as an argument)*
+```bash
+emhass ... --secrets=./secrets_emhass.yaml ...
+```

#### Alternative Options
-For users who are running EMHASS with methods other than EMHASS-Add-on, secret parameters can be passed with the use of arguments and/or environment variables. _(instead of `secrets_emhass.yaml`)_
+For users who are running EMHASS with methods other than EMHASS-Add-on, secret parameters can be passed with the use of environment variables. _(instead of `secrets_emhass.yaml`)_

-Some arguments include: `--url` and `--key`
Some environment variables include: `TIME_ZONE`, `LAT`, `LON`, `ALT`, `EMHASS_URL`, `EMHASS_KEY`

-_Note: As of writing, EMHASS standalone will override ARG/ENV secret parameters if the file is present._
+_Note: As of writing, EMHASS will override ENV secret parameters if the `secrets_emhass.yaml` file is present._

For more information on passing arguments and environment variables using docker, have a look at some examples from the [Configuration and Installation](https://emhass.readthedocs.io/en/latest/intro.html#configuration-and-installation) and [EMHASS Development](https://emhass.readthedocs.io/en/latest/develop.html) pages.

-### EMHASS-Add-on (addon mode)
+### EMHASS-Add-on
By default, the `URL` and `KEY` parameters have been set to `empty`/blank in the Home Assistant configuration page for the EMHASS add-on. This results in EMHASS calling its local `Supervisor API` to gain access. This is the easiest method, as there is no user input necessary.

-However, if you wish to receive/send sensor data to a different Home Assistant environment, set url and key values in the `hass_url` & `long_lived_token` hidden parameters.
+However, if you wish to receive/send sensor data to a different Home Assistant environment, set the URL and key values in the `hass_url` & `long_lived_token` hidden parameters on the Home Assistant EMHASS add-on configuration page. *(e.g. http://localhost:8123/hassio/addon/emhass/config)*

- `hass_url` example: `https://192.168.1.2:8123/`
- `long_lived_token` generated from the `Long-lived access tokens` section in your user profile settings
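As a sketch, these two hidden parameters might be filled in like this on the add-on configuration page (the token value here is a hypothetical placeholder):

```yaml
hass_url: https://192.168.1.2:8123/
long_lived_token: eyJhbGciOiJIUzI1NiJ9.hypothetical.placeholder
```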

-Secret Parameters such as: `time_zone`, `lon`, `lat` and `alt` are also automatically passed in via the Home Assistants environment. _(Values set in the Home Assistants config/general page)_ +Secret parameters such as: `solcast_api_key`, `solcast_rooftop_id` and `solar_forecast_kwp` _(used by their respective `weather_forecast_method` parameter values)_, can also be set via hidden parameters in the add-on configuration page. + +Secret parameters such as: `time_zone`, `lon`, `lat` and `alt` are also automatically passed in via Home Assistant's `Supervisor API`. _(Values set in Home Assistant's config/general page)_ _Note: Local currency could also be obtained via the Home Assistant environment, however as of writing, this functionality has not yet been developed._ -Secret Parameters such as: `solcast_api_key`, `solcast_rooftop_id` and `solar_forecast_kwp` _(used by their respective `weather_forecast_method` parameter values)_, can also be set via hidden parameters in the configuration page. + diff --git a/docs/forecasts.md b/docs/forecasts.md index 76c60247..864d4ade 100644 --- a/docs/forecasts.md +++ b/docs/forecasts.md @@ -17,7 +17,7 @@ Then there are the methods that are specific to each type of forecast and that p The `get_power_from_weather` method is proposed here to convert irradiance data to electrical power. The PVLib module is used to model the PV plant. A dedicated web app will help you search for your correct PV module and inverter: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/) The specific methods for the load forecast is a first method (`naive`) that uses a naive approach, also called persistence. It simply assumes that the forecast for -a future period will be equal to the observed values in a past period. The past period is controlled using the parameter `delta_forecast`. A second method (`mlforecaster`) +a future period will be equal to the observed values in a past period. The past period is controlled using the parameter `delta_forecast_daily`. A second method (`mlforecaster`) uses an internal custom forecasting model using machine learning. There is a section in the documentation explaining how to use this method. ```{note} @@ -93,7 +93,7 @@ If you use the Solar.Forecast or Solcast methods, or explicitly pass the PV powe ## Load power forecast -The default method for load forecast is a naive method, also called persistence. This is obtained using `method=naive`. This method simply assumes that the forecast for a future period will be equal to the observed values in a past period. The past period is controlled using the parameter `delta_forecast` and the default value for this is 24h. +The default method for load forecast is a naive method, also called persistence. This is obtained using `method=naive`. This method simply assumes that the forecast for a future period will be equal to the observed values in a past period. The past period is controlled using the parameter `delta_forecast_daily` and the default value for this is 24h.
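To make the persistence idea concrete, here is a minimal sketch (not the EMHASS implementation; it simply assumes half-hourly observed load held in a pandas series):

```python
# Sketch of the naive/persistence method: the forecast for the next period
# replays the load observed over the previous delta_forecast_daily window.
import pandas as pd

# Hypothetical half-hourly observed load for the past 24h (48 steps of 30 min)
past_load = pd.Series(
    [500.0 + 10.0 * i for i in range(48)],
    index=pd.date_range("2024-01-01 00:00", periods=48, freq="30min"),
)
delta_forecast_daily = pd.Timedelta(days=1)

# Persistence: shift the observed window forward by delta_forecast_daily
naive_forecast = past_load.copy()
naive_forecast.index = naive_forecast.index + delta_forecast_daily
print(naive_forecast.head())
```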
This is presented graphically here: @@ -126,15 +126,15 @@ When using this method you can provide a list of peak-hour periods, so you can a As an example for a two peak-hour periods contract you will need to define the following list in the configuration file: - - list_hp_periods: + - load_peak_hour_periods: - period_hp_1: - start: '02:54' - end: '15:24' - period_hp_2: - start: '17:24' - end: '20:24' - - load_cost_hp: 0.1907 - - load_cost_hc: 0.1419 + - load_peak_hours_cost: 0.1907 + - load_offpeak_hours_cost: 0.1419 This example is presented graphically here: @@ -144,7 +144,7 @@ This example is presented graphically here: The default method for this forecast is simply a constant value. This can be obtained using `method=constant`. -Then you will need to define the `prod_sell_price` variable to provide the correct price for energy injected to the grid from excedent PV production in €/kWh. +Then you will need to define the `photovoltaic_production_sell_price` variable to provide the correct price for energy injected to the grid from excess PV production in €/kWh. ## Passing your own forecast data @@ -169,7 +169,7 @@ The possible dictionary keys to pass data are: - `prod_price_forecast` for the PV production selling price forecast. -For example, if using the add-on or the standalone docker installation you can pass this data as a list of values to the data dictionary during the `curl` POST: +For example, if using the add-on or the Docker method, you can pass this data as a list of values to the data dictionary during the `curl` POST: ```bash curl -i -H "Content-Type: application/json" -X POST -d '{"pv_power_forecast":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70, 141.22, 246.18, 513.5, 753.27, 1049.89, 1797.93, 1697.3, 3078.93, 1164.33, 1046.68, 1559.1, 2091.26, 1556.76, 1166.73, 1516.63, 1391.13, 1720.13, 820.75, 804.41, 251.63, 79.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}' http://localhost:5000/action/dayahead-optim ``` @@ -220,7 +220,7 @@ An MPC call may look like this for 4 deferrable loads: state_attr('sensor.amber_feed_in_forecast', 'forecasts')|map(attribute='per_kwh')|list)[:48]) }}, \"pv_power_forecast\":{{states('sensor.solcast_24hrs_forecast') }}, \"prediction_horizon\":48,\"soc_init\":{{(states('sensor.powerwall_charge')|float(0))/100 - }},\"soc_final\":0.05,\"def_total_hours\":[2,0,0,0]}' http://localhost:5000/action/naive-mpc-optim" + }},\"soc_final\":0.05,\"operating_hours_of_each_deferrable_load\":[2,0,0,0]}' http://localhost:5000/action/naive-mpc-optim" ``` Thanks to [@purcell_labs](https://github.com/purcell-lab) for this example configuration. @@ -317,7 +317,7 @@ shell_command: ## Now/current values in forecasts -When implementing MPC applications with high optimization frequencies it can be interesting if, at each MPC iteration, the forecast values are updated with the real now/current values measured from live data. This is useful to improve the accuracy of the short-term forecasts. As shown in some of the references below, mixing with a persistence model makes sense since this type of model performs very well at low temporal resolutions (intra-hour). +When implementing MPC applications with a high optimization frequency (i.e. a short `optimization_time_step`), it can be interesting if, at each MPC iteration, the forecast values are updated with the real now/current values measured from live data. This is useful to improve the accuracy of the short-term forecasts.
As shown in some of the references below, mixing with a persistence model makes sense since this type of model performs very well at low temporal resolutions (intra-hour). A simple integration of current/now values for PV and load forecast is implemented using a mixed one-observation persistence model and the one-step-ahead forecasted values from the current passed method. diff --git a/docs/lpems.md b/docs/lpems.md index 4c56eb29..03b86a0e 100644 --- a/docs/lpems.md +++ b/docs/lpems.md @@ -218,16 +218,16 @@ When applying this controller, the following `runtimeparams` should be defined: - `soc_final` for the final value of the battery SOC for the current iteration of the MPC. -- `def_total_hours` for the list of deferrable loads functioning hours. These values can decrease as the day advances to take into account receding horizon daily energy objectives for each deferrable load. +- `operating_hours_of_each_deferrable_load` for the list of operating hours for each deferrable load. These values can decrease as the day advances to take into account receding horizon daily energy objectives for each deferrable load. -- `def_start_timestep` for the timestep as from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization timewindow). If you specify a value of 0 (or negative), the deferrable load will be optimized as from the beginning of the complete prediction horizon window. +- `start_timesteps_of_each_deferrable_load` for the timestep from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization time window). If you specify a value of 0 (or negative), the deferrable load will be optimized from the beginning of the complete prediction horizon window. -- `def_end_timestep` for the timestep before which each deferrable load should operate (if you don't want the deferrable load to use the whole optimization timewindow). If you specify a value of 0 (or negative), the deferrable load will be optimized over the complete prediction horizon window. +- `end_timesteps_of_each_deferrable_load` for the timestep before which each deferrable load should operate (if you don't want the deferrable load to use the whole optimization time window). If you specify a value of 0 (or negative), the deferrable load will be optimized over the complete prediction horizon window. In a practical use case, the values for `soc_init` and `soc_final` for each MPC optimization can be taken from the initial day-ahead optimization performed at the beginning of each day. ### Time windows for deferrable loads -Since v0.7.0, the user has the possibility to limit the operation of each deferrable load to a specific timewindow, which can be smaller than the prediction horizon. This is done by means of the `def_start_timestep` and `def_end_timestep` parameters. These parameters can either be set in the configuration screen of the Home Assistant EMHASS add-on, or in the config_emhass.yaml file, or provided as runtime parameters. +Since v0.7.0, the user can limit the operation of each deferrable load to a specific time window, which can be smaller than the prediction horizon. This is done by means of the `start_timesteps_of_each_deferrable_load` and `end_timesteps_of_each_deferrable_load` parameters. These parameters can either be set in the configuration screen of the Home Assistant EMHASS add-on, or in the config_emhass.yaml file, or provided as runtime parameters.
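Since both parameters are expressed in timesteps rather than clock time, converting an availability window by hand is straightforward. A small sketch (the helper below is hypothetical, not an EMHASS function; it assumes a 30 min `optimization_time_step` and a horizon that starts now); the values it produces, 4 and 27, match the runtime example that follows:

```python
# Sketch: convert wall-clock availability into the timestep indices expected by
# start_timesteps_of_each_deferrable_load / end_timesteps_of_each_deferrable_load.
# Assumes a 30 min optimization_time_step and a prediction horizon starting "now".
from datetime import datetime

optimization_time_step_min = 30

def to_timestep(now: datetime, moment: datetime) -> int:
    """Whole optimization timesteps between `now` and `moment`."""
    return int((moment - now).total_seconds() // (optimization_time_step_min * 60))

now = datetime(2024, 1, 1, 8, 0)
print(to_timestep(now, datetime(2024, 1, 1, 10, 0)))   # EV available from 10:00 -> 4
print(to_timestep(now, datetime(2024, 1, 1, 21, 30)))  # EV gone by 21:30 -> 27
```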
Take the example of two electric vehicles that need to charge, but which are not available during the whole prediction horizon: ![image](./images/deferrable_timewindow_evexample.png) @@ -238,7 +238,7 @@ Either in the Home Assistant add-on config screen: Either as runtime parameter: ``` -curl -i -H 'Content-Type:application/json' -X POST -d '{"prediction_horizon":30, "def_total_hours":[4,2],"def_start_timestep":[4,0],"def_end_timestep":[27,23]}' http://localhost:5000/action/naive-mpc-optim +curl -i -H 'Content-Type:application/json' -X POST -d '{"prediction_horizon":30, "operating_hours_of_each_deferrable_load":[4,2],"start_timesteps_of_each_deferrable_load":[4,0],"end_timesteps_of_each_deferrable_load":[27,23]}' http://localhost:5000/action/naive-mpc-optim ``` Please note that the proposed deferrable load time windows will be submitted to a validation step & can be automatically corrected. diff --git a/docs/mlforecaster.md b/docs/mlforecaster.md index 5933bae8..e3ae35bc 100644 --- a/docs/mlforecaster.md +++ b/docs/mlforecaster.md @@ -16,10 +16,10 @@ To train a model use the `forecast-model-fit` end point. Some parameters can be optionally defined at runtime: -- `days_to_retrieve`: the total days to retrieve from Home Assistant for model training. Define this to retrieve as much history data as possible. +- `historic_days_to_retrieve`: the total days to retrieve from Home Assistant for model training. Define this to retrieve as much history data as possible. ```{note} -The minimum number of `days_to_retrieve` is hard coded to 9 by default. However, it is advised to provide more data for better accuracy by modifying your Home Assistant recorder settings. +The minimum number of `historic_days_to_retrieve` is hard-coded to 9 by default. However, it is advised to provide more data for better accuracy by modifying your Home Assistant recorder settings. ``` - `model_type`: define the type of model forecast that this will be used for. For example: `load_forecast`. This should be a unique name if you are using multiple custom forecast models. @@ -37,7 +37,7 @@ The minimum number of `days_to_retrieve` is hard coded to 9 by default. However, The default values for these parameters are: ```yaml runtimeparams = { - "days_to_retrieve": 9, + "historic_days_to_retrieve": 9, "model_type": "load_forecast", "var_model": "sensor.power_load_no_var_loads", "sklearn_model": "KNeighborsRegressor", @@ -147,7 +147,7 @@ The hyperparameter tuning using Bayesian optimization improves the bare KNN regr ```{warning} -The tuning routine can be computing intense. If you have problems with computation times, try to reduce the `days_to_retrieve` parameter. In the example shown, for a 240-day train period, the optimization routine took almost 20 min to finish on an amd64 Linux architecture machine with an i5 processor and 8 GB of RAM. This is a task that should be performed once in a while, for example, every week. +The tuning routine can be computationally intensive. If you have problems with computation times, try to reduce the `historic_days_to_retrieve` parameter. In the example shown, for a 240-day train period, the optimization routine took almost 20 min to finish on an amd64 Linux architecture machine with an i5 processor and 8 GB of RAM. This is a task that should be performed once in a while, for example, every week. ``` ## How does this work?
diff --git a/docs/mlregressor.md b/docs/mlregressor.md index 620c564e..defd5dd7 100644 --- a/docs/mlregressor.md +++ b/docs/mlregressor.md @@ -143,16 +143,16 @@ The predict method will publish the result to a Home Assistant sensor. ## Storing CSV files -### Standalone container - how to mount a .csv files in data_path folder -If running EMHASS as a standalone container, you will need to volume mount a folder to be the `data_path`, or mount a single .csv file inside `data_path` +### Docker container - how to mount .csv files in the data_path folder +If running EMHASS with the Docker method, you will need to volume mount a folder to be the `data_path`, or mount a single .csv file inside `data_path`. Example of mounting a folder as data_path *(.csv files stored inside)* ```bash -docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v $(pwd)/data:/app/data -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS +docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v ./data:/app/data -v ./config_emhass.yaml:/app/config_emhass.yaml -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS ``` Example of mounting a single CSV file ```bash -docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v $(pwd)/data/heating_prediction.csv:/app/data/heating_prediction.csv -v $(pwd)/config_emhass.yaml:/app/config_emhass.yaml -v $(pwd)/secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS +docker run -it --restart always -p 5000:5000 -e LOCAL_COSTFUN="profit" -v ./data/heating_prediction.csv:/app/data/heating_prediction.csv -v ./config_emhass.yaml:/app/config_emhass.yaml -v ./secrets_emhass.yaml:/app/secrets_emhass.yaml --name DockerEMHASS ``` ### Add-on - How to store data in a CSV file from Home Assistant diff --git a/docs/study_case.md b/docs/study_case.md index 26de4f78..4d73488b 100644 --- a/docs/study_case.md +++ b/docs/study_case.md @@ -86,7 +86,7 @@ For this system, the total value of the obtained cost function is -1.23 EUR, a s As we showed in the forecast module section, we can pass our own forecast data using lists of values passed at runtime using templates. However, it is possible to also pass other data during runtime to automate energy management. -For example, let's suppose that for the default configuration with two deferrable loads, we want to correlate and control them to the outside temperature. This will be used to build a list of the total number of hours for each deferrable load (`def_total_hours`). In this example, the first deferrable load is a water heater and the second is the pool pump. +For example, let's suppose that for the default configuration with two deferrable loads, we want to correlate and control them according to the outside temperature. This will be used to build a list of the total number of hours for each deferrable load (`operating_hours_of_each_deferrable_load`). In this example, the first deferrable load is a water heater and the second is the pool pump.
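The mapping from temperature to operating hours can be as simple as a couple of thresholds. A hypothetical sketch of the idea (the thresholds and hours here are illustrative only; the walkthrough below builds the actual Home Assistant template sensor and tunes the values):

```python
# Sketch: derive operating_hours_of_each_deferrable_load from the outside
# temperature. Thresholds and hours are illustrative assumptions, not tuned values.
def operating_hours(outdoor_temp_c: float) -> list[int]:
    water_heater_hours = 2 if outdoor_temp_c < 10 else 1  # heat more when cold
    pool_pump_hours = 4 if outdoor_temp_c > 20 else 0     # pump more when hot
    return [water_heater_hours, pool_pump_hours]

print(operating_hours(5.0))   # cold day -> [2, 0]
print(operating_hours(25.0))  # hot day  -> [1, 4]
```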
We will begin by defining a temperature sensor on a 12 hours sliding window using the filter platform for the outside temperature: ``` @@ -121,7 +121,7 @@ The values for the total number of operating hours were tuned by trial and error Finally, my two shell commands for EMHASS will look like this: ``` shell_command: - dayahead_optim: "curl -i -H \"Content-Type: application/json\" -X POST -d '{\"def_total_hours\":{{states('sensor.list_operating_hours_of_each_deferrable_load')}}}' http://localhost:5000/action/dayahead-optim" + dayahead_optim: "curl -i -H \"Content-Type: application/json\" -X POST -d '{\"operating_hours_of_each_deferrable_load\":{{states('sensor.list_operating_hours_of_each_deferrable_load')}}}' http://localhost:5000/action/dayahead-optim" publish_data: "curl -i -H \"Content-Type: application/json\" -X POST -d '{}' http://localhost:5000/action/publish-data" ``` The dedicated automation for these shell commands can be for example: diff --git a/options.json b/options.json index 01f83b52..6c4686ce 100644 --- a/options.json +++ b/options.json @@ -1,156 +1,13 @@ { "hass_url": "empty", "long_lived_token": "empty", - "logging_level": "INFO", - "costfun": "profit", - "optimization_time_step": 30, - "historic_days_to_retrieve": 2, - "method_ts_round": "nearest", - "continual_publish": false, - "optional_solcast_api_key": "empty", - "optional_solcast_rooftop_id": "empty", - "optional_solar_forecast_kwp": 5, - "data_path": "default", - "set_total_pv_sell": false, - "lp_solver": "COIN_CMD", - "lp_solver_path": "/usr/bin/cbc", - "set_nocharge_from_grid": false, - "set_nodischarge_to_grid": true, - "set_battery_dynamic": false, - "battery_dynamic_max": 0.9, - "battery_dynamic_min": -0.9, - "weight_battery_discharge": 1.0, - "weight_battery_charge": 1.0, - "sensor_power_photovoltaics": "sensor.power_photovoltaics", - "sensor_power_load_no_var_loads": "sensor.power_load_no_var_loads", - "load_negative": false, - "set_zero_min": true, - "number_of_deferrable_loads": 2, - "list_nominal_power_of_deferrable_loads": [ - { - "nominal_power_of_deferrable_loads": 3000 - }, - { - "nominal_power_of_deferrable_loads": 750 - } - ], - "list_operating_hours_of_each_deferrable_load": [ - { - "operating_hours_of_each_deferrable_load": 4 - }, - { - "operating_hours_of_each_deferrable_load": 0 - } - ], - "weather_forecast_method": "scrapper", + "solcast_api_key": "empty", + "solcast_rooftop_id": "empty", + "solar_forecast_kwp": 0, "time_zone": "Europe/Paris", - "Latitude": 45.83, - "Longitude": 6.86, - "Altitude": 4807.8, - "load_forecast_method": "naive", - "delta_forecast_daily": 1, - "load_cost_forecast_method": "hp_hc_periods", - "list_start_timesteps_of_each_deferrable_load": [ - { - "start_timesteps_of_each_deferrable_load": 0 - }, - { - "start_timesteps_of_each_deferrable_load": 0 - } - ], - "list_end_timesteps_of_each_deferrable_load": [ - { - "end_timesteps_of_each_deferrable_load": 0 - }, - { - "end_timesteps_of_each_deferrable_load": 0 - } - ], - "list_peak_hours_periods_start_hours": [ - { - "peak_hours_periods_start_hours": "02:54" - }, - { - "peak_hours_periods_start_hours": "17:24" - } - ], - "list_peak_hours_periods_end_hours": [ - { - "peak_hours_periods_end_hours": "15:24" - }, - { - "peak_hours_periods_end_hours": "20:54" - } - ], - "list_treat_deferrable_load_as_semi_cont": [ - { - "treat_deferrable_load_as_semi_cont": true - }, - { - "treat_deferrable_load_as_semi_cont": true - } - ], - "list_set_deferrable_load_single_constant": [ - { - "set_deferrable_load_single_constant": false - }, - 
{ - "set_deferrable_load_single_constant": false - } - ], - "list_set_deferrable_startup_penalty": [ - { - "set_deferrable_startup_penalty": 0.0 - }, - { - "set_deferrable_startup_penalty": 0.0 - } - ], - "load_peak_hours_cost": 0.1907, - "load_offpeak_hours_cost": 0.1419, - "production_price_forecast_method": "constant", - "photovoltaic_production_sell_price": 0.1419, - "maximum_power_from_grid": 9000, - "maximum_power_to_grid": 9000, - "list_pv_module_model": [ - { - "pv_module_model": "CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M" - } - ], - "list_pv_inverter_model": [ - { - "pv_inverter_model": "Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_" - } - ], - "list_surface_tilt": [ - { - "surface_tilt": 30 - } - ], - "list_surface_azimuth": [ - { - "surface_azimuth": 205 - } - ], - "list_modules_per_string": [ - { - "modules_per_string": 16 - } - ], - "list_strings_per_inverter": [ - { - "strings_per_inverter": 1 - } - ], - "inverter_is_hybrid": false, - "compute_curtailment": false, - "set_use_battery": false, - "battery_discharge_power_max": 1000, - "battery_charge_power_max": 1000, - "battery_discharge_efficiency": 0.95, - "battery_charge_efficiency": 0.95, - "battery_nominal_energy_capacity": 5000, - "battery_minimum_state_of_charge": 0.3, - "battery_maximum_state_of_charge": 0.9, - "battery_target_state_of_charge": 0.6 -} + "Latitude": 0, + "Longitude": 0, + "Altitude": 0, + "data_path": "default", + "server_ip": "0.0.0.0" +} \ No newline at end of file diff --git a/scripts/load_clustering.py b/scripts/load_clustering.py index db92ea2e..f627d8f1 100644 --- a/scripts/load_clustering.py +++ b/scripts/load_clustering.py @@ -11,7 +11,7 @@ from emhass.retrieve_hass import RetrieveHass from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_secrets, build_params from sklearn.cluster import KMeans from sklearn.linear_model import LinearRegression @@ -33,11 +33,13 @@ # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=True) @@ -47,9 +49,11 @@ days_to_retrieve = 240 model_type = "load_clustering" var_model = "sensor.power_load_positive" - + + # Build params with no config and default secrets data_path = emhass_conf['data_path'] / str('data_train_'+model_type+'.pkl') - params = None + _,secrets = build_secrets(emhass_conf,logger,no_response=True) + params = build_params(emhass_conf,secrets,{},logger) template = 'presentation' if data_path.is_file(): @@ -58,9 +62,9 @@ data, var_model = pickle.load(fid) else: logger.info("Using EMHASS methods to retrieve the new forecast model train data") - retrieve_hass_conf, _, _ = get_yaml_parse(emhass_conf, use_secrets=True) + retrieve_hass_conf, _, _ = get_yaml_parse(params,logger) rh = RetrieveHass(retrieve_hass_conf['hass_url'], 
retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger, get_data_from_file=False) days_list = get_days_list(days_to_retrieve) diff --git a/scripts/load_forecast_sklearn.py b/scripts/load_forecast_sklearn.py index 82f5b3b3..8e09215d 100644 --- a/scripts/load_forecast_sklearn.py +++ b/scripts/load_forecast_sklearn.py @@ -10,7 +10,7 @@ from emhass.retrieve_hass import RetrieveHass from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_secrets, build_params from sklearn.linear_model import LinearRegression from sklearn.linear_model import ElasticNet @@ -26,16 +26,18 @@ # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['docs_path'] = root / 'docs/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=True) - def add_date_features(data): df = copy.deepcopy(data) df['year'] = [i.year for i in df.index] @@ -57,8 +59,10 @@ def neg_r2_score(y_true, y_pred): sklearn_model = "KNeighborsRegressor" num_lags = 48 + # Build params with no config and default secrets data_path = emhass_conf['data_path'] / str('data_train_'+model_type+'.pkl') - params = None + _,secrets = build_secrets(emhass_conf,logger,no_response=True) + params = build_params(emhass_conf,secrets,{},logger) template = 'presentation' if data_path.is_file(): @@ -67,9 +71,9 @@ def neg_r2_score(y_true, y_pred): data, var_model = pickle.load(fid) else: logger.info("Using EMHASS methods to retrieve the new forecast model train data") - retrieve_hass_conf, _, _ = get_yaml_parse(emhass_conf, use_secrets=True) + retrieve_hass_conf, _, _ = get_yaml_parse(params,logger) rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger, get_data_from_file=False) days_list = get_days_list(days_to_retrieve) @@ -87,7 +91,7 @@ def neg_r2_score(y_true, y_pred): fig.update_yaxes(title_text = "Power (W)") fig.update_xaxes(title_text = "Time") fig.show() - fig.write_image(emhass_conf['root_path'] / "docs/images/inputs_power_load_forecast.svg", width=1080, height=0.8*1080) + fig.write_image(emhass_conf['docs_path'] / "images/inputs_power_load_forecast.svg", width=1080, height=0.8*1080) data.index = pd.to_datetime(data.index) data.sort_index(inplace=True) @@ -140,7 +144,7 @@ def neg_r2_score(y_true, y_pred): fig.update_xaxes(title_text = "Time") fig.update_xaxes(range=[date_train+pd.Timedelta('10days'), data_exo.index[-1]]) fig.show() - fig.write_image(emhass_conf['root_path'] / "docs/images/load_forecast_knn_bare.svg", width=1080, height=0.8*1080) + 
fig.write_image(emhass_conf['docs_path'] / "images/load_forecast_knn_bare.svg", width=1080, height=0.8*1080) logger.info("Simple backtesting") start_time = time.time() @@ -166,7 +170,7 @@ def neg_r2_score(y_true, y_pred): fig.update_yaxes(title_text = "Power (W)") fig.update_xaxes(title_text = "Time") fig.show() - fig.write_image(emhass_conf['root_path'] / "docs/images/load_forecast_knn_bare_backtest.svg", width=1080, height=0.8*1080) + fig.write_image(emhass_conf['docs_path'] / "images/load_forecast_knn_bare_backtest.svg", width=1080, height=0.8*1080) # Bayesian search hyperparameter and lags with Skopt @@ -224,7 +228,7 @@ def neg_r2_score(y_true, y_pred): fig.update_xaxes(title_text = "Time") fig.update_xaxes(range=[date_train+pd.Timedelta('10days'), data_exo.index[-1]]) fig.show() - fig.write_image(emhass_conf['root_path'] / "docs/images/load_forecast_knn_optimized.svg", width=1080, height=0.8*1080) + fig.write_image(emhass_conf['docs_path'] / "images/load_forecast_knn_optimized.svg", width=1080, height=0.8*1080) logger.info("######################## Train/Test R2 score comparison ######################## ") pred_naive_metric_train = r2_score(df.loc[data_train.index,'train'],df.loc[data_train.index,'pred_naive']) @@ -251,11 +255,11 @@ def neg_r2_score(y_true, y_pred): # Then retrieve some data and perform a prediction mocking a production env rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger, get_data_from_file=False) days_list = get_days_list(days_needed) - var_model = retrieve_hass_conf['var_load'] + var_model = retrieve_hass_conf['sensor_power_load_no_var_loads'] var_list = [var_model] rh.get_data(days_list, var_list) data_last_window = copy.deepcopy(rh.df_final) @@ -275,4 +279,4 @@ def neg_r2_score(y_true, y_pred): fig.update_yaxes(title_text = "Power (W)") fig.update_xaxes(title_text = "Time") fig.show() - fig.write_image(emhass_conf['root_path'] / "docs/images/load_forecast_production.svg", width=1080, height=0.8*1080) \ No newline at end of file + fig.write_image(emhass_conf['docs_path'] / "images/load_forecast_production.svg", width=1080, height=0.8*1080) \ No newline at end of file diff --git a/scripts/optim_results_analysis.py b/scripts/optim_results_analysis.py index 173c7045..07909805 100644 --- a/scripts/optim_results_analysis.py +++ b/scripts/optim_results_analysis.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import json import pickle import numpy as np import pandas as pd @@ -12,20 +13,24 @@ from emhass.retrieve_hass import RetrieveHass from emhass.optimization import Optimization from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_secrets, build_params # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['docs_path'] = root / 'docs/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 
'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf, params, get_data_from_file): + fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf, params, emhass_conf, logger, get_data_from_file=get_data_from_file) df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method']) @@ -43,32 +48,35 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf, save_figures = False save_html = False get_data_from_file = True - params = None - if get_data_from_file: - retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=False) - else: - retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf) + # Build params with default config and default secrets + config = build_config(emhass_conf,logger,emhass_conf['defaults_path']) + _,secrets = build_secrets(emhass_conf,logger,no_response=True) + params = build_params(emhass_conf,secrets,config,logger) + # if get_data_from_file: + # retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse({},logger) + # else: + retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params,logger) retrieve_hass_conf, optim_conf, plant_conf = \ retrieve_hass_conf, optim_conf, plant_conf rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger) if get_data_from_file: with open(pathlib.Path(emhass_conf['data_path'] / 'test_df_final.pkl'), 'rb') as inp: rh.df_final, days_list, var_list = pickle.load(inp) - retrieve_hass_conf['var_load'] = str(var_list[0]) - retrieve_hass_conf['var_PV'] = str(var_list[1]) - retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']] - retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']] + retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0]) + retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1]) + retrieve_hass_conf['sensor_linear_interp'] = [retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']] + retrieve_hass_conf['sensor_replace_zero'] = [retrieve_hass_conf['sensor_power_photovoltaics']] else: - days_list = get_days_list(retrieve_hass_conf['days_to_retrieve']) - var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']] + days_list = get_days_list(retrieve_hass_conf['historic_days_to_retrieve']) + var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'], retrieve_hass_conf['sensor_power_photovoltaics']] rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False) - rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'], + rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = retrieve_hass_conf['load_negative'], set_zero_min = retrieve_hass_conf['set_zero_min'], - var_replace_zero = retrieve_hass_conf['var_replace_zero'], - var_interp = retrieve_hass_conf['var_interp']) + var_replace_zero = retrieve_hass_conf['sensor_replace_zero'], + var_interp = retrieve_hass_conf['sensor_linear_interp']) df_input_data = rh.df_final.copy() fcst, P_PV_forecast, P_load_forecast, 
df_input_data_dayahead, opt = \ @@ -80,15 +88,15 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf, template = 'presentation' # Let's plot the input data - fig_inputs1 = df_input_data[[retrieve_hass_conf['var_PV'], - str(retrieve_hass_conf['var_load'] + '_positive')]].plot() + fig_inputs1 = df_input_data[[retrieve_hass_conf['sensor_power_photovoltaics'], + str(retrieve_hass_conf['sensor_power_load_no_var_loads'] + '_positive')]].plot() fig_inputs1.layout.template = template fig_inputs1.update_yaxes(title_text = "Powers (W)") fig_inputs1.update_xaxes(title_text = "Time") if show_figures: fig_inputs1.show() if save_figures: - fig_inputs1.write_image(emhass_conf['root_path'] / "docs/images/inputs_power.svg", + fig_inputs1.write_image(emhass_conf['docs_path'] / "images/inputs_power.svg", width=1080, height=0.8*1080) fig_inputs_dah = df_input_data_dayahead.plot() @@ -98,14 +106,14 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf, if show_figures: fig_inputs_dah.show() if save_figures: - fig_inputs_dah.write_image(emhass_conf['root_path'] / "docs/images/inputs_dayahead.svg", + fig_inputs_dah.write_image(emhass_conf['docs_path'] / "images/inputs_dayahead.svg", width=1080, height=0.8*1080) # And then perform a dayahead optimization df_input_data_dayahead = fcst.get_load_cost_forecast(df_input_data_dayahead) df_input_data_dayahead = fcst.get_prod_price_forecast(df_input_data_dayahead) - optim_conf['treat_def_as_semi_cont'] = [True, True] - optim_conf['set_def_constant'] = [True, True] + optim_conf['treat_deferrable_load_as_semi_cont'] = [True, True] + optim_conf['set_deferrable_load_single_constant'] = [True, True] unit_load_cost = df_input_data[opt.var_load_cost].values unit_prod_price = df_input_data[opt.var_prod_price].values opt_res_dah = opt.perform_optimization(df_input_data_dayahead, P_PV_forecast.values.ravel(), @@ -122,7 +130,7 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf, # if show_figures: fig_res_dah.show() if save_figures: - fig_res_dah.write_image(emhass_conf['root_path'] / "docs/images/optim_results_PV_defLoads_dayaheadOptim.svg", + fig_res_dah.write_image(emhass_conf['docs_path'] / "images/optim_results_PV_defLoads_dayaheadOptim.svg", width=1080, height=0.8*1080) print("System with: PV, two deferrable loads, dayahead optimization, profit >> total cost function sum: "+\ diff --git a/scripts/read_csv_plot_data.py b/scripts/read_csv_plot_data.py index 2b136bf3..2293725b 100644 --- a/scripts/read_csv_plot_data.py +++ b/scripts/read_csv_plot_data.py @@ -21,11 +21,14 @@ if __name__ == '__main__': # the root folder - root = str(get_root(__file__, num_parent=2)) + root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} - emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' - emhass_conf['data_path'] = pathlib.Path(root) / 'data/' - emhass_conf['root_path'] = pathlib.Path(root) + emhass_conf['data_path'] = root / 'data/' + emhass_conf['root_path'] = root / 'src/emhass/' + emhass_conf['docs_path'] = root / 'docs/' + emhass_conf['config_path'] = root / 'config.json' + emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' + emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) @@ -137,7 +140,7 @@ this_figure.show() if save_figs: - fig_filename = emhass_conf['root_path'] / "docs/images/optim_results" + fig_filename = 
emhass_conf['docs_path'] / "images/optim_results" this_figure.write_image(str(fig_filename) + ".png", width=1.5*768, height=1.5*1.5*768) fig_bar = px.bar(np.arange(len(cf)), x=[c+" (+"+"{:.2f}".format(np.sum(data['gain_'+c])*100/np.sum( @@ -151,5 +154,5 @@ fig_bar.show() if save_figs: - fig_filename = emhass_conf['root_path'] / "docs/images/optim_results_bar_plot" + fig_filename = emhass_conf['docs_path'] / "images/optim_results_bar_plot" fig_bar.write_image(str(fig_filename) + ".png", width=1080, height=0.8*1080) diff --git a/scripts/requirements.txt b/scripts/requirements.txt new file mode 100644 index 00000000..d6671a3a --- /dev/null +++ b/scripts/requirements.txt @@ -0,0 +1,3 @@ +kaleido +tslearn +tabulate diff --git a/scripts/save_pvlib_module_inverter_database.py b/scripts/save_pvlib_module_inverter_database.py index f09a0448..9adfe900 100644 --- a/scripts/save_pvlib_module_inverter_database.py +++ b/scripts/save_pvlib_module_inverter_database.py @@ -20,11 +20,14 @@ from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['docs_path'] = root / 'docs/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) @@ -52,9 +55,9 @@ logger.info('Inverters databases') print(tabulate(cec_inverters.head(20).iloc[:,:3], headers='keys', tablefmt='psql')) if save_new_files: - with bz2.BZ2File(emhass_conf['root_path'] + '/src/emhass/data/cec_modules.pbz2', "w") as f: + with bz2.BZ2File(emhass_conf['root_path'] / 'data/cec_modules.pbz2', "w") as f: cPickle.dump(cec_modules, f) if save_new_files: - with bz2.BZ2File(emhass_conf['root_path'] + '/src/emhass/data/cec_inverters.pbz2', "w") as f: + with bz2.BZ2File(emhass_conf['root_path'] / 'data/cec_inverters.pbz2', "w") as f: cPickle.dump(cec_inverters, f) \ No newline at end of file diff --git a/scripts/script_debug_forecasts.py b/scripts/script_debug_forecasts.py index d2f377da..0525b759 100644 --- a/scripts/script_debug_forecasts.py +++ b/scripts/script_debug_forecasts.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import json import pickle import numpy as np import pandas as pd @@ -10,14 +11,17 @@ pd.options.plotting.backend = "plotly" from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_secrets, build_params # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['secrets_path'] = root /
'secrets_emhass.yaml' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) @@ -25,14 +29,19 @@ if __name__ == '__main__': get_data_from_file = True - params = None template = 'presentation' methods_list = ['solar.forecast', 'solcast', 'scrapper'] # for k, method in enumerate(methods_list): - retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf) - optim_conf['delta_forecast'] = pd.Timedelta(days=2) + # Build params with default config, weather_forecast_method=method and default secrets + config = build_config(emhass_conf,logger,emhass_conf['defaults_path']) + config['weather_forecast_method'] = method + _,secrets = build_secrets(emhass_conf,logger,secrets_path=emhass_conf['secrets_path'],no_response=True) + params = build_params(emhass_conf,secrets,config,logger) + + retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params,logger) + optim_conf['delta_forecast_daily'] = pd.Timedelta(days=2) fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf, params, emhass_conf, logger, get_data_from_file=get_data_from_file) df_weather = fcst.get_weather_forecast(method=method) diff --git a/scripts/script_debug_optim.py b/scripts/script_debug_optim.py index 8ac42347..2ea31cd5 100644 --- a/scripts/script_debug_optim.py +++ b/scripts/script_debug_optim.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import json import pickle import numpy as np import pandas as pd @@ -12,46 +13,50 @@ from emhass.retrieve_hass import RetrieveHass from emhass.optimization import Optimization from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_params # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) if __name__ == '__main__': get_data_from_file = True - params = None show_figures = True template = 'presentation' - retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=False) + # Build params with default config (no secrets) + config = build_config(emhass_conf,logger,emhass_conf['defaults_path']) + params = build_params(emhass_conf,{},config,logger) + retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logger) retrieve_hass_conf, optim_conf, plant_conf = \ retrieve_hass_conf, optim_conf, plant_conf rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger) if get_data_from_file: with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') 
as inp: rh.df_final, days_list, var_list = pickle.load(inp) - retrieve_hass_conf['var_load'] = str(var_list[0]) - retrieve_hass_conf['var_PV'] = str(var_list[1]) - retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']] - retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']] + retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0]) + retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1]) + retrieve_hass_conf['sensor_linear_interp'] = [retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']] + retrieve_hass_conf['sensor_replace_zero'] = [retrieve_hass_conf['sensor_power_photovoltaics']] else: - days_list = get_days_list(retrieve_hass_conf['days_to_retrieve']) - var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']] + days_list = get_days_list(retrieve_hass_conf['historic_days_to_retrieve']) + var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'], retrieve_hass_conf['sensor_power_photovoltaics']] rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False) - rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'], + rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = retrieve_hass_conf['load_negative'], set_zero_min = retrieve_hass_conf['set_zero_min'], - var_replace_zero = retrieve_hass_conf['var_replace_zero'], - var_interp = retrieve_hass_conf['var_interp']) + var_replace_zero = retrieve_hass_conf['sensor_replace_zero'], + var_interp = retrieve_hass_conf['sensor_linear_interp']) df_input_data = rh.df_final.copy() fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf, @@ -73,11 +78,11 @@ optim_conf.update({'lp_solver_path': 'empty'}) # set the path to the LP solver, COIN_CMD default is /usr/bin/cbc # Semi continuous and constant values - optim_conf.update({'treat_def_as_semi_cont': [True, False]}) - optim_conf.update({'set_def_constant': [True, False]}) + optim_conf.update({'treat_deferrable_load_as_semi_cont': [True, False]}) + optim_conf.update({'set_deferrable_load_single_constant': [True, False]}) # A sequence of values - # optim_conf.update({'P_deferrable_nom': [[500.0, 100.0, 100.0, 500.0], 750.0]}) + # optim_conf.update({'nominal_power_of_deferrable_loads': [[500.0, 100.0, 100.0, 500.0], 750.0]}) # Using a battery optim_conf.update({'set_use_battery': False}) diff --git a/scripts/script_simple_thermal_model.py b/scripts/script_simple_thermal_model.py index 2a94bac6..6b95d434 100644 --- a/scripts/script_simple_thermal_model.py +++ b/scripts/script_simple_thermal_model.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import json import pickle import random import numpy as np @@ -13,46 +14,50 @@ from emhass.retrieve_hass import RetrieveHass from emhass.optimization import Optimization from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_params # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' 
+emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) if __name__ == '__main__': get_data_from_file = True - params = None show_figures = True template = 'presentation' - retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=False) + # Build params with default config (no secrets) + config = build_config(emhass_conf,logger,emhass_conf['defaults_path']) + params = build_params(emhass_conf,{},config,logger) + retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logger) retrieve_hass_conf, optim_conf, plant_conf = \ retrieve_hass_conf, optim_conf, plant_conf rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger) if get_data_from_file: with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp: rh.df_final, days_list, var_list = pickle.load(inp) - retrieve_hass_conf['var_load'] = str(var_list[0]) - retrieve_hass_conf['var_PV'] = str(var_list[1]) - retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']] - retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']] + retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0]) + retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1]) + retrieve_hass_conf['sensor_linear_interp'] = [retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']] + retrieve_hass_conf['sensor_replace_zero'] = [retrieve_hass_conf['sensor_power_photovoltaics']] else: - days_list = get_days_list(retrieve_hass_conf['days_to_retrieve']) - var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']] + days_list = get_days_list(retrieve_hass_conf['historic_days_to_retrieve']) + var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'], retrieve_hass_conf['sensor_power_photovoltaics']] rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False) - rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'], + rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = retrieve_hass_conf['load_negative'], set_zero_min = retrieve_hass_conf['set_zero_min'], - var_replace_zero = retrieve_hass_conf['var_replace_zero'], - var_interp = retrieve_hass_conf['var_interp']) + var_replace_zero = retrieve_hass_conf['sensor_replace_zero'], + var_interp = retrieve_hass_conf['sensor_linear_interp']) df_input_data = rh.df_final.copy() fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf, @@ -74,14 +79,14 @@ optim_conf.update({'lp_solver_path': 'empty'}) # set the path to the LP solver, COIN_CMD default is /usr/bin/cbc # Config for a single thermal model - optim_conf.update({'num_def_loads': 1}) - optim_conf.update({'P_deferrable_nom': [1000.0]}) - optim_conf.update({'def_total_hours': [0]}) - optim_conf.update({'def_start_timestep': [0]}) - optim_conf.update({'def_end_timestep': [0]}) - optim_conf.update({'treat_def_as_semi_cont': [False]}) - optim_conf.update({'set_def_constant': [False]}) - optim_conf.update({'def_start_penalty': [0.0]}) + 
optim_conf.update({'number_of_deferrable_loads': 1}) + optim_conf.update({'nominal_power_of_deferrable_loads': [1000.0]}) + optim_conf.update({'operating_hours_of_each_deferrable_load': [0]}) + optim_conf.update({'start_timesteps_of_each_deferrable_load': [0]}) + optim_conf.update({'end_timesteps_of_each_deferrable_load': [0]}) + optim_conf.update({'treat_deferrable_load_as_semi_cont': [False]}) + optim_conf.update({'set_deferrable_load_single_constant': [False]}) + optim_conf.update({'set_deferrable_startup_penalty': [0.0]}) # Thermal modeling df_input_data['outdoor_temperature_forecast'] = [random.normalvariate(10.0, 3.0) for _ in range(48)] diff --git a/scripts/script_thermal_model_optim.py b/scripts/script_thermal_model_optim.py index 74440464..49073737 100644 --- a/scripts/script_thermal_model_optim.py +++ b/scripts/script_thermal_model_optim.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import json import pickle import random import numpy as np @@ -13,14 +14,16 @@ from emhass.retrieve_hass import RetrieveHass from emhass.optimization import Optimization from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_params # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) @@ -31,28 +34,31 @@ show_figures = True template = 'presentation' - retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=False) + # Build params with default config (no secrets) + config = build_config(emhass_conf,logger,emhass_conf['defaults_path']) + params = build_params(emhass_conf,{},config,logger) + retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logger) retrieve_hass_conf, optim_conf, plant_conf = \ retrieve_hass_conf, optim_conf, plant_conf rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'], - retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'], + retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'], params, emhass_conf, logger) if get_data_from_file: with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp: rh.df_final, days_list, var_list = pickle.load(inp) - retrieve_hass_conf['var_load'] = str(var_list[0]) - retrieve_hass_conf['var_PV'] = str(var_list[1]) - retrieve_hass_conf['var_interp'] = [retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']] - retrieve_hass_conf['var_replace_zero'] = [retrieve_hass_conf['var_PV']] + retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0]) + retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1]) + retrieve_hass_conf['sensor_linear_interp'] = [retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']] + retrieve_hass_conf['sensor_replace_zero'] = 
[retrieve_hass_conf['sensor_power_photovoltaics']] else: - days_list = get_days_list(retrieve_hass_conf['days_to_retrieve']) - var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']] + days_list = get_days_list(retrieve_hass_conf['historic_days_to_retrieve']) + var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'], retrieve_hass_conf['sensor_power_photovoltaics']] rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False) - rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'], + rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = retrieve_hass_conf['load_negative'], set_zero_min = retrieve_hass_conf['set_zero_min'], - var_replace_zero = retrieve_hass_conf['var_replace_zero'], - var_interp = retrieve_hass_conf['var_interp']) + var_replace_zero = retrieve_hass_conf['sensor_replace_zero'], + var_interp = retrieve_hass_conf['sensor_linear_interp']) df_input_data = rh.df_final.copy() fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf, @@ -74,8 +80,8 @@ optim_conf.update({'lp_solver_path': 'empty'}) # set the path to the LP solver, COIN_CMD default is /usr/bin/cbc # Semi continuous and constant values - optim_conf.update({'treat_def_as_semi_cont': [True, False]}) - optim_conf.update({'set_def_constant': [True, False]}) + optim_conf.update({'treat_deferrable_load_as_semi_cont': [True, False]}) + optim_conf.update({'set_deferrable_load_single_constant': [True, False]}) # Thermal modeling df_input_data['outdoor_temperature_forecast'] = [random.normalvariate(10.0, 3.0) for _ in range(48)] diff --git a/scripts/special_config_analysis.py b/scripts/special_config_analysis.py index ace61ab8..ce07fb61 100644 --- a/scripts/special_config_analysis.py +++ b/scripts/special_config_analysis.py @@ -22,15 +22,18 @@ from emhass.retrieve_hass import RetrieveHass from emhass.optimization import Optimization from emhass.forecast import Forecast -from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_params +from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_secrets, build_params # the root folder -root = str(get_root(__file__, num_parent=2)) +root = pathlib.Path(str(get_root(__file__, num_parent=2))) emhass_conf = {} -emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml' -emhass_conf['data_path'] = pathlib.Path(root) / 'data/' -emhass_conf['root_path'] = pathlib.Path(root) - +emhass_conf['data_path'] = root / 'data/' +emhass_conf['root_path'] = root / 'src/emhass/' +emhass_conf['scripts_path'] = root / 'scripts/' +emhass_conf['config_path'] = root / 'config.json' +emhass_conf['secrets_path'] = root / 'secrets_emhass.yaml' +emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json' +emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv' # create logger logger, ch = get_logger(__name__, emhass_conf, save_to_file=False) @@ -50,30 +53,12 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf, if __name__ == '__main__': get_data_from_file = False - - with open(emhass_conf['config_path'], 'r') as file: - config = yaml.load(file, Loader=yaml.FullLoader) - retrieve_hass_conf = config['retrieve_hass_conf'] - optim_conf = config['optim_conf'] - plant_conf = config['plant_conf'] - - params = {} - params['retrieve_hass_conf'] = retrieve_hass_conf - params['optim_conf'] = optim_conf - params['plant_conf'] = 
diff --git a/scripts/special_config_analysis.py b/scripts/special_config_analysis.py
index ace61ab8..ce07fb61 100644
--- a/scripts/special_config_analysis.py
+++ b/scripts/special_config_analysis.py
@@ -22,15 +22,18 @@
 from emhass.retrieve_hass import RetrieveHass
 from emhass.optimization import Optimization
 from emhass.forecast import Forecast
-from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_params
+from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_secrets, build_params
 
 # the root folder
-root = str(get_root(__file__, num_parent=2))
+root = pathlib.Path(str(get_root(__file__, num_parent=2)))
 emhass_conf = {}
-emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml'
-emhass_conf['data_path'] = pathlib.Path(root) / 'data/'
-emhass_conf['root_path'] = pathlib.Path(root)
-
+emhass_conf['data_path'] = root / 'data/'
+emhass_conf['root_path'] = root / 'src/emhass/'
+emhass_conf['scripts_path'] = root / 'scripts/'
+emhass_conf['config_path'] = root / 'config.json'
+emhass_conf['secrets_path'] = root / 'secrets_emhass.yaml'
+emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json'
+emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv'
 # create logger
 logger, ch = get_logger(__name__, emhass_conf, save_to_file=False)
@@ -50,30 +53,12 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf,
 
 if __name__ == '__main__':
     get_data_from_file = False
-
-    with open(emhass_conf['config_path'], 'r') as file:
-        config = yaml.load(file, Loader=yaml.FullLoader)
-    retrieve_hass_conf = config['retrieve_hass_conf']
-    optim_conf = config['optim_conf']
-    plant_conf = config['plant_conf']
-
-    params = {}
-    params['retrieve_hass_conf'] = retrieve_hass_conf
-    params['optim_conf'] = optim_conf
-    params['plant_conf'] = plant_conf
-
-    options_json = emhass_conf['root_path'] / 'scripts/special_options.json'
-    with options_json.open('r') as data:
-        options = json.load(data)
-
-    # params = build_params(params, options)
-
-    with open(emhass_conf['root_path'] / 'secrets_emhass.yaml', 'r') as file:
-        params_secrets = yaml.load(file, Loader=yaml.FullLoader)
-
-    params = build_params(params, params_secrets, options, 1, logger)
-    # params['params_secrets'] = input_secrets
-
+
+    # Build params from the default config, the special options file, and the secrets file
+    config = build_config(emhass_conf,logger,emhass_conf['defaults_path'],emhass_conf['scripts_path'] / 'special_options.json')
+    emhass_conf,secrets = build_secrets(emhass_conf,logger,options_path=emhass_conf['scripts_path'] / 'special_options.json',secrets_path=emhass_conf['secrets_path'],no_response=True)
+    params = build_params(emhass_conf, secrets, config, logger)
+
     pv_power_forecast = [0, 8, 27, 42, 47, 41, 25, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 52, 73, 74, 68, 44, 12, 0, 0, 0, 0]
     load_power_forecast = [2850, 3021, 3107, 3582, 2551, 2554, 1856, 2505, 1768, 2540, 1722, 2463, 1670, 1379, 1165, 1000, 1641, 1181, 1861, 1414, 1467, 1344, 1209, 1531]
     load_cost_forecast = [17.836, 19.146, 18.753, 17.838, 17.277, 16.282, 16.736, 16.047, 17.004, 19.982, 17.17, 16.968, 16.556, 16.21, 12.333, 10.937]
@@ -81,44 +66,44 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf,
     prediction_horizon = 16
     soc_init = 0.98
     soc_final = 0.3
-    def_total_hours = [0]
+    operating_hours_of_each_deferrable_load = [0]
     alpha = 1
     beta = 0
     params['passed_data'] = {'pv_power_forecast':pv_power_forecast,'load_power_forecast':load_power_forecast,
                              'load_cost_forecast':load_cost_forecast,'prod_price_forecast':prod_price_forecast,
                              'prediction_horizon':prediction_horizon,'soc_init':soc_init,'soc_final':soc_final,
-                             'def_total_hours':def_total_hours,'alpha':alpha,'beta':beta}
+                             'operating_hours_of_each_deferrable_load':operating_hours_of_each_deferrable_load,'alpha':alpha,'beta':beta}
 
-    optim_conf['weather_forecast_method'] = 'list'
-    optim_conf['load_forecast_method'] = 'list'
-    optim_conf['load_cost_forecast_method'] = 'list'
-    optim_conf['prod_price_forecast_method'] = 'list'
+    params['optim_conf']['weather_forecast_method'] = 'list'
+    params['optim_conf']['load_forecast_method'] = 'list'
+    params['optim_conf']['load_cost_forecast_method'] = 'list'
+    params['optim_conf']['production_price_forecast_method'] = 'list'
 
-    data_path = emhass_conf['root_path'] / 'scripts/data_temp.pkl'
+    data_path = emhass_conf['scripts_path'] / 'data_temp.pkl'
+
+    retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logger)
 
     if data_path.is_file():
         logger.info("Loading a previous data file")
         with open(data_path, "rb") as fid:
             fcst, P_PV_forecast, P_load_forecast, df_input_data_dayahead, opt, df_input_data = pickle.load(fid)
     else:
-
-        retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=True, params = json.dumps(params))
         rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
-                          retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
+                          retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'],
                           params, emhass_conf, logger)
-        days_list = get_days_list(retrieve_hass_conf['days_to_retrieve'])
-        var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
+        days_list = get_days_list(retrieve_hass_conf['historic_days_to_retrieve'])
+        var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'], retrieve_hass_conf['sensor_power_photovoltaics']]
         rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False)
-        rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
+        rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = retrieve_hass_conf['load_negative'],
                         set_zero_min = retrieve_hass_conf['set_zero_min'],
-                        var_replace_zero = retrieve_hass_conf['var_replace_zero'],
-                        var_interp = retrieve_hass_conf['var_interp'])
+                        var_replace_zero = retrieve_hass_conf['sensor_replace_zero'],
+                        var_interp = retrieve_hass_conf['sensor_linear_interp'])
         df_input_data = rh.df_final.copy()
 
         fcst, P_PV_forecast, P_load_forecast, df_input_data_dayahead, opt = \
             get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf,
-                                       json.dumps(params), get_data_from_file)
+                                       params, get_data_from_file)
         df_input_data = fcst.get_load_cost_forecast(df_input_data)
         df_input_data = fcst.get_prod_price_forecast(df_input_data)
@@ -128,8 +113,8 @@
     template = 'presentation'
 
     # Let's plot the input data
-    fig_inputs1 = df_input_data[[str(retrieve_hass_conf['var_PV']),
-                                 str(retrieve_hass_conf['var_load'] + '_positive')]].plot()
+    fig_inputs1 = df_input_data[[str(retrieve_hass_conf['sensor_power_photovoltaics']),
+                                 str(retrieve_hass_conf['sensor_power_load_no_var_loads'] + '_positive')]].plot()
     fig_inputs1.layout.template = template
     fig_inputs1.update_yaxes(title_text = "Powers (W)")
     fig_inputs1.update_xaxes(title_text = "Time")
@@ -163,7 +148,7 @@
             \"prod_price_forecast\":[6.651, 7.743, 7.415, 6.653, 6.185, 5.356, 5.734, 5.16, 5.958, 8.439, 6.096, 5.928, 5.584, 5.296, 4.495, 3.332],
             \"prediction_horizon\":16,
             \"pv_power_forecast\": [0, 8, 27, 42, 47, 41, 25, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 52, 73, 74, 68, 44, 12, 0, 0, 0, 0],
-            \"alpha\": 1, \"beta\": 0, \"soc_init\":0.98, \"soc_final\":0.3, \"def_total_hours\":[0]
+            \"alpha\": 1, \"beta\": 0, \"soc_init\":0.98, \"soc_final\":0.3, \"operating_hours_of_each_deferrable_load\":[0]
         }' http://localhost:5000/action/naive-mpc-optim"'''
 
     # Perform a MPC optimization
@@ -180,7 +165,7 @@
     opt_res_dayahead = opt.perform_naive_mpc_optim(
         df_input_data_dayahead, P_PV_forecast, P_load_forecast, prediction_horizon,
-        soc_init=soc_init, soc_final=soc_final, def_total_hours=def_total_hours)
+        soc_init=soc_init, soc_final=soc_final, def_total_hours=operating_hours_of_each_deferrable_load)
     fig_res_mpc = opt_res_dayahead[['P_batt', 'P_grid']].plot()
     fig_res_mpc.layout.template = template
     fig_res_mpc.update_yaxes(title_text = "Powers (W)")
diff --git a/scripts/special_options.json b/scripts/special_options.json
index 4c9e1037..f8ea5b73 100644
--- a/scripts/special_options.json
+++ b/scripts/special_options.json
@@ -28,16 +28,16 @@
             "operating_hours_of_each_deferrable_load": 1
         }
     ],
-    "list_peak_hours_periods_start_hours": [
-        {
-            "peak_hours_periods_start_hours": "05:54"
-        }
-    ],
-    "list_peak_hours_periods_end_hours": [
-        {
-            "peak_hours_periods_end_hours": "05:54"
-        }
-    ],
+    "load_peak_hour_periods": {
+        "period_hp_1": [
+            {
+                "start": "05:54"
+            },
+            {
+                "end": "05:54"
+            }
+        ]
+    },
     "list_treat_deferrable_load_as_semi_cont": [
         {
             "treat_deferrable_load_as_semi_cont": false
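The two parallel `list_peak_hours_periods_*` arrays collapse into a single `load_peak_hour_periods` mapping, which `forecast.py` (later in this diff) walks with `between_time` to price peak windows. A small self-contained sketch of that consumption pattern; the structure mirrors the diff, the data is illustrative:

```python
import pandas as pd

# One named period with a start entry and an end entry, as in the new format
load_peak_hour_periods = {
    'period_hp_1': [{'start': '05:54'}, {'end': '05:54'}],
}
load_offpeak_hours_cost, load_peak_hours_cost = 0.1419, 0.1907

index = pd.date_range('2024-01-01', periods=48, freq='30min')
unit_load_cost = pd.Series(load_offpeak_hours_cost, index=index)
for key, period_hp in load_peak_hour_periods.items():
    # between_time() selects rows whose clock time falls inside the window
    hp = unit_load_cost.between_time(period_hp[0]['start'], period_hp[1]['end'])
    unit_load_cost.loc[hp.index] = load_peak_hours_cost
```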
diff --git a/scripts/use_cases_analysis.py b/scripts/use_cases_analysis.py
index 0030784e..9ae5234c 100644
--- a/scripts/use_cases_analysis.py
+++ b/scripts/use_cases_analysis.py
@@ -7,6 +7,7 @@
 Before running this script you should perform a perfect optimization for
 each type of cost function: profit, cost and self-consumption
 '''
+import json
 import numpy as np
 import pandas as pd
 import pathlib
@@ -19,14 +20,18 @@
 from emhass.retrieve_hass import RetrieveHass
 from emhass.optimization import Optimization
 from emhass.forecast import Forecast
-from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger
+from emhass.utils import get_root, get_yaml_parse, get_days_list, get_logger, build_config, build_secrets, build_params
 
 # the root folder
-root = str(get_root(__file__, num_parent=2))
+root = pathlib.Path(str(get_root(__file__, num_parent=2)))
 emhass_conf = {}
-emhass_conf['config_path'] = pathlib.Path(root) / 'config_emhass.yaml'
-emhass_conf['data_path'] = pathlib.Path(root) / 'data/'
-emhass_conf['root_path'] = pathlib.Path(root)
+emhass_conf['data_path'] = root / 'data/'
+emhass_conf['root_path'] = root / 'src/emhass/'
+emhass_conf['docs_path'] = root / 'docs/'
+emhass_conf['config_path'] = root / 'config.json'
+emhass_conf['secrets_path'] = root / 'secrets_emhass.yaml'
+emhass_conf['defaults_path'] = emhass_conf['root_path'] / 'data/config_defaults.json'
+emhass_conf['associations_path'] = emhass_conf['root_path'] / 'data/associations.csv'
 
 # create logger
 logger, ch = get_logger(__name__, emhass_conf, save_to_file=False)
@@ -49,18 +54,22 @@ def get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf,
     get_data_from_file = False
     params = None
     save_figures = False
-    retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(emhass_conf, use_secrets=True)
+    # Build params with default config and secrets file
+    config = build_config(emhass_conf,logger,emhass_conf['defaults_path'])
+    _,secrets = build_secrets(emhass_conf,logger,secrets_path=emhass_conf['secrets_path'],no_response=True)
+    params = build_params(emhass_conf,secrets,config,logger)
+    retrieve_hass_conf, optim_conf, plant_conf = get_yaml_parse(params, logger)
     rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
-                      retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
+                      retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'],
                       params, emhass_conf, logger)
-    days_list = get_days_list(retrieve_hass_conf['days_to_retrieve'])
-    var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
+    days_list = get_days_list(retrieve_hass_conf['historic_days_to_retrieve'])
+    var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'], retrieve_hass_conf['sensor_power_photovoltaics']]
     rh.get_data(days_list, var_list, minimal_response=False, significant_changes_only=False)
-    rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
+    rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = retrieve_hass_conf['load_negative'],
                     set_zero_min = retrieve_hass_conf['set_zero_min'],
-                    var_replace_zero = retrieve_hass_conf['var_replace_zero'],
-                    var_interp = retrieve_hass_conf['var_interp'])
+                    var_replace_zero = retrieve_hass_conf['sensor_replace_zero'],
+                    var_interp = retrieve_hass_conf['sensor_linear_interp'])
     df_input_data = rh.df_final.copy()
 
     fcst, P_PV_forecast, P_load_forecast, df_input_data_dayahead, opt = \
         get_forecast_optim_objects(retrieve_hass_conf, optim_conf, plant_conf,
@@ -71,14 +80,14 @@
     template = 'presentation'
 
     # Let's plot the input data
-    fig_inputs1 = df_input_data[[str(retrieve_hass_conf['var_PV']),
-                                 str(retrieve_hass_conf['var_load'] + '_positive')]].plot()
+    fig_inputs1 = df_input_data[[str(retrieve_hass_conf['sensor_power_photovoltaics']),
+                                 str(retrieve_hass_conf['sensor_power_load_no_var_loads'] + '_positive')]].plot()
     fig_inputs1.layout.template = template
     fig_inputs1.update_yaxes(title_text = "Powers (W)")
     fig_inputs1.update_xaxes(title_text = "Time")
     fig_inputs1.show()
     if save_figures:
-        fig_inputs1.write_image(emhass_conf['root_path'] / "docs/images/inputs_power.svg",
+        fig_inputs1.write_image(emhass_conf['docs_path'] / "images/inputs_power.svg",
                                 width=1080, height=0.8*1080)
 
     fig_inputs2 = df_input_data[['unit_load_cost',
@@ -88,7 +97,7 @@
     fig_inputs2.update_xaxes(title_text = "Time")
     fig_inputs2.show()
     if save_figures:
-        fig_inputs2.write_image(emhass_conf['root_path'] / "docs/images/inputs_cost_price.svg",
+        fig_inputs2.write_image(emhass_conf['docs_path'] / "images/inputs_cost_price.svg",
                                 width=1080, height=0.8*1080)
 
     fig_inputs_dah = df_input_data_dayahead.plot()
@@ -97,7 +106,7 @@
     fig_inputs_dah.update_xaxes(title_text = "Time")
     fig_inputs_dah.show()
     if save_figures:
-        fig_inputs_dah.write_image(emhass_conf['root_path'] / "docs/images/inputs_dayahead.svg",
+        fig_inputs_dah.write_image(emhass_conf['docs_path'] / "images/inputs_dayahead.svg",
                                 width=1080, height=0.8*1080)
 
     # Let's first perform a perfect optimization
@@ -108,7 +117,7 @@
     fig_res.update_xaxes(title_text = "Time")
     fig_res.show()
     if save_figures:
-        fig_res.write_image(emhass_conf['root_path'] / "docs/images/optim_results_PV_defLoads_perfectOptim.svg",
+        fig_res.write_image(emhass_conf['docs_path'] / "images/optim_results_PV_defLoads_perfectOptim.svg",
                             width=1080, height=0.8*1080)
 
     print("System with: PV, two deferrable loads, perfect optimization, profit >> total cost function sum: "+\
@@ -124,7 +133,7 @@
     fig_res_dah.update_xaxes(title_text = "Time")
     fig_res_dah.show()
     if save_figures:
-        fig_res_dah.write_image(emhass_conf['root_path'] / "docs/images/optim_results_PV_defLoads_dayaheadOptim.svg",
+        fig_res_dah.write_image(emhass_conf['docs_path'] / "images/optim_results_PV_defLoads_dayaheadOptim.svg",
                                 width=1080, height=0.8*1080)
 
     print("System with: PV, two deferrable loads, dayahead optimization, profit >> total cost function sum: "+\
@@ -144,7 +153,7 @@
     fig_res_dah.update_xaxes(title_text = "Time")
     fig_res_dah.show()
     if save_figures:
-        fig_res_dah.write_image(emhass_conf['root_path'] / "docs/images/optim_results_defLoads_dayaheadOptim.svg",
+        fig_res_dah.write_image(emhass_conf['docs_path'] / "images/optim_results_defLoads_dayaheadOptim.svg",
                                 width=1080, height=0.8*1080)
 
     print("System with: two deferrable loads, dayahead optimization, profit >> total cost function sum: "+\
@@ -165,7 +174,7 @@
     fig_res_dah.update_xaxes(title_text = "Time")
     fig_res_dah.show()
     if save_figures:
-        fig_res_dah.write_image(emhass_conf['root_path'] / "docs/images/optim_results_PV_Batt_defLoads_dayaheadOptim.svg",
+        fig_res_dah.write_image(emhass_conf['docs_path'] / "images/optim_results_PV_Batt_defLoads_dayaheadOptim.svg",
                                 width=1080, height=0.8*1080)
     fig_res_dah = opt_res_dah[['SOC_opt']].plot()
     fig_res_dah.layout.template = template
@@ -173,7 +182,7 @@
     fig_res_dah.update_xaxes(title_text = "Time")
     fig_res_dah.show()
     if save_figures:
-        fig_res_dah.write_image(emhass_conf['root_path'] / "docs/images/optim_results_PV_Batt_defLoads_dayaheadOptim_SOC.svg",
+        fig_res_dah.write_image(emhass_conf['docs_path'] / "images/optim_results_PV_Batt_defLoads_dayaheadOptim_SOC.svg",
                                 width=1080, height=0.8*1080)
 
     print("System with: PV, Battery, two deferrable loads, dayahead optimization, profit >> total cost function sum: "+\
diff --git a/secrets_emhass(example).yaml b/secrets_emhass(example).yaml
index 312ae7b3..f1c63b51 100644
--- a/secrets_emhass(example).yaml
+++ b/secrets_emhass(example).yaml
@@ -1,12 +1,13 @@
 # Use this file to store secrets like usernames and passwords.
 # Learn more at https://home-assistant.io/docs/configuration/secrets/
+server_ip: 0.0.0.0
 hass_url: https://myhass.duckdns.org/
 long_lived_token: thatverylongtokenhere
 time_zone: Europe/Paris
-lat: 45.83
-lon: 6.86
-alt: 4807.8
+Latitude: 45.83
+Longitude: 6.86
+Altitude: 4807.8
 solcast_api_key: yoursecretsolcastapikey
 solcast_rooftop_id: yourrooftopid
 solar_forecast_kwp: 5
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 4482d4c0..60d2f325 100644
--- a/setup.py
+++ b/setup.py
@@ -62,7 +62,7 @@
             'emhass=emhass.command_line:main',
         ],
     },
-    package_data={'emhass': ['templates/index.html','templates/template.html','static/advanced.html','static/basic.html', 'static/script.js',
-                             'static/style.css','static/img/emhass_icon.png','static/img/emhass_logo_short.svg', 'static/img/feather-sprite.svg',
-                             'data/cec_modules.pbz2', 'data/cec_inverters.pbz2']},
+    package_data={'emhass': ['templates/index.html','templates/template.html','templates/configuration.html','static/advanced.html','static/basic.html', 'static/script.js', 'static/configuration_script.js',
+                             'static/style.css','static/configuration_list.html','static/img/emhass_icon.png','static/img/emhass_logo_short.svg', 'static/img/feather-sprite.svg','static/data/param_definitions.json',
+                             'data/cec_modules.pbz2', 'data/cec_inverters.pbz2','data/associations.csv','data/config_defaults.json']},
 )
diff --git a/src/emhass/command_line.py b/src/emhass/command_line.py
index 3dee844d..feb6d333 100644
--- a/src/emhass/command_line.py
+++ b/src/emhass/command_line.py
@@ -3,6 +3,7 @@
 import argparse
 import os
+import re
 import time
 import pathlib
 import logging
@@ -50,15 +51,25 @@ def set_input_data_dict(emhass_conf: dict, costfun: str,
     """
     logger.info("Setting up needed data")
+
+    # check if passed params is a dict
+    if (params != None) and (params != "null"):
+        if type(params) is str:
+            params = json.loads(params)
+    else:
+        params = {}
+
     # Parsing yaml
-    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
-        emhass_conf, use_secrets=not(get_data_from_file), params=params)
+    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params,logger)
+    if type(retrieve_hass_conf) is bool:
+        return False
+
     # Treat runtimeparams
     params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
         runtimeparams, params, retrieve_hass_conf, optim_conf, plant_conf, set_type, logger)
     # Define main objects
     rh = RetrieveHass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
-                      retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
+                      retrieve_hass_conf['optimization_time_step'], retrieve_hass_conf['time_zone'],
                       params, emhass_conf, logger, get_data_from_file=get_data_from_file)
     fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
                     params, emhass_conf, logger, get_data_from_file=get_data_from_file)
@@ -71,24 +82,24 @@
         if get_data_from_file:
             with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
-            retrieve_hass_conf['var_load'] = str(var_list[0])
-            retrieve_hass_conf['var_PV'] = str(var_list[1])
-            retrieve_hass_conf['var_interp'] = [
-                retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
-            retrieve_hass_conf['var_replace_zero'] = [
-                retrieve_hass_conf['var_PV']]
+            retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0])
+            retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1])
+            retrieve_hass_conf['sensor_linear_interp'] = [
+                retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']]
+            retrieve_hass_conf['sensor_replace_zero'] = [
+                retrieve_hass_conf['sensor_power_photovoltaics']]
         else:
             days_list = utils.get_days_list(
-                retrieve_hass_conf["days_to_retrieve"])
-            var_list = [retrieve_hass_conf["var_load"],
-                        retrieve_hass_conf["var_PV"]]
+                retrieve_hass_conf['historic_days_to_retrieve'])
+            var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'],
+                        retrieve_hass_conf['sensor_power_photovoltaics']]
             if not rh.get_data(days_list, var_list,
                                minimal_response=False, significant_changes_only=False):
                 return False
-        if not rh.prepare_data(retrieve_hass_conf["var_load"],
-                               load_negative=retrieve_hass_conf["load_negative"],
-                               set_zero_min=retrieve_hass_conf["set_zero_min"],
-                               var_replace_zero=retrieve_hass_conf["var_replace_zero"],
-                               var_interp=retrieve_hass_conf["var_interp"]):
+        if not rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'],
+                               load_negative=retrieve_hass_conf['load_negative'],
+                               set_zero_min=retrieve_hass_conf['set_zero_min'],
+                               var_replace_zero=retrieve_hass_conf['sensor_replace_zero'],
+                               var_interp=retrieve_hass_conf['sensor_linear_interp']):
             return False
         df_input_data = rh.df_final.copy()
         # What we don't need for this type of action
@@ -96,7 +107,7 @@
     elif set_type == "dayahead-optim":
         # Get PV and load forecasts
         df_weather = fcst.get_weather_forecast(
-            method=optim_conf["weather_forecast_method"])
+            method=optim_conf['weather_forecast_method'])
         if isinstance(df_weather, bool) and not df_weather:
             return False
         P_PV_forecast = fcst.get_power_from_weather(df_weather)
@@ -109,9 +120,12 @@
         df_input_data_dayahead = pd.DataFrame(np.transpose(np.vstack(
             [P_PV_forecast.values, P_load_forecast.values])),
             index=P_PV_forecast.index, columns=["P_PV_forecast", "P_load_forecast"])
-        if "freq" in retrieve_hass_conf and retrieve_hass_conf["freq"]:
-            freq = pd.to_timedelta(retrieve_hass_conf["freq"], "minute")
-            df_input_data_dayahead = df_input_data_dayahead.asfreq(freq)
+        if "optimization_time_step" in retrieve_hass_conf and retrieve_hass_conf["optimization_time_step"]:
+            if not isinstance(retrieve_hass_conf["optimization_time_step"], pd._libs.tslibs.timedeltas.Timedelta):
+                optimization_time_step = pd.to_timedelta(retrieve_hass_conf["optimization_time_step"], "minute")
+            else:
+                optimization_time_step = retrieve_hass_conf["optimization_time_step"]
+            df_input_data_dayahead = df_input_data_dayahead.asfreq(optimization_time_step)
         else:
             df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         params = json.loads(params)
@@ -126,23 +140,23 @@
         if get_data_from_file:
             with open(emhass_conf['data_path'] / 'test_df_final.pkl', 'rb') as inp:
                 rh.df_final, days_list, var_list = pickle.load(inp)
-            retrieve_hass_conf['var_load'] = str(var_list[0])
-            retrieve_hass_conf['var_PV'] = str(var_list[1])
-            retrieve_hass_conf['var_interp'] = [
-                retrieve_hass_conf['var_PV'], retrieve_hass_conf['var_load']]
-            retrieve_hass_conf['var_replace_zero'] = [
-                retrieve_hass_conf['var_PV']]
+            retrieve_hass_conf['sensor_power_load_no_var_loads'] = str(var_list[0])
+            retrieve_hass_conf['sensor_power_photovoltaics'] = str(var_list[1])
+            retrieve_hass_conf['sensor_linear_interp'] = [
+                retrieve_hass_conf['sensor_power_photovoltaics'], retrieve_hass_conf['sensor_power_load_no_var_loads']]
+            retrieve_hass_conf['sensor_replace_zero'] = [
+                retrieve_hass_conf['sensor_power_photovoltaics']]
         else:
             days_list = utils.get_days_list(1)
-            var_list = [retrieve_hass_conf["var_load"],
-                        retrieve_hass_conf["var_PV"]]
+            var_list = [retrieve_hass_conf['sensor_power_load_no_var_loads'],
+                        retrieve_hass_conf['sensor_power_photovoltaics']]
             if not rh.get_data(days_list, var_list,
                                minimal_response=False, significant_changes_only=False):
                 return False
-        if not rh.prepare_data(retrieve_hass_conf["var_load"],
-                               load_negative=retrieve_hass_conf["load_negative"],
-                               set_zero_min=retrieve_hass_conf["set_zero_min"],
-                               var_replace_zero=retrieve_hass_conf["var_replace_zero"],
-                               var_interp=retrieve_hass_conf["var_interp"]):
+        if not rh.prepare_data(retrieve_hass_conf['sensor_power_load_no_var_loads'],
+                               load_negative=retrieve_hass_conf['load_negative'],
+                               set_zero_min=retrieve_hass_conf['set_zero_min'],
+                               var_replace_zero=retrieve_hass_conf['sensor_replace_zero'],
+                               var_interp=retrieve_hass_conf['sensor_linear_interp']):
             return False
         df_input_data = rh.df_final.copy()
         # Get PV and load forecasts
@@ -159,9 +173,12 @@
                 "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data")
             return False
         df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
-        if "freq" in retrieve_hass_conf and retrieve_hass_conf["freq"]:
-            freq = pd.to_timedelta(retrieve_hass_conf["freq"], "minute")
-            df_input_data_dayahead = df_input_data_dayahead.asfreq(freq)
+        if "optimization_time_step" in retrieve_hass_conf and retrieve_hass_conf["optimization_time_step"]:
+            if not isinstance(retrieve_hass_conf["optimization_time_step"], pd._libs.tslibs.timedeltas.Timedelta):
+                optimization_time_step = pd.to_timedelta(retrieve_hass_conf["optimization_time_step"], "minute")
+            else:
+                optimization_time_step = retrieve_hass_conf["optimization_time_step"]
+            df_input_data_dayahead = df_input_data_dayahead.asfreq(optimization_time_step)
         else:
             df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
         df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
@@ -175,7 +192,7 @@
         P_PV_forecast, P_load_forecast = None, None
         params = json.loads(params)
         # Retrieve data from hass
-        days_to_retrieve = params["passed_data"]["days_to_retrieve"]
+        days_to_retrieve = params["passed_data"]['historic_days_to_retrieve']
         model_type = params["passed_data"]["model_type"]
         var_model = params["passed_data"]["var_model"]
         if get_data_from_file:
@@ -274,8 +291,7 @@
     """
     # Parsing yaml
-    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(
-        emhass_conf, use_secrets=True, params=params)
+    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
 
     # Treat runtimeparams
     params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
@@ -293,7 +309,7 @@
     fcst = Forecast(retrieve_hass_conf, optim_conf, plant_conf,
                     params, emhass_conf, logger)
-    result = fcst.get_weather_forecast(optim_conf["weather_forecast_method"])
+    result = fcst.get_weather_forecast(optim_conf['weather_forecast_method'])
     if isinstance(result, bool) and not result:
         return False
 
@@ -326,7 +342,7 @@
     if isinstance(df_input_data, bool) and not df_input_data:
         return False
     df_input_data = input_data_dict['fcst'].get_prod_price_forecast(
-        df_input_data, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'],
+        df_input_data, method=input_data_dict['fcst'].optim_conf['production_price_forecast_method'],
         list_and_perfect=True)
     if isinstance(df_input_data, bool) and not df_input_data:
         return False
@@ -347,7 +363,7 @@
     params = input_data_dict["params"]
 
     # if continual_publish, save perfect results to data_path/entities json
-    if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
+    if input_data_dict["retrieve_hass_conf"].get('continual_publish',False) or params["passed_data"].get("entity_save",False):
         #Trigger the publish function, save entity data and not post to HA
         publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
 
@@ -380,7 +396,7 @@
         return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
         df_input_data_dayahead,
-        method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+        method=input_data_dict['fcst'].optim_conf['production_price_forecast_method'])
     if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
         return False
     if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
@@ -406,7 +422,7 @@
     params = input_data_dict["params"]
 
     # if continual_publish, save day_ahead results to data_path/entities json
-    if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
+    if input_data_dict["retrieve_hass_conf"].get('continual_publish',False) or params["passed_data"].get("entity_save",False):
         #Trigger the publish function, save entity data and not post to HA
         publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
 
@@ -438,7 +454,7 @@
     if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
         return False
     df_input_data_dayahead = input_data_dict['fcst'].get_prod_price_forecast(
-        df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['prod_price_forecast_method'])
+        df_input_data_dayahead, method=input_data_dict['fcst'].optim_conf['production_price_forecast_method'])
     if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
         return False
     if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
@@ -448,9 +464,9 @@
     prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
     soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
     soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
-    def_total_hours = input_data_dict["params"]["passed_data"]["def_total_hours"]
-    def_start_timestep = input_data_dict["params"]["passed_data"]["def_start_timestep"]
-    def_end_timestep = input_data_dict["params"]["passed_data"]["def_end_timestep"]
+    def_total_hours = input_data_dict["params"]["passed_data"]['operating_hours_of_each_deferrable_load']
+    def_start_timestep = input_data_dict["params"]["passed_data"]['start_timesteps_of_each_deferrable_load']
+    def_end_timestep = input_data_dict["params"]["passed_data"]['end_timesteps_of_each_deferrable_load']
     opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
         df_input_data_dayahead, input_data_dict["P_PV_forecast"], input_data_dict["P_load_forecast"],
         prediction_horizon, soc_init, soc_final, def_total_hours,
@@ -473,7 +489,7 @@
     params = input_data_dict["params"]
 
     # if continual_publish, save mpc results to data_path/entities json
-    if input_data_dict["retrieve_hass_conf"].get("continual_publish",False) or params["passed_data"].get("entity_save",False):
+    if input_data_dict["retrieve_hass_conf"].get('continual_publish',False) or params["passed_data"].get("entity_save",False):
         #Trigger the publish function, save entity data and not post to HA
         publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
 
@@ -576,11 +592,11 @@
     now_precise = datetime.now(
         input_data_dict["retrieve_hass_conf"]["time_zone"]
     ).replace(second=0, microsecond=0)
-    if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+    if input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "nearest":
         idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
-    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+    elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "first":
         idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[0]
-    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+    elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "last":
         idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[0]
     if idx_closest == -1:
         idx_closest = predictions.index.get_indexer([now_precise], method="nearest")[0]
@@ -676,7 +692,9 @@
     # The MLRegressor object
     mlr = MLRegressor(data, model_type, regression_model, features, target, timestamp, logger)
     # Fit the ML model
-    mlr.fit(date_features=date_features)
+    fit = mlr.fit(date_features=date_features)
+    if not fit:
+        return False
     # Save model
     if not debug:
         filename = model_type + "_mlr.pkl"
@@ -757,10 +775,13 @@
     """
     logger.info("Publishing data to HASS instance")
-    if not isinstance(input_data_dict["params"],dict):
-        params = json.loads(input_data_dict["params"])
-    else:
-        params = input_data_dict["params"]
+    if input_data_dict:
+        if not isinstance(input_data_dict.get("params",{}),dict):
+            params = json.loads(input_data_dict["params"])
+        else:
+            params = input_data_dict.get("params",{})
+
+    # Check if a day ahead optimization has been performed (read CSV file)
     if save_data_to_file:
         today = datetime.now(timezone.utc).replace(
@@ -807,17 +828,17 @@
         opt_res_latest = pd.read_csv(
             input_data_dict['emhass_conf']['data_path'] / filename, index_col='timestamp')
         opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
-        opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]["freq"]
+        opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"]['optimization_time_step']
     # Estimate the current index
     now_precise = datetime.now(
         input_data_dict["retrieve_hass_conf"]["time_zone"]
     ).replace(second=0, microsecond=0)
-    if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
+    if input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "nearest":
         idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[0]
-    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
+    elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "first":
         idx_closest = opt_res_latest.index.get_indexer(
             [now_precise], method="ffill")[0]
-    elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
+    elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "last":
         idx_closest = opt_res_latest.index.get_indexer(
             [now_precise], method="bfill")[0]
     if idx_closest == -1:
@@ -885,7 +906,7 @@
         custom_deferrable_forecast_id = params["passed_data"][
             "custom_deferrable_forecast_id"
         ]
-        for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
+        for k in range(input_data_dict["opt"].optim_conf['number_of_deferrable_loads']):
             if "P_deferrable{}".format(k) not in opt_res_latest.columns:
                 logger.error(
                     "P_deferrable{}".format(k)
@@ -908,7 +929,7 @@
         custom_predicted_temperature_id = params["passed_data"][
             "custom_predicted_temperature_id"
        ]
-        for k in range(input_data_dict["opt"].optim_conf["num_def_loads"]):
+        for k in range(input_data_dict["opt"].optim_conf['number_of_deferrable_loads']):
             if "def_load_config" in input_data_dict["opt"].optim_conf.keys():
                 if "thermal_config" in input_data_dict["opt"].optim_conf["def_load_config"][k]:
                     input_data_dict["rh"].post_data(
@@ -924,7 +945,7 @@
                     )
                     cols_published = cols_published + ["predicted_temp_heater{}".format(k)]
     # Publish battery power
-    if input_data_dict["opt"].optim_conf["set_use_battery"]:
+    if input_data_dict["opt"].optim_conf['set_use_battery']:
         if "P_batt" not in opt_res_latest.columns:
             logger.error(
                 "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
@@ -1053,7 +1074,7 @@
     """
     logger.info("Continual publish thread service started")
-    freq = input_data_dict['retrieve_hass_conf'].get("freq", pd.to_timedelta(1, "minutes"))
+    freq = input_data_dict['retrieve_hass_conf'].get('optimization_time_step', pd.to_timedelta(1, "minutes"))
     entity_path_contents = []
     while True:
         # Sleep for x seconds (using current time as a reference for time left)
@@ -1064,7 +1085,14 @@
         for entity in entity_path_contents:
             if entity != "metadata.json":
                 # Call publish_json with entity file, build entity, and publish
-                publish_json(entity, input_data_dict, entity_path, logger, "continual_publish")
+                publish_json(entity, input_data_dict, entity_path, logger, 'continual_publish')
+        # Retrieve entity metadata from file
+        if os.path.isfile(entity_path / "metadata.json"):
+            with open(entity_path / "metadata.json", "r") as file:
+                metadata = json.load(file)
+                # Check if freq should be shorter
+                if not metadata.get("lowest_time_step",None) == None:
+                    freq = pd.to_timedelta(metadata["lowest_time_step"], "minutes")
         pass
     # This function should never return
     return False
method="ffill")[0] - elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last": + elif input_data_dict["retrieve_hass_conf"]['method_ts_round'] == "last": idx_closest = entity_data.index.get_indexer([now_precise], method="bfill")[0] if idx_closest == -1: idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0] # Call post data - if reference == "continual_publish": + if reference == 'continual_publish': logger.debug("Auto Published sensor:") logger_levels = "DEBUG" else: @@ -1161,7 +1187,9 @@ def main(): parser.add_argument('--action', type=str, help='Set the desired action, options are: perfect-optim, dayahead-optim,\ naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune') parser.add_argument('--config', type=str, - help='Define path to the config.yaml file') + help='Define path to the config.json/defaults.json file') + parser.add_argument('--params', type=str, default=None, + help='String of configuration parameters passed') parser.add_argument('--data', type=str, help='Define path to the Data files (.csv & .pkl)') parser.add_argument('--root', type=str, help='Define path emhass root') @@ -1169,19 +1197,19 @@ def main(): help='Define the type of cost function, options are: profit, cost, self-consumption') parser.add_argument('--log2file', type=strtobool, default='False', help='Define if we should log to a file or not') - parser.add_argument('--params', type=str, default=None, - help='Configuration parameters passed from data/options.json') + parser.add_argument('--secrets', type=str, default=None, + help='Define secret parameter file (secrets_emhass.yaml) path') parser.add_argument('--runtimeparams', type=str, default=None, help='Pass runtime optimization parameters as dictionnary') parser.add_argument('--debug', type=strtobool, default='False', help='Use True for testing purposes') args = parser.parse_args() + # The path to the configuration files if args.config is not None: config_path = pathlib.Path(args.config) else: - config_path = pathlib.Path( - str(utils.get_root(__file__, num_parent=2) / 'config_emhass.yaml')) + config_path = pathlib.Path(str(utils.get_root(__file__, num_parent=3) / 'config.json')) if args.data is not None: data_path = pathlib.Path(args.data) else: @@ -1189,30 +1217,50 @@ def main(): if args.root is not None: root_path = pathlib.Path(args.root) else: - root_path = config_path.parent + root_path = utils.get_root(__file__, num_parent=1) + if args.secrets is not None: + secrets_path = pathlib.Path(args.secrets) + else: + secrets_path = pathlib.Path(config_path.parent / 'secrets_emhass.yaml') + + associations_path = root_path / 'data/associations.csv' + defaults_path = root_path / 'data/config_defaults.json' + emhass_conf = {} emhass_conf['config_path'] = config_path emhass_conf['data_path'] = data_path emhass_conf['root_path'] = root_path + emhass_conf['associations_path'] = associations_path + emhass_conf['defaults_path'] = defaults_path # create logger logger, ch = utils.get_logger( __name__, emhass_conf, save_to_file=bool(args.log2file)) + + # Check paths logger.debug("config path: " + str(config_path)) logger.debug("data path: " + str(data_path)) logger.debug("root path: " + str(root_path)) - if not config_path.exists(): + if not associations_path.exists(): logger.error( - "Could not find config_emhass.yaml file in: " + str(config_path)) - logger.error("Try setting config file path with --config") + "Could not find associations.csv file in: " + str(associations_path)) + 
logger.error("Try setting config file path with --associations") return False + if not config_path.exists(): + logger.warning( + "Could not find config.json file in: " + str(config_path)) + logger.warning("Try setting config file path with --config") + if not secrets_path.exists(): + logger.warning("Could not find secrets file in: " + str(secrets_path)) + logger.warning("Try setting secrets file path with --secrets") if not os.path.isdir(data_path): - logger.error("Could not find data foulder in: " + str(data_path)) + logger.error("Could not find data folder in: " + str(data_path)) logger.error("Try setting data path with --data") return False - if not os.path.isdir(root_path / 'src'): - logger.error("Could not find emhass/src foulder in: " + str(root_path)) + if not os.path.isdir(root_path): + logger.error("Could not find emhass/src folder in: " + str(root_path)) logger.error("Try setting emhass root path with --root") return False + # Additional argument try: parser.add_argument( @@ -1225,10 +1273,47 @@ def main(): logger.info( "Version not found for emhass package. Or importlib exited with PackageNotFoundError.", ) - # Setup parameters + + # Setup config + config = {} + # Check if passed config file is yaml of json, build config accordingly + if config_path.exists(): + config_file_ending = re.findall("(?<=\.).*$", str(config_path)) + if len(config_file_ending) > 0: + match(config_file_ending[0]): + case "json": + config = utils.build_config(emhass_conf,logger,defaults_path,config_path) + case "yaml": + config = utils.build_config(emhass_conf,logger,defaults_path,config_path=config_path) + case "yml": + config = utils.build_config(emhass_conf,logger,defaults_path,config_path=config_path) + # If unable to find config file, use only defaults_config.json + else: + logger.warning("Unable to obtain config.json file, building parameters with only defaults") + config = utils.build_config(emhass_conf,logger,defaults_path) + if type(config) is bool and not config: + raise Exception("Failed to find default config") + + + # Obtain secrets from secrets_emhass.yaml? 
+ params_secrets = {} + emhass_conf, built_secrets = utils.build_secrets(emhass_conf,logger,secrets_path=secrets_path) + params_secrets.update(built_secrets) + + # Build params + params = utils.build_params(emhass_conf, params_secrets, config, logger) + if type(params) is bool: + raise Exception("A error has occurred while building parameters") + # Add any passed params from args to params + if args.params: + params.update(json.loads(args.params)) + input_data_dict = set_input_data_dict(emhass_conf, - args.costfun, args.params, args.runtimeparams, args.action, + args.costfun, json.dumps(params), args.runtimeparams, args.action, logger, args.debug) + if type(input_data_dict) is bool: + raise Exception("A error has occurred while creating action objects") + # Perform selected action if args.action == "perfect-optim": opt_res = perfect_forecast_optim( diff --git a/src/emhass/data/associations.csv b/src/emhass/data/associations.csv new file mode 100644 index 00000000..e6094557 --- /dev/null +++ b/src/emhass/data/associations.csv @@ -0,0 +1,61 @@ +config_categorie,legacy_parameter_name,parameter,list_name +retrieve_hass_conf,freq,optimization_time_step +retrieve_hass_conf,days_to_retrieve,historic_days_to_retrieve +retrieve_hass_conf,var_PV,sensor_power_photovoltaics +retrieve_hass_conf,var_load,sensor_power_load_no_var_loads +retrieve_hass_conf,load_negative,load_negative +retrieve_hass_conf,set_zero_min,set_zero_min +retrieve_hass_conf,var_replace_zero,sensor_replace_zero,list_sensor_replace_zero +retrieve_hass_conf,var_interp,sensor_linear_interp,list_sensor_linear_interp +retrieve_hass_conf,method_ts_round,method_ts_round +retrieve_hass_conf,continual_publish,continual_publish +params_secrets,time_zone,time_zone +params_secrets,lat,Latitude +params_secrets,lon,Longitude +params_secrets,alt,Altitude +optim_conf,set_use_battery,set_use_battery +optim_conf,num_def_loads,number_of_deferrable_loads +optim_conf,P_deferrable_nom,nominal_power_of_deferrable_loads,list_nominal_power_of_deferrable_loads +optim_conf,def_total_hours,operating_hours_of_each_deferrable_load,list_operating_hours_of_each_deferrable_load +optim_conf,treat_def_as_semi_cont,treat_deferrable_load_as_semi_cont,list_treat_deferrable_load_as_semi_cont +optim_conf,set_def_constant,set_deferrable_load_single_constant,list_set_deferrable_load_single_constant +optim_conf,def_start_penalty,set_deferrable_startup_penalty,list_set_deferrable_startup_penalty +optim_conf,delta_forecast,delta_forecast_daily +optim_conf,load_forecast_method,load_forecast_method +optim_conf,load_cost_forecast_method,load_cost_forecast_method +optim_conf,load_cost_hp,load_peak_hours_cost +optim_conf,load_cost_hc,load_offpeak_hours_cost +optim_conf,prod_price_forecast_method,production_price_forecast_method +optim_conf,prod_sell_price,photovoltaic_production_sell_price +optim_conf,set_total_pv_sell,set_total_pv_sell +optim_conf,lp_solver,lp_solver +optim_conf,lp_solver_path,lp_solver_path +optim_conf,set_nocharge_from_grid,set_nocharge_from_grid +optim_conf,set_nodischarge_to_grid,set_nodischarge_to_grid +optim_conf,set_battery_dynamic,set_battery_dynamic +optim_conf,battery_dynamic_max,battery_dynamic_max +optim_conf,battery_dynamic_min,battery_dynamic_min +optim_conf,weight_battery_discharge,weight_battery_discharge +optim_conf,weight_battery_charge,weight_battery_charge +optim_conf,weather_forecast_method,weather_forecast_method +optim_conf,def_start_timestep,start_timesteps_of_each_deferrable_load,list_start_timesteps_of_each_deferrable_load 
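With `main()` reworked this way, the same pipeline can be driven programmatically. A condensed, hypothetical driver equivalent to `emhass --action dayahead-optim --costfun profit`, reusing `emhass_conf`, `secrets_path`, and `logger` from a setup like the scripts above:

```python
import json
from emhass import utils
from emhass.command_line import set_input_data_dict, dayahead_forecast_optim

# Resolve defaults, user config, and secrets, then build the params blob
config = utils.build_config(emhass_conf, logger, emhass_conf['defaults_path'],
                            emhass_conf['config_path'])
emhass_conf, secrets = utils.build_secrets(emhass_conf, logger,
                                           secrets_path=secrets_path)
params = utils.build_params(emhass_conf, secrets, config, logger)

# Same call shape as main(): (emhass_conf, costfun, params, runtimeparams,
# action, logger, debug)
input_data_dict = set_input_data_dict(emhass_conf, 'profit', json.dumps(params),
                                      None, 'dayahead-optim', logger, False)
opt_res = dayahead_forecast_optim(input_data_dict, logger)
```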
diff --git a/src/emhass/data/associations.csv b/src/emhass/data/associations.csv
new file mode 100644
index 00000000..e6094557
--- /dev/null
+++ b/src/emhass/data/associations.csv
@@ -0,0 +1,61 @@
+config_categorie,legacy_parameter_name,parameter,list_name
+retrieve_hass_conf,freq,optimization_time_step
+retrieve_hass_conf,days_to_retrieve,historic_days_to_retrieve
+retrieve_hass_conf,var_PV,sensor_power_photovoltaics
+retrieve_hass_conf,var_load,sensor_power_load_no_var_loads
+retrieve_hass_conf,load_negative,load_negative
+retrieve_hass_conf,set_zero_min,set_zero_min
+retrieve_hass_conf,var_replace_zero,sensor_replace_zero,list_sensor_replace_zero
+retrieve_hass_conf,var_interp,sensor_linear_interp,list_sensor_linear_interp
+retrieve_hass_conf,method_ts_round,method_ts_round
+retrieve_hass_conf,continual_publish,continual_publish
+params_secrets,time_zone,time_zone
+params_secrets,lat,Latitude
+params_secrets,lon,Longitude
+params_secrets,alt,Altitude
+optim_conf,set_use_battery,set_use_battery
+optim_conf,num_def_loads,number_of_deferrable_loads
+optim_conf,P_deferrable_nom,nominal_power_of_deferrable_loads,list_nominal_power_of_deferrable_loads
+optim_conf,def_total_hours,operating_hours_of_each_deferrable_load,list_operating_hours_of_each_deferrable_load
+optim_conf,treat_def_as_semi_cont,treat_deferrable_load_as_semi_cont,list_treat_deferrable_load_as_semi_cont
+optim_conf,set_def_constant,set_deferrable_load_single_constant,list_set_deferrable_load_single_constant
+optim_conf,def_start_penalty,set_deferrable_startup_penalty,list_set_deferrable_startup_penalty
+optim_conf,delta_forecast,delta_forecast_daily
+optim_conf,load_forecast_method,load_forecast_method
+optim_conf,load_cost_forecast_method,load_cost_forecast_method
+optim_conf,load_cost_hp,load_peak_hours_cost
+optim_conf,load_cost_hc,load_offpeak_hours_cost
+optim_conf,prod_price_forecast_method,production_price_forecast_method
+optim_conf,prod_sell_price,photovoltaic_production_sell_price
+optim_conf,set_total_pv_sell,set_total_pv_sell
+optim_conf,lp_solver,lp_solver
+optim_conf,lp_solver_path,lp_solver_path
+optim_conf,set_nocharge_from_grid,set_nocharge_from_grid
+optim_conf,set_nodischarge_to_grid,set_nodischarge_to_grid
+optim_conf,set_battery_dynamic,set_battery_dynamic
+optim_conf,battery_dynamic_max,battery_dynamic_max
+optim_conf,battery_dynamic_min,battery_dynamic_min
+optim_conf,weight_battery_discharge,weight_battery_discharge
+optim_conf,weight_battery_charge,weight_battery_charge
+optim_conf,weather_forecast_method,weather_forecast_method
+optim_conf,def_start_timestep,start_timesteps_of_each_deferrable_load,list_start_timesteps_of_each_deferrable_load
+optim_conf,def_end_timestep,end_timesteps_of_each_deferrable_load,list_end_timesteps_of_each_deferrable_load
+optim_conf,list_hp_periods,load_peak_hour_periods
+plant_conf,P_from_grid_max,maximum_power_from_grid
+plant_conf,P_to_grid_max,maximum_power_to_grid
+plant_conf,module_model,pv_module_model,list_pv_module_model
+plant_conf,inverter_model,pv_inverter_model,list_pv_inverter_model
+plant_conf,surface_tilt,surface_tilt,list_surface_tilt
+plant_conf,surface_azimuth,surface_azimuth,list_surface_azimuth
+plant_conf,modules_per_string,modules_per_string,list_modules_per_string
+plant_conf,strings_per_inverter,strings_per_inverter,list_strings_per_inverter
+plant_conf,inverter_is_hybrid,inverter_is_hybrid
+plant_conf,compute_curtailment,compute_curtailment
+plant_conf,Pd_max,battery_discharge_power_max
+plant_conf,Pc_max,battery_charge_power_max
+plant_conf,eta_disch,battery_discharge_efficiency
+plant_conf,eta_ch,battery_charge_efficiency
+plant_conf,Enom,battery_nominal_energy_capacity
+plant_conf,SOCmin,battery_minimum_state_of_charge
+plant_conf,SOCmax,battery_maximum_state_of_charge
+plant_conf,SOCtarget,battery_target_state_of_charge
\ No newline at end of file
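`associations.csv` is the single source of truth for legacy-to-new names (the fourth column holds the add-on `list_*` wrapper name, when one exists). A sketch of using it to translate an old config dict, assuming only the column layout shown above:

```python
import csv

# Build {legacy_name: (category, new_name)} from the table above
with open('src/emhass/data/associations.csv', newline='') as f:
    rows = list(csv.reader(f))[1:]  # skip the header row
legacy_map = {r[1]: (r[0], r[2]) for r in rows if len(r) >= 3}

# Translate a legacy config dict to the new parameter names
old_config = {'freq': 30, 'days_to_retrieve': 2, 'var_PV': 'sensor.power_photovoltaics'}
new_config = {legacy_map[k][1] if k in legacy_map else k: v
              for k, v in old_config.items()}
# -> {'optimization_time_step': 30, 'historic_days_to_retrieve': 2,
#     'sensor_power_photovoltaics': 'sensor.power_photovoltaics'}
```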
diff --git a/src/emhass/data/config_defaults.json b/src/emhass/data/config_defaults.json
new file mode 100644
index 00000000..d1517e5e
--- /dev/null
+++ b/src/emhass/data/config_defaults.json
@@ -0,0 +1,117 @@
+{
+    "logging_level": "INFO",
+    "costfun": "profit",
+    "optimization_time_step": 30,
+    "historic_days_to_retrieve": 2,
+    "method_ts_round": "nearest",
+    "continual_publish": false,
+    "data_path": "default",
+    "set_total_pv_sell": false,
+    "lp_solver": "default",
+    "lp_solver_path": "empty",
+    "set_nocharge_from_grid": false,
+    "set_nodischarge_to_grid": true,
+    "set_battery_dynamic": false,
+    "battery_dynamic_max": 0.9,
+    "battery_dynamic_min": -0.9,
+    "weight_battery_discharge": 1.0,
+    "weight_battery_charge": 1.0,
+    "sensor_power_photovoltaics": "sensor.power_photovoltaics",
+    "sensor_power_load_no_var_loads": "sensor.power_load_no_var_loads",
+    "sensor_replace_zero": [
+        "sensor.power_photovoltaics",
+        "sensor.power_load_no_var_loads"
+    ],
+    "sensor_linear_interp": [
+        "sensor.power_photovoltaics",
+        "sensor.power_load_no_var_loads"
+    ],
+    "load_negative": false,
+    "set_zero_min": true,
+    "number_of_deferrable_loads": 2,
+    "nominal_power_of_deferrable_loads": [
+        3000.0,
+        750.0
+    ],
+    "operating_hours_of_each_deferrable_load": [
+        4,
+        0
+    ],
+    "weather_forecast_method": "scrapper",
+    "load_forecast_method": "naive",
+    "delta_forecast_daily": 1,
+    "load_cost_forecast_method": "hp_hc_periods",
+    "start_timesteps_of_each_deferrable_load": [
+        0,
+        0
+    ],
+    "end_timesteps_of_each_deferrable_load": [
+        0,
+        0
+    ],
+    "load_peak_hour_periods": {
+        "period_hp_1": [
+            {
+                "start": "02:54"
+            },
+            {
+                "end": "15:24"
+            }
+        ],
+        "period_hp_2": [
+            {
+                "start": "17:24"
+            },
+            {
+                "end": "20:24"
+            }
+        ]
+    },
+    "treat_deferrable_load_as_semi_cont": [
+        true,
+        true
+    ],
+    "set_deferrable_load_single_constant": [
+        false,
+        false
+    ],
+    "set_deferrable_startup_penalty": [
+        0.0,
+        0.0
+    ],
+    "load_peak_hours_cost": 0.1907,
+    "load_offpeak_hours_cost": 0.1419,
+    "production_price_forecast_method": "constant",
+    "photovoltaic_production_sell_price": 0.1419,
+    "maximum_power_from_grid": 9000,
+    "maximum_power_to_grid": 9000,
+    "pv_module_model": [
+        "CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M"
+    ],
+    "pv_inverter_model": [
+        "Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_"
+    ],
+    "surface_tilt": [
+        30
+    ],
+    "surface_azimuth": [
+        205
+    ],
+    "modules_per_string": [
+        16
+    ],
+    "strings_per_inverter": [
+        1
+    ],
+    "inverter_is_hybrid": false,
+    "compute_curtailment": false,
+    "set_use_battery": false,
+    "battery_discharge_power_max": 1000,
+    "battery_charge_power_max": 1000,
+    "battery_discharge_efficiency": 0.95,
+    "battery_charge_efficiency": 0.95,
+    "battery_nominal_energy_capacity": 5000,
+    "battery_minimum_state_of_charge": 0.3,
+    "battery_maximum_state_of_charge": 0.9,
+    "battery_target_state_of_charge": 0.6
+}
\ No newline at end of file
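`config_defaults.json` now ships inside the package and seeds every parameter; `build_config` then layers the user's `config.json` (or a legacy YAML) on top. Roughly like the following, which is an assumption about `build_config` internals rather than a quote of them:

```python
import json
from pathlib import Path

def sketch_build_config(defaults_path: Path, config_path: Path | None) -> dict:
    # Start from the packaged defaults so every key has a value
    config = json.loads(defaults_path.read_text())
    # Let user-supplied values win over defaults
    if config_path is not None and config_path.exists():
        config.update(json.loads(config_path.read_text()))
    return config
```

Note the `match`/`case` used in `main()` above pins the minimum Python to 3.10, which this sketch's `Path | None` annotation also assumes.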
diff --git a/src/emhass/forecast.py b/src/emhass/forecast.py
index 7058c220..366c6925 100644
--- a/src/emhass/forecast.py
+++ b/src/emhass/forecast.py
@@ -132,22 +132,24 @@
         self.retrieve_hass_conf = retrieve_hass_conf
         self.optim_conf = optim_conf
         self.plant_conf = plant_conf
-        self.freq = self.retrieve_hass_conf['freq']
+        self.freq = self.retrieve_hass_conf['optimization_time_step']
         self.time_zone = self.retrieve_hass_conf['time_zone']
         self.method_ts_round = self.retrieve_hass_conf['method_ts_round']
         self.timeStep = self.freq.seconds/3600 # in hours
         self.time_delta = pd.to_timedelta(opt_time_delta, "hours")
-        self.var_PV = self.retrieve_hass_conf['var_PV']
-        self.var_load = self.retrieve_hass_conf['var_load']
+        self.var_PV = self.retrieve_hass_conf['sensor_power_photovoltaics']
+        self.var_load = self.retrieve_hass_conf['sensor_power_load_no_var_loads']
         self.var_load_new = self.var_load+'_positive'
-        self.lat = self.retrieve_hass_conf['lat']
-        self.lon = self.retrieve_hass_conf['lon']
+        self.lat = self.retrieve_hass_conf['Latitude']
+        self.lon = self.retrieve_hass_conf['Longitude']
         self.emhass_conf = emhass_conf
         self.logger = logger
         self.get_data_from_file = get_data_from_file
         self.var_load_cost = 'unit_load_cost'
         self.var_prod_price = 'unit_prod_price'
-        if params is None:
+        if (params == None) or (params == "null"):
+            self.params = {}
+        elif type(params) is dict:
             self.params = params
         else:
             self.params = json.loads(params)
@@ -159,10 +161,10 @@
             self.start_forecast = pd.Timestamp(datetime.now(), tz=self.time_zone).replace(microsecond=0).ceil(freq=self.freq)
         else:
             self.logger.error("Wrong method_ts_round passed parameter")
-        self.end_forecast = (self.start_forecast + self.optim_conf['delta_forecast']).replace(microsecond=0)
+        self.end_forecast = (self.start_forecast + self.optim_conf['delta_forecast_daily']).replace(microsecond=0)
         self.forecast_dates = pd.date_range(start=self.start_forecast,
                                             end=self.end_forecast-self.freq,
-                                            freq=self.freq).round(self.freq, ambiguous='infer', nonexistent='shift_forward')
+                                            freq=self.freq, tz=self.time_zone).tz_convert('utc').round(self.freq, ambiguous='infer', nonexistent='shift_forward').tz_convert(self.time_zone)
         if params is not None:
             if 'prediction_horizon' in list(self.params['passed_data'].keys()):
                 if self.params['passed_data']['prediction_horizon'] is not None:
@@ -190,7 +192,7 @@
             freq_scrap = pd.to_timedelta(60, "minutes") # The scrapping time step is 60min on clearoutside
             forecast_dates_scrap = pd.date_range(start=self.start_forecast,
                                                  end=self.end_forecast-freq_scrap,
-                                                 freq=freq_scrap).round(freq_scrap, ambiguous='infer', nonexistent='shift_forward')
+                                                 freq=freq_scrap, tz=self.time_zone).tz_convert('utc').round(freq_scrap, ambiguous='infer', nonexistent='shift_forward').tz_convert(self.time_zone)
             # Using the clearoutside webpage
             response = get("https://clearoutside.com/forecast/"+str(round(self.lat, 2))+"/"+str(round(self.lon, 2))+"?desktop=true")
             '''import bz2 # Uncomment to save a serialized data for tests
@@ -226,9 +228,9 @@
                                                          data['temp_air'], data['relative_humidity'])
         elif method == 'solcast': # using Solcast API
             # Check if weather_forecast_cache is true or if forecast_data file does not exist
-            if self.params["passed_data"]["weather_forecast_cache"] or not os.path.isfile(w_forecast_cache_path):
+            if not os.path.isfile(w_forecast_cache_path):
                 # Check if weather_forecast_cache_only is true, if so produce error for not finding cache file
-                if not self.params["passed_data"]["weather_forecast_cache_only"]:
+                if not self.params["passed_data"].get("weather_forecast_cache_only",False):
                     # Retrieve data from the Solcast API
                     if 'solcast_api_key' not in self.retrieve_hass_conf:
                         self.logger.error("The solcast_api_key parameter was not defined")
@@ -243,7 +245,7 @@
                     }
                     days_solcast = int(len(self.forecast_dates)*self.freq.seconds/3600)
                     # If weather_forecast_cache, set request days as twice as long to avoid length issues (add a buffer)
-                    if self.params["passed_data"]["weather_forecast_cache"]:
+                    if self.params["passed_data"].get("weather_forecast_cache",False):
                         days_solcast = min((days_solcast * 2), 336)
                     url = "https://api.solcast.com.au/rooftop_sites/"+self.retrieve_hass_conf['solcast_rooftop_id']+"/forecasts?hours="+str(days_solcast)
                     response = get(url, headers=headers)
@@ -269,7 +271,7 @@
                         self.logger.error("Not enough data retried from Solcast service, try increasing the time step or use MPC.")
                     else:
                         # If runtime weather_forecast_cache is true save forecast result to file as cache
-                        if self.params["passed_data"]["weather_forecast_cache"]:
+                        if self.params["passed_data"].get("weather_forecast_cache",False):
                             # Add x2 forecast periods for cached results. This adds a extra delta_forecast amount of days for a buffer
                             cached_forecast_dates = self.forecast_dates.union(pd.date_range(self.forecast_dates[-1], periods=(len(self.forecast_dates) +1), freq=self.freq)[1:])
                             cache_data_list = data_list[0:len(cached_forecast_dates)]
@@ -289,11 +291,11 @@
                 data = pd.DataFrame.from_dict(data_dict)
                 # Define index
                 data.set_index('ts', inplace=True)
-            # Else, notify user to update cache
+            # Else, notify user to update cache
             else:
                 self.logger.error("Unable to obtain Solcast cache file.")
                 self.logger.error("Try running optimization again with 'weather_forecast_cache_only': false")
-                self.logger.error("Optionally, obtain new Solcast cache with runtime parameter 'weather_forecast_cache': true in an optimization, or run the `forecast-cache` action, to pull new data from Solcast and cache.")
+                self.logger.error("Optionally, obtain new Solcast cache with runtime parameter 'weather_forecast_cache': true in an optimization, or run the `weather-forecast-cache` action, to pull new data from Solcast and cache.")
                 return False
             # Else, open stored weather_forecast_data.pkl file for previous forecast data (cached data)
             else:
@@ -301,7 +303,7 @@
                     data = cPickle.load(file)
                 if not isinstance(data, pd.DataFrame) or len(data) < len(self.forecast_dates):
                     self.logger.error("There has been a error obtaining cached Solcast forecast data.")
-                    self.logger.error("Try running optimization again with 'weather_forecast_cache': true, or run action `forecast-cache`, to pull new data from Solcast and cache.")
+                    self.logger.error("Try running optimization again with 'weather_forecast_cache': true, or run action `weather-forecast-cache`, to pull new data from Solcast and cache.")
                     self.logger.warning("Removing old Solcast cache file. Next optimization will pull data from Solcast, unless 'weather_forecast_cache_only': true")
                     os.remove(w_forecast_cache_path)
                     return False
self.optim_conf['delta_forecast_daily']).replace(microsecond=0) forecast_dates_csv = pd.date_range(start=start_forecast_csv, end=end_forecast_csv+timedelta(days=timedelta_days)-self.freq, - freq=self.freq).round(self.freq, ambiguous='infer', nonexistent='shift_forward') + freq=self.freq, tz=self.time_zone).tz_convert('utc').round(self.freq, ambiguous='infer', nonexistent='shift_forward').tz_convert(self.time_zone) if self.params is not None: if 'prediction_horizon' in list(self.params['passed_data'].keys()): if self.params['passed_data']['prediction_horizon'] is not None: @@ -561,7 +563,7 @@ def get_forecast_out_from_csv_or_list(self, df_final: pd.DataFrame, forecast_dat Get the forecast data as a DataFrame from a CSV file. The data contained in the CSV file should be a 24h forecast with the same frequency as - the main 'freq' parameter in the configuration file. The timestamp will not be used and + the main 'optimization_time_step' parameter in the configuration file. The timestamp will not be used and a new DateTimeIndex is generated to fit the timestamp index of the input data in 'df_final'. :param df_final: The DataFrame containing the input data. @@ -695,7 +697,7 @@ def get_load_forecast(self, days_min_load_forecast: Optional[int] = 3, method: O with open(filename_path, 'rb') as inp: rh.df_final, days_list, var_list = pickle.load(inp) self.var_load = var_list[0] - self.retrieve_hass_conf['var_load'] = self.var_load + self.retrieve_hass_conf['sensor_power_load_no_var_loads'] = self.var_load var_interp = [var_list[0]] self.var_list = [var_list[0]] self.var_load_new = self.var_load+'_positive' @@ -704,13 +706,13 @@ def get_load_forecast(self, days_min_load_forecast: Optional[int] = 3, method: O if not rh.get_data(days_list, var_list): return False if not rh.prepare_data( - self.retrieve_hass_conf['var_load'], load_negative = self.retrieve_hass_conf['load_negative'], + self.retrieve_hass_conf['sensor_power_load_no_var_loads'], load_negative = self.retrieve_hass_conf['load_negative'], set_zero_min = self.retrieve_hass_conf['set_zero_min'], var_replace_zero = var_replace_zero, var_interp = var_interp): return False df = rh.df_final.copy()[[self.var_load_new]] if method == 'naive': # using a naive approach - mask_forecast_out = (df.index > days_list[-1] - self.optim_conf['delta_forecast']) + mask_forecast_out = (df.index > days_list[-1] - self.optim_conf['delta_forecast_daily']) forecast_out = df.copy().loc[mask_forecast_out] forecast_out = forecast_out.rename(columns={self.var_load_new: 'yhat'}) # Force forecast_out length to avoid mismatches @@ -812,13 +814,13 @@ def get_load_cost_forecast(self, df_final: pd.DataFrame, method: Optional[str] = """ csv_path = self.emhass_conf['data_path'] / csv_path if method == 'hp_hc_periods': - df_final[self.var_load_cost] = self.optim_conf['load_cost_hc'] + df_final[self.var_load_cost] = self.optim_conf['load_offpeak_hours_cost'] list_df_hp = [] - for key, period_hp in self.optim_conf['list_hp_periods'].items(): + for key, period_hp in self.optim_conf['load_peak_hour_periods'].items(): list_df_hp.append(df_final[self.var_load_cost].between_time( period_hp[0]['start'], period_hp[1]['end'])) for df_hp in list_df_hp: - df_final.loc[df_hp.index, self.var_load_cost] = self.optim_conf['load_cost_hp'] + df_final.loc[df_hp.index, self.var_load_cost] = self.optim_conf['load_peak_hours_cost'] elif method == 'csv': forecast_dates_csv = self.get_forecast_days_csv(timedelta_days=0) forecast_out = self.get_forecast_out_from_csv_or_list( @@ -871,7 +873,7 @@ def 
get_prod_price_forecast(self, df_final: pd.DataFrame, method: Optional[str] """ csv_path = self.emhass_conf['data_path'] / csv_path if method == 'constant': - df_final[self.var_prod_price] = self.optim_conf['prod_sell_price'] + df_final[self.var_prod_price] = self.optim_conf['photovoltaic_production_sell_price'] elif method == 'csv': forecast_dates_csv = self.get_forecast_days_csv(timedelta_days=0) forecast_out = self.get_forecast_out_from_csv_or_list( diff --git a/src/emhass/machine_learning_forecaster.py b/src/emhass/machine_learning_forecaster.py index ea44a4ee..3691ead6 100644 --- a/src/emhass/machine_learning_forecaster.py +++ b/src/emhass/machine_learning_forecaster.py @@ -141,7 +141,8 @@ def fit(self, split_date_delta: Optional[str] = '48h', perform_backtest: Optiona elif self.sklearn_model == 'KNeighborsRegressor': base_model = KNeighborsRegressor() else: - self.logger.error("Passed sklearn model "+self.sklearn_model+" is not valid") + self.logger.error("Passed sklearn model "+self.sklearn_model+" is not valid. Defaulting to KNeighborsRegressor") + base_model = KNeighborsRegressor() # Define the forecaster object self.forecaster = ForecasterAutoreg( regressor = base_model, diff --git a/src/emhass/machine_learning_regressor.py b/src/emhass/machine_learning_regressor.py index a44df8db..9e443c6b 100644 --- a/src/emhass/machine_learning_regressor.py +++ b/src/emhass/machine_learning_regressor.py @@ -176,15 +176,17 @@ def get_regression_model(self: MLRegressor) -> tuple[str, str]: "Passed model %s is not valid", self.regression_model, ) - return None + return None, None return base_model, param_grid - def fit(self: MLRegressor, date_features: list | None = None) -> None: + def fit(self: MLRegressor, date_features: list | None = None) -> bool: r"""Fit the model using the provided data. :param date_features: A list of 'date_features' to take into account when \ fitting the model. :type date_features: list + :return: True if the fit was successful, False otherwise + :rtype: bool """ self.logger.info("Performing a MLRegressor fit for %s", self.model_type) self.data_exo = pd.DataFrame(self.data) @@ -217,6 +219,8 @@ def fit(self: MLRegressor, date_features: list | None = None) -> None: X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) self.steps = len(X_test) base_model, param_grid = self.get_regression_model() + if base_model is None: + return False self.model = make_pipeline(StandardScaler(), base_model) # Create a grid search object self.grid_search = GridSearchCV(self.model, param_grid, cv=5, scoring="neg_mean_squared_error", @@ -235,6 +239,7 @@ def fit(self: MLRegressor, date_features: list | None = None) -> None: "Prediction R2 score of fitted model on test data: %s", pred_metric, ) + return True def predict(self: MLRegressor, new_values: list) -> np.ndarray: """Predict a new value.
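The machine_learning_regressor.py hunks above change the module's failure contract: get_regression_model now returns a (None, None) pair when the model name is unknown, and fit surfaces that as a boolean instead of crashing deep inside the pipeline. A minimal sketch of the same guard pattern, assuming a simplified illustrative model table (VALID_MODELS and its grid contents are placeholders, not EMHASS code):

from sklearn.neighbors import KNeighborsRegressor

# Illustrative lookup table; the real module maps several sklearn model
# names to a (regressor, parameter grid) pair for GridSearchCV.
VALID_MODELS = {
    "KNeighborsRegressor": (KNeighborsRegressor, {"kneighborsregressor__n_neighbors": [3, 5, 7]}),
}

def get_regression_model(name):
    # Return (model, param_grid) on success, or (None, None) so the caller's
    # tuple unpacking still works and can be guard-checked.
    entry = VALID_MODELS.get(name)
    if entry is None:
        return None, None
    model_cls, param_grid = entry
    return model_cls(), param_grid

def fit(name):
    base_model, param_grid = get_regression_model(name)
    if base_model is None:
        return False  # invalid model name: abort cleanly and report failure
    # ... build pipeline, run the grid search, log the R2 score ...
    return True

The two-element return is the point: with the old bare return None, the caller's tuple unpacking (base_model, param_grid = self.get_regression_model()) would have raised a TypeError; returning (None, None) turns that crash into a testable condition that fit can report as False.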
diff --git a/src/emhass/optimization.py b/src/emhass/optimization.py index 3a646fb7..1af1cf00 100644 --- a/src/emhass/optimization.py +++ b/src/emhass/optimization.py @@ -66,12 +66,12 @@ def __init__(self, retrieve_hass_conf: dict, optim_conf: dict, plant_conf: dict, self.retrieve_hass_conf = retrieve_hass_conf self.optim_conf = optim_conf self.plant_conf = plant_conf - self.freq = self.retrieve_hass_conf['freq'] + self.freq = self.retrieve_hass_conf['optimization_time_step'] self.time_zone = self.retrieve_hass_conf['time_zone'] self.timeStep = self.freq.seconds/3600 # in hours self.time_delta = pd.to_timedelta(opt_time_delta, "hours") # The period of optimization - self.var_PV = self.retrieve_hass_conf['var_PV'] - self.var_load = self.retrieve_hass_conf['var_load'] + self.var_PV = self.retrieve_hass_conf['sensor_power_photovoltaics'] + self.var_load = self.retrieve_hass_conf['sensor_power_load_no_var_loads'] self.var_load_new = self.var_load+'_positive' self.costfun = costfun # self.emhass_conf = emhass_conf @@ -143,18 +143,18 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n if soc_final is not None: soc_init = soc_final else: - soc_init = self.plant_conf['SOCtarget'] + soc_init = self.plant_conf['battery_target_state_of_charge'] if soc_final is None: if soc_init is not None: soc_final = soc_init else: - soc_final = self.plant_conf['SOCtarget'] + soc_final = self.plant_conf['battery_target_state_of_charge'] if def_total_hours is None: - def_total_hours = self.optim_conf['def_total_hours'] + def_total_hours = self.optim_conf['operating_hours_of_each_deferrable_load'] if def_start_timestep is None: - def_start_timestep = self.optim_conf['def_start_timestep'] + def_start_timestep = self.optim_conf['start_timesteps_of_each_deferrable_load'] if def_end_timestep is None: - def_end_timestep = self.optim_conf['def_end_timestep'] + def_end_timestep = self.optim_conf['end_timesteps_of_each_deferrable_load'] type_self_conso = 'bigm' # maxmin #### The LP problem using Pulp #### @@ -166,19 +166,19 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n ## Add decision variables P_grid_neg = {(i):plp.LpVariable(cat='Continuous', - lowBound=-self.plant_conf['P_to_grid_max'], upBound=0, + lowBound=-self.plant_conf['maximum_power_to_grid'], upBound=0, name="P_grid_neg{}".format(i)) for i in set_I} P_grid_pos = {(i):plp.LpVariable(cat='Continuous', - lowBound=0, upBound=self.plant_conf['P_from_grid_max'], + lowBound=0, upBound=self.plant_conf['maximum_power_from_grid'], name="P_grid_pos{}".format(i)) for i in set_I} P_deferrable = [] P_def_bin1 = [] - for k in range(self.optim_conf['num_def_loads']): - if type(self.optim_conf['P_deferrable_nom'][k]) == list: - upBound = np.max(self.optim_conf['P_deferrable_nom'][k]) + for k in range(self.optim_conf['number_of_deferrable_loads']): + if type(self.optim_conf['nominal_power_of_deferrable_loads'][k]) == list: + upBound = np.max(self.optim_conf['nominal_power_of_deferrable_loads'][k]) else: - upBound = self.optim_conf['P_deferrable_nom'][k] - if self.optim_conf['treat_def_as_semi_cont'][k]: + upBound = self.optim_conf['nominal_power_of_deferrable_loads'][k] + if self.optim_conf['treat_deferrable_load_as_semi_cont'][k]: P_deferrable.append({(i):plp.LpVariable(cat='Continuous', name="P_deferrable{}_{}".format(k, i)) for i in set_I}) else: @@ -189,7 +189,7 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n name="P_def{}_bin1_{}".format(k, i)) for i in set_I}) P_def_start 
= [] P_def_bin2 = [] - for k in range(self.optim_conf['num_def_loads']): + for k in range(self.optim_conf['number_of_deferrable_loads']): P_def_start.append({(i):plp.LpVariable(cat='Binary', name="P_def{}_start_{}".format(k, i)) for i in set_I}) P_def_bin2.append({(i):plp.LpVariable(cat='Binary', @@ -200,10 +200,10 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n name="E_{}".format(i)) for i in set_I} if self.optim_conf['set_use_battery']: P_sto_pos = {(i):plp.LpVariable(cat='Continuous', - lowBound=0, upBound=self.plant_conf['Pd_max'], + lowBound=0, upBound=self.plant_conf['battery_discharge_power_max'], name="P_sto_pos_{0}".format(i)) for i in set_I} P_sto_neg = {(i):plp.LpVariable(cat='Continuous', - lowBound=-self.plant_conf['Pc_max'], upBound=0, + lowBound=-self.plant_conf['battery_charge_power_max'], upBound=0, name="P_sto_neg_{0}".format(i)) for i in set_I} else: P_sto_pos = {(i):i*0 for i in set_I} @@ -221,7 +221,7 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n ## Define objective P_def_sum= [] for i in set_I: - P_def_sum.append(plp.lpSum(P_deferrable[k][i] for k in range(self.optim_conf['num_def_loads']))) + P_def_sum.append(plp.lpSum(P_deferrable[k][i] for k in range(self.optim_conf['number_of_deferrable_loads']))) if self.costfun == 'profit': if self.optim_conf['set_total_pv_sell']: objective = plp.lpSum(-0.001*self.timeStep*(unit_load_cost[i]*(P_load[i] + P_def_sum[i]) + \ @@ -252,12 +252,12 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n self.optim_conf['weight_battery_charge']*P_sto_neg[i]) for i in set_I) # Add term penalizing each startup where configured - if ("def_start_penalty" in self.optim_conf and self.optim_conf["def_start_penalty"]): - for k in range(self.optim_conf["num_def_loads"]): - if (len(self.optim_conf["def_start_penalty"]) > k and self.optim_conf["def_start_penalty"][k]): + if ('set_deferrable_startup_penalty' in self.optim_conf and self.optim_conf['set_deferrable_startup_penalty']): + for k in range(self.optim_conf['number_of_deferrable_loads']): + if (len(self.optim_conf['set_deferrable_startup_penalty']) > k and self.optim_conf['set_deferrable_startup_penalty'][k]): objective = objective + plp.lpSum( - -0.001 * self.timeStep * self.optim_conf["def_start_penalty"][k] * P_def_start[k][i] *\ - unit_load_cost[i] * self.optim_conf['P_deferrable_nom'][k] + -0.001 * self.timeStep * self.optim_conf['set_deferrable_startup_penalty'][k] * P_def_start[k][i] *\ + unit_load_cost[i] * self.optim_conf['nominal_power_of_deferrable_loads'][k] for i in set_I) opt_model.setObjective(objective) @@ -288,24 +288,24 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n for i in set_I} # Constraint for hybrid inverter and curtailment cases - if type(self.plant_conf['module_model']) == list: + if type(self.plant_conf['pv_module_model']) == list: P_nom_inverter = 0.0 - for i in range(len(self.plant_conf['inverter_model'])): - if type(self.plant_conf['inverter_model'][i]) == str: + for i in range(len(self.plant_conf['pv_inverter_model'])): + if type(self.plant_conf['pv_inverter_model'][i]) == str: cec_inverters = bz2.BZ2File(pathlib.Path(__file__).parent / 'data/cec_inverters.pbz2', "rb") cec_inverters = cPickle.load(cec_inverters) - inverter = cec_inverters[self.plant_conf['inverter_model'][i]] + inverter = cec_inverters[self.plant_conf['pv_inverter_model'][i]] P_nom_inverter += inverter.Paco else: - P_nom_inverter += 
self.plant_conf['inverter_model'][i] + P_nom_inverter += self.plant_conf['pv_inverter_model'][i] else: - if type(self.plant_conf['inverter_model'][i]) == str: + if type(self.plant_conf['pv_inverter_model'][i]) == str: cec_inverters = bz2.BZ2File(pathlib.Path(__file__).parent / 'data/cec_inverters.pbz2', "rb") cec_inverters = cPickle.load(cec_inverters) - inverter = cec_inverters[self.plant_conf['inverter_model']] + inverter = cec_inverters[self.plant_conf['pv_inverter_model']] P_nom_inverter = inverter.Paco else: - P_nom_inverter = self.plant_conf['inverter_model'] + P_nom_inverter = self.plant_conf['pv_inverter_model'] if self.plant_conf['inverter_is_hybrid']: constraints.update({"constraint_hybrid_inverter1_{}".format(i) : plp.LpConstraint( @@ -347,26 +347,26 @@ def perform_optimization(self, data_opt: pd.DataFrame, P_PV: np.array, P_load: n # Avoid injecting and consuming from grid at the same time constraints.update({"constraint_pgridpos_{}".format(i) : plp.LpConstraint( - e = P_grid_pos[i] - self.plant_conf['P_from_grid_max']*D[i], + e = P_grid_pos[i] - self.plant_conf['maximum_power_from_grid']*D[i], sense = plp.LpConstraintLE, rhs = 0) for i in set_I}) constraints.update({"constraint_pgridneg_{}".format(i) : plp.LpConstraint( - e = -P_grid_neg[i] - self.plant_conf['P_to_grid_max']*(1-D[i]), + e = -P_grid_neg[i] - self.plant_conf['maximum_power_to_grid']*(1-D[i]), sense = plp.LpConstraintLE, rhs = 0) for i in set_I}) # Treat deferrable loads constraints predicted_temps = {} - for k in range(self.optim_conf['num_def_loads']): + for k in range(self.optim_conf['number_of_deferrable_loads']): - if type(self.optim_conf['P_deferrable_nom'][k]) == list: + if type(self.optim_conf['nominal_power_of_deferrable_loads'][k]) == list: # Constraint for sequence of deferrable # WARNING: This is experimental, formulation seems correct but feasibility problems. 
# Probably incompatible with other constraints - power_sequence = self.optim_conf['P_deferrable_nom'][k] + power_sequence = self.optim_conf['nominal_power_of_deferrable_loads'][k] sequence_length = len(power_sequence) def create_matrix(input_list, n): matrix = [] @@ -421,7 +421,7 @@ def create_matrix(input_list, n): continue predicted_temp.append( predicted_temp[I-1] - + (P_deferrable[k][I-1] * (heating_rate * self.timeStep / self.optim_conf['P_deferrable_nom'][k])) + + (P_deferrable[k][I-1] * (heating_rate * self.timeStep / self.optim_conf['nominal_power_of_deferrable_loads'][k])) - (cooling_constant * (predicted_temp[I-1] - outdoor_temperature_forecast[I-1]))) if len(desired_temperatures) > I and desired_temperatures[I]: constraints.update({"constraint_defload{}_temperature_{}".format(k, I): @@ -448,7 +448,7 @@ def create_matrix(input_list, n): plp.LpConstraint( e = plp.lpSum(P_deferrable[k][i]*self.timeStep for i in set_I), sense = plp.LpConstraintEQ, - rhs = def_total_hours[k]*self.optim_conf['P_deferrable_nom'][k]) + rhs = def_total_hours[k]*self.optim_conf['nominal_power_of_deferrable_loads'][k]) }) # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep @@ -516,7 +516,7 @@ def create_matrix(input_list, n): for i in set_I}) # Treat deferrable as a fixed value variable with just one startup - if self.optim_conf['set_def_constant'][k]: + if self.optim_conf['set_deferrable_load_single_constant'][k]: # P_def_start[i] must be 1 for exactly 1 value of i constraints.update({"constraint_pdef{}_start4".format(k) : plp.LpConstraint( @@ -533,23 +533,23 @@ def create_matrix(input_list, n): }) # Treat deferrable load as a semi-continuous variable - if self.optim_conf['treat_def_as_semi_cont'][k]: + if self.optim_conf['treat_deferrable_load_as_semi_cont'][k]: constraints.update({"constraint_pdef{}_semicont1_{}".format(k, i) : plp.LpConstraint( - e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i], + e=P_deferrable[k][i] - self.optim_conf['nominal_power_of_deferrable_loads'][k]*P_def_bin1[k][i], sense=plp.LpConstraintGE, rhs=0) for i in set_I}) constraints.update({"constraint_pdef{}_semicont2_{}".format(k, i) : plp.LpConstraint( - e=P_deferrable[k][i] - self.optim_conf['P_deferrable_nom'][k]*P_def_bin1[k][i], + e=P_deferrable[k][i] - self.optim_conf['nominal_power_of_deferrable_loads'][k]*P_def_bin1[k][i], sense=plp.LpConstraintLE, rhs=0) for i in set_I}) # Treat the number of starts for a deferrable load (old method, kept here just in case) - # if self.optim_conf['set_def_constant'][k]: + # if self.optim_conf['set_deferrable_load_single_constant'][k]: # constraints.update({"constraint_pdef{}_start1_{}".format(k, i) : # plp.LpConstraint( # e=P_deferrable[k][i] - P_def_bin2[k][i]*M, @@ -592,53 +592,53 @@ def create_matrix(input_list, n): constraints.update({"constraint_pos_batt_dynamic_max_{}".format(i) : plp.LpConstraint(e = P_sto_pos[i+1] - P_sto_pos[i], sense = plp.LpConstraintLE, - rhs = self.timeStep*self.optim_conf['battery_dynamic_max']*self.plant_conf['Pd_max']) + rhs = self.timeStep*self.optim_conf['battery_dynamic_max']*self.plant_conf['battery_discharge_power_max']) for i in range(n-1)}) constraints.update({"constraint_pos_batt_dynamic_min_{}".format(i) : plp.LpConstraint(e = P_sto_pos[i+1] - P_sto_pos[i], sense = plp.LpConstraintGE, - rhs = self.timeStep*self.optim_conf['battery_dynamic_min']*self.plant_conf['Pd_max']) + rhs = self.timeStep*self.optim_conf['battery_dynamic_min']*self.plant_conf['battery_discharge_power_max']) for
i in range(n-1)}) constraints.update({"constraint_neg_batt_dynamic_max_{}".format(i) : plp.LpConstraint(e = P_sto_neg[i+1] - P_sto_neg[i], sense = plp.LpConstraintLE, - rhs = self.timeStep*self.optim_conf['battery_dynamic_max']*self.plant_conf['Pc_max']) + rhs = self.timeStep*self.optim_conf['battery_dynamic_max']*self.plant_conf['battery_charge_power_max']) for i in range(n-1)}) constraints.update({"constraint_neg_batt_dynamic_min_{}".format(i) : plp.LpConstraint(e = P_sto_neg[i+1] - P_sto_neg[i], sense = plp.LpConstraintGE, - rhs = self.timeStep*self.optim_conf['battery_dynamic_min']*self.plant_conf['Pc_max']) + rhs = self.timeStep*self.optim_conf['battery_dynamic_min']*self.plant_conf['battery_charge_power_max']) for i in range(n-1)}) # Then the classic battery constraints constraints.update({"constraint_pstopos_{}".format(i) : plp.LpConstraint( - e=P_sto_pos[i] - self.plant_conf['eta_disch']*self.plant_conf['Pd_max']*E[i], + e=P_sto_pos[i] - self.plant_conf['battery_discharge_efficiency']*self.plant_conf['battery_discharge_power_max']*E[i], sense=plp.LpConstraintLE, rhs=0) for i in set_I}) constraints.update({"constraint_pstoneg_{}".format(i) : plp.LpConstraint( - e=-P_sto_neg[i] - (1/self.plant_conf['eta_ch'])*self.plant_conf['Pc_max']*(1-E[i]), + e=-P_sto_neg[i] - (1/self.plant_conf['battery_charge_efficiency'])*self.plant_conf['battery_charge_power_max']*(1-E[i]), sense=plp.LpConstraintLE, rhs=0) for i in set_I}) constraints.update({"constraint_socmax_{}".format(i) : plp.LpConstraint( - e=-plp.lpSum(P_sto_pos[j]*(1/self.plant_conf['eta_disch']) + self.plant_conf['eta_ch']*P_sto_neg[j] for j in range(i)), + e=-plp.lpSum(P_sto_pos[j]*(1/self.plant_conf['battery_discharge_efficiency']) + self.plant_conf['battery_charge_efficiency']*P_sto_neg[j] for j in range(i)), sense=plp.LpConstraintLE, - rhs=(self.plant_conf['Enom']/self.timeStep)*(self.plant_conf['SOCmax'] - soc_init)) + rhs=(self.plant_conf['battery_nominal_energy_capacity']/self.timeStep)*(self.plant_conf['battery_maximum_state_of_charge'] - soc_init)) for i in set_I}) constraints.update({"constraint_socmin_{}".format(i) : plp.LpConstraint( - e=plp.lpSum(P_sto_pos[j]*(1/self.plant_conf['eta_disch']) + self.plant_conf['eta_ch']*P_sto_neg[j] for j in range(i)), + e=plp.lpSum(P_sto_pos[j]*(1/self.plant_conf['battery_discharge_efficiency']) + self.plant_conf['battery_charge_efficiency']*P_sto_neg[j] for j in range(i)), sense=plp.LpConstraintLE, - rhs=(self.plant_conf['Enom']/self.timeStep)*(soc_init - self.plant_conf['SOCmin'])) + rhs=(self.plant_conf['battery_nominal_energy_capacity']/self.timeStep)*(soc_init - self.plant_conf['battery_minimum_state_of_charge'])) for i in set_I}) constraints.update({"constraint_socfinal_{}".format(0) : plp.LpConstraint( - e=plp.lpSum(P_sto_pos[i]*(1/self.plant_conf['eta_disch']) + self.plant_conf['eta_ch']*P_sto_neg[i] for i in set_I), + e=plp.lpSum(P_sto_pos[i]*(1/self.plant_conf['battery_discharge_efficiency']) + self.plant_conf['battery_charge_efficiency']*P_sto_neg[i] for i in set_I), sense=plp.LpConstraintEQ, - rhs=(soc_init - soc_final)*self.plant_conf['Enom']/self.timeStep) + rhs=(soc_init - soc_final)*self.plant_conf['battery_nominal_energy_capacity']/self.timeStep) }) opt_model.constraints = constraints @@ -667,16 +667,16 @@ def create_matrix(input_list, n): opt_tp = pd.DataFrame() opt_tp["P_PV"] = [P_PV[i] for i in set_I] opt_tp["P_Load"] = [P_load[i] for i in set_I] - for k in range(self.optim_conf['num_def_loads']): + for k in range(self.optim_conf['number_of_deferrable_loads']): 
opt_tp["P_deferrable{}".format(k)] = [P_deferrable[k][i].varValue for i in set_I] opt_tp["P_grid_pos"] = [P_grid_pos[i].varValue for i in set_I] opt_tp["P_grid_neg"] = [P_grid_neg[i].varValue for i in set_I] opt_tp["P_grid"] = [P_grid_pos[i].varValue + P_grid_neg[i].varValue for i in set_I] if self.optim_conf['set_use_battery']: opt_tp["P_batt"] = [P_sto_pos[i].varValue + P_sto_neg[i].varValue for i in set_I] - SOC_opt_delta = [(P_sto_pos[i].varValue*(1/self.plant_conf['eta_disch']) + \ - self.plant_conf['eta_ch']*P_sto_neg[i].varValue)*( - self.timeStep/(self.plant_conf['Enom'])) for i in set_I] + SOC_opt_delta = [(P_sto_pos[i].varValue*(1/self.plant_conf['battery_discharge_efficiency']) + \ + self.plant_conf['battery_charge_efficiency']*P_sto_neg[i].varValue)*( + self.timeStep/(self.plant_conf['battery_nominal_energy_capacity'])) for i in set_I] SOCinit = copy.copy(soc_init) SOC_opt = [] for i in set_I: @@ -692,7 +692,7 @@ def create_matrix(input_list, n): # Let's compute the optimal cost function P_def_sum_tp = [] for i in set_I: - P_def_sum_tp.append(sum(P_deferrable[k][i].varValue for k in range(self.optim_conf['num_def_loads']))) + P_def_sum_tp.append(sum(P_deferrable[k][i].varValue for k in range(self.optim_conf['number_of_deferrable_loads']))) opt_tp["unit_load_cost"] = [unit_load_cost[i] for i in set_I] opt_tp["unit_prod_price"] = [unit_prod_price[i] for i in set_I] if self.optim_conf['set_total_pv_sell']: @@ -728,7 +728,7 @@ def create_matrix(input_list, n): # Debug variables if debug: - for k in range(self.optim_conf["num_def_loads"]): + for k in range(self.optim_conf['number_of_deferrable_loads']): opt_tp[f"P_def_start_{k}"] = [P_def_start[k][i].varValue for i in set_I] opt_tp[f"P_def_bin2_{k}"] = [P_def_bin2[k][i].varValue for i in set_I] for i, predicted_temp in predicted_temps.items(): diff --git a/src/emhass/retrieve_hass.py b/src/emhass/retrieve_hass.py index d86b7512..7b4dcf78 100644 --- a/src/emhass/retrieve_hass.py +++ b/src/emhass/retrieve_hass.py @@ -62,7 +62,12 @@ def __init__(self, hass_url: str, long_lived_token: str, freq: pd.Timedelta, self.long_lived_token = long_lived_token self.freq = freq self.time_zone = time_zone - self.params = params + if (params == None) or (params == "null"): + self.params = {} + elif type(params) is dict: + self.params = params + else: + self.params = json.loads(params) self.emhass_conf = emhass_conf self.logger = logger self.get_data_from_file = get_data_from_file @@ -450,11 +455,11 @@ class response: metadata = {} with open(entities_path / "metadata.json", "w") as file: # Save entity metadata, key = entity_id - metadata[entity_id] = {'name': data_df.name, 'unit_of_measurement': unit_of_measurement,'friendly_name': friendly_name,'type_var': type_var, 'freq': int(self.freq.seconds / 60)} + metadata[entity_id] = {'name': data_df.name, 'unit_of_measurement': unit_of_measurement,'friendly_name': friendly_name,'type_var': type_var, 'optimization_time_step': int(self.freq.seconds / 60)} # Find lowest frequency to set for continual loop freq - if metadata.get("lowest_freq",None) == None or metadata["lowest_freq"] > int(self.freq.seconds / 60): - metadata["lowest_freq"] = int(self.freq.seconds / 60) + if metadata.get("lowest_time_step",None) == None or metadata["lowest_time_step"] > int(self.freq.seconds / 60): + metadata["lowest_time_step"] = int(self.freq.seconds / 60) json.dump(metadata,file, indent=4) self.logger.debug("Saved " + entity_id + " to json file")
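The retrieve_hass.py hunk just above replaces the bare self.params = params assignment with defensive parsing, since the constructor may receive None, the literal string "null", an already-parsed dict, or a JSON string. A standalone sketch of that normalization, assuming only the standard library (the real code inlines this in __init__ rather than using a helper function):

import json

def normalize_params(params):
    # Missing or "null" input becomes an empty config, dicts pass through,
    # and anything else is assumed to be a JSON string and decoded.
    if params is None or params == "null":
        return {}
    if isinstance(params, dict):
        return params
    return json.loads(params)

# All three input shapes normalize to a plain dict:
assert normalize_params(None) == {}
assert normalize_params({"passed_data": {}}) == {"passed_data": {}}
assert normalize_params('{"passed_data": {}}') == {"passed_data": {}}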
diff --git a/src/emhass/static/advanced.html b/src/emhass/static/advanced.html index 6595520c..87172f86 100644 --- a/src/emhass/static/advanced.html +++ b/src/emhass/static/advanced.html @@ -1,3 +1,4 @@ +

Use the buttons below to manually launch different optimization tasks

@@ -18,7 +19,7 @@

Use the buttons below to fit, predict and tune a machine learning model for - +

Input Runtime Parameters

diff --git a/src/emhass/static/basic.html b/src/emhass/static/basic.html index 7bc78114..9a3f0647 100644 --- a/src/emhass/static/basic.html +++ b/src/emhass/static/basic.html @@ -1,3 +1,4 @@ +

Use the button below to manually launch optimization task

@@ -7,6 +8,7 @@

Use the button below to manually launch optimization task

The day-ahead optimization button will run once, based on the values entered into the configuration page.
- After a few seconds, the charts and table below should be updated to reflect the optimization plan for the next 24 hours. + After a few seconds, the charts and table below should be updated to reflect the optimization plan for the next + 24 hours.

-
+
\ No newline at end of file diff --git a/src/emhass/static/configuration_list.html b/src/emhass/static/configuration_list.html new file mode 100644 index 00000000..523f34f9 --- /dev/null +++ b/src/emhass/static/configuration_list.html @@ -0,0 +1,44 @@ + + +
+
+

Local

+
+
+
+
+
+

System

+
+
+
+
+
+

Tariff

+
+
+
+
+
+

Deferrable Loads

+ + +
+
+
+
+
+

Solar System (PV)

+
+
+
+
+
+

Battery

+ +
+
+
\ No newline at end of file diff --git a/src/emhass/static/configuration_script.js b/src/emhass/static/configuration_script.js new file mode 100644 index 00000000..0a12a6c0 --- /dev/null +++ b/src/emhass/static/configuration_script.js @@ -0,0 +1,871 @@ +//javascript file for dynamically processing configuration page + +//used static files +//param_definitions.json : stores information about parameters (E.g. their defaults, their type, and what parameter section to be in) +//configuration_list.html : template html to act as a base for the list view. (Params get dynamically added after) + +//Div layout +/*
+
+
POSSIBLE HEADER INPUT HERE WITH PARAMETER ID
+
+
+
input/s here
+
+
+
+
; */ + +//on page reload +window.onload = async function () { + ///fetch configuration parameters from definitions json file + param_definitions = await getParamDefinitions(); + //obtain configuration from emhass (pull) + config = await obtainConfig(); + //obtain configuration_list.html html as a template to dynamically render parameters in a list view (parameters as input items) + list_html = await getListHTML(); + //load list parameter page (default) + loadConfigurationListView(param_definitions, config, list_html); + + //add event listener to save button + document + .getElementById("save") + .addEventListener("click", () => saveConfiguration(param_definitions)); + + //add event listener to yaml button (convert yaml to json in box view) + document.getElementById("yaml").addEventListener("click", () => yamlToJson()); + //hide yaml button by default (display in box view) + document.getElementById("yaml").style.display = "none"; + + //add event listener to defaults button + document + .getElementById("defaults") + .addEventListener("click", () => + ToggleView(param_definitions, list_html, true) + ); + + //add event listener to json-toggle button (toggle between json box and list view) + document + .getElementById("json-toggle") + .addEventListener("click", () => + ToggleView(param_definitions, list_html, false) + ); +}; + +//obtain file containing information about parameters (definitions) +async function getParamDefinitions() { + const response = await fetch(`static/data/param_definitions.json`); + if (response.status !== 200 && response.status !== 201) { + //alert error in alert box + errorAlert("Unable to obtain definitions file"); + return {}; + } + const param_definitions = await response.json(); + return await param_definitions; +} + +//obtain emhass config (from saved params extracted/simplified into the config format) +async function obtainConfig() { + config = {}; + const response = await fetch(`get-config`, { + method: "GET", + }); + response_status = await response.status; //return status + //if request failed + if (response_status !== 200 && response_status !== 201) { + showChangeStatus(response_status, await response.json()); + return {}; + } + //else extract json from data + blob = await response.blob(); //get data blob + config = await new Response(blob).json(); //obtain json from blob + showChangeStatus(response_status, {}); + return config; +} + +//obtain emhass default config (to present the default parameters in view) +async function ObtainDefaultConfig() { + config = {}; + const response = await fetch(`get-config/defaults`, { + method: "GET", + }); + //if request failed + response_status = await response.status; //return status + if (response_status !== 200 && response_status !== 201) { + showChangeStatus(response_status, await response.json()); + return {}; + } + //else extract json from data + blob = await response.blob(); //get data blob + config = await new Response(blob).json(); //obtain json from blob + showChangeStatus(response_status, {}); + return config; +} + +//get html data from configuration_list.html (list template) +async function getListHTML() { + const response = await fetch(`static/configuration_list.html`); + if (response.status !== 200 && response.status !== 201) { + errorAlert("Unable to obtain configuration_list.html file"); + return {}; + } + blob = await response.blob(); //get data blob + htmlTemplateData = await new Response(blob).text(); //obtain html from blob + return await htmlTemplateData; +} + +//load list configuration view +function
loadConfigurationListView(param_definitions, config, list_html) { + if (list_html == null || config == null || param_definitions == null) { + return 1; + } + + //list parameters used in the section headers + header_input_list = ["set_use_battery", "number_of_deferrable_loads"]; + + //get the main container and append list template html + document.getElementById("configuration-container").innerHTML = list_html; + + //loop through configuration sections ('Local','System','Tariff','Solar System (PV)') in definitions file + for (var section in param_definitions) { + // build each section by adding parameters with their corresponding input elements + buildParamContainers( + section, + param_definitions[section], + config, + header_input_list + ); + + //after sections have been built, add event listeners for section header inputs + //loop through headers + for (header_input_param of header_input_list) { + if (param_definitions[section].hasOwnProperty(header_input_param)) { + //grab default from definitions file + value = param_definitions[section][header_input_param]["default_value"]; + //find input element (using the parameter name as the input element ID) + header_input_element = document.getElementById(header_input_param); + if (header_input_element !== null) { + //add event listener to element (trigger on input change) + header_input_element.addEventListener("input", (e) => + headerElement(e.target, param_definitions, config) + ); + //check the EMHASS config to see if it contains a stored param value + //else keep default + value = checkConfigParam(value, config, header_input_param); + //set value of input + header_input_element.value = value; + //checkboxes (for Booleans) also set value to "checked" + if (header_input_element.type == "checkbox") { + header_input_element.checked = value; + } + //manually trigger the header parameter input event listener for setting up initial section state + headerElement(header_input_element, param_definitions, config); + } + } + } + } +} + +//build each section's body, containing param containers (which hold the param inputs) +function buildParamContainers( + section, + section_parameters_definitions, + config, + header_input_list +) { + //get the section container element, guarding against a missing section before dereferencing it + SectionContainer = document.getElementById(section); + if (SectionContainer == null) { + console.error("Unable to find Section container"); + return 0; + } + //get the body container inside the section (where the parameters will be appended) + SectionParamElement = SectionContainer.getElementsByClassName("section-body"); + if (SectionParamElement.length == 0) { + console.error("Unable to find Section Body"); + return 0; + } + + //loop through the section's parameters in the definitions file, generate and append param (div) elements for the section + for (const [ + parameter_definition_name, + parameter_definition_object, + ] of Object.entries(section_parameters_definitions)) { + //check parameter definitions have the required key values + if ( + !("friendly_name" in parameter_definition_object) || + !("Description" in parameter_definition_object) || + !("input" in parameter_definition_object) || + !("default_value" in parameter_definition_object) + ) { + console.log( + parameter_definition_name + + " is missing some required values in the definitions file" + ); + continue; + } + if ( + parameter_definition_object["input"] === "select" && + !("select_options" in parameter_definition_object) + ) { + console.log( + parameter_definition_name + + " is missing select_options values in the definitions file" + ); + continue; + } + + //check if
param is set in the section header, if so skip building param + if (header_input_list.includes(parameter_definition_name)) { + continue; + } + + //if parameter type == array.* and not in "Deferrable Loads" section, append plus and minus buttons in param div + array_buttons = ""; + if ( + parameter_definition_object["input"].search("array.") > -1 && + section != "Deferrable Loads" + ) { + array_buttons = ` + + +
+ `; + } + + //generates and appends param container into section + //buildParamElement() builds the parameter input/s and returns html to append in param-input + SectionParamElement[0].innerHTML += ` +
+
${ + parameter_definition_object["friendly_name"] + }:
${parameter_definition_name}
+ ${array_buttons} +
+ ${buildParamElement( + parameter_definition_object, + parameter_definition_name, + config + )} +
+

${parameter_definition_object["Description"]}

+
+ `; + } + + //After looping through, building and appending the parameters in the corresponding section: + //create add button (array plus) event listeners + let plus = SectionContainer.querySelectorAll(".input-plus"); + plus.forEach(function (answer) { + answer.addEventListener("click", () => + plusElements(answer.classList[1], param_definitions, section, {}) + ); + }); + + //create subtract button (array minus) event listeners + let minus = SectionContainer.querySelectorAll(".input-minus"); + minus.forEach(function (answer) { + answer.addEventListener("click", () => minusElements(answer.classList[1])); + }); + + //check initial checkbox state, check "value" of input and match to "checked" value + let checkbox = document.querySelectorAll("input[type='checkbox']"); + checkbox.forEach(function (answer) { + let value = answer.value === "true"; + answer.checked = value; + }); + + //loop through the section's params again, check if a param has a requirement; if so, add an event listener to the required param input + //if the required param gets changed, trigger a function to check whether that required parameter matches the required value for the param + //if false, add a css class to the param element to shadow it, to show that it's inaccessible + for (const [ + parameter_definition_name, + parameter_definition_object, + ] of Object.entries(section_parameters_definitions)) { + //check if param has a requirement from definitions file + if ("requires" in parameter_definition_object) { + // get param requirement element + const requirement_element = document.getElementById( + Object.keys(parameter_definition_object["requires"])[0] + ); + if (requirement_element == null) { + console.debug( + "unable to find " + + Object.keys(parameter_definition_object["requires"])[0] + + " param div container element" + ); + continue; + } + + // get param element that has requirement + const param_element = document.getElementById(parameter_definition_name); + if (param_element == null) { + console.debug( + "unable to find " + + parameter_definition_name + + " param div container element" + ); + continue; + } + + //obtain required param inputs, add event listeners + requirement_inputs = + requirement_element.getElementsByClassName("param_input"); + //grab required value + const requirement_value = Object.values( + parameter_definition_object["requires"] + )[0]; + + //for all required inputs + for (const input of requirement_inputs) { + //if listener not already attached + if (input.getAttribute("listener") !== "true") { + //create event listener with arguments referencing the required param,
the param with the requirement, and the required value + input.addEventListener("input", () => + checkRequirements(input, param_element, requirement_value) + ); + //manually run the function to set the param element's initial state + checkRequirements(input, param_element, requirement_value); + } + } + } + } +} + +//create html input element/s for a param container (called by buildParamContainers) +function buildParamElement( + parameter_definition_object, + parameter_definition_name, + config +) { + var type = ""; + var inputs = ""; + var type_specific_html = ""; + var type_specific_html_end = ""; + + //switch statement to adjust generated html according to the parameter data type (definitions in definitions file) + switch (parameter_definition_object["input"]) { + case "array.int": + //number + case "int": + type = "number"; + placeholder = parseInt(parameter_definition_object["default_value"]); + break; + case "array.float": + case "float": + type = "number"; + placeholder = parseFloat(parameter_definition_object["default_value"]); + break; + //text (string) + case "array.string": + case "string": + type = "text"; + placeholder = parameter_definition_object["default_value"]; + break; + case "array.time": + //time ("00:00") + case "time": + type = "time"; + break; + //checkbox (boolean) + case "array.boolean": + case "boolean": + type = "checkbox"; + type_specific_html = ` + + `; + placeholder = parameter_definition_object["default_value"] === "true"; + break; + //selects (pick) + case "select": + //format selects later + break; + } + + //check default values saved in param definitions + //the definitions default value is used if none is found in the configs, or if an array element has been added in the ui (deferrable load number increase or plus button pressed) + value = parameter_definition_object["default_value"]; + //check if a param value is saved in the config file (if so overwrite definition default) + value = checkConfigParam(value, config, parameter_definition_name); + + //generate and return param input html, + //check if param value is not an object; if so, assume it's a single value. + if (typeof value !== "object") { + //if select, generate and return select elements instead of input + if (parameter_definition_object["input"] == "select") { + let inputs = ``; + return inputs; + } + // generate param input html and return + else { + return ` + ${type_specific_html} + + ${type_specific_html_end} + `; + } + } + // else if object, loop through the array of values, generate an input element per value, and return + else { + //for items such as load_peak_hour_periods (object of objects with arrays) + if (typeof Object.values(value)[0] === "object") { + for (param of Object.values(value)) { + for (items of Object.values(param)) { + inputs += ``; + } + inputs += `
`; + } + return inputs; + } + // array of values + else { + let inputs = ""; + for (param of value) { + inputs += ` + ${type_specific_html} + + ${type_specific_html_end} + `; + } + return inputs; + } + } +} + +//add param inputs in param div container (for type array) +function plusElements( + parameter_definition_name, + param_definitions, + section, + config +) { + param_element = document.getElementById(parameter_definition_name); + if (param_element == null) { + console.log( + "Unable to find " + parameter_definition_name + " param div container" + ); + return 1; + } + param_input_container = + param_element.getElementsByClassName("param-input")[0]; + // Add a copy of the param element + param_input_container.innerHTML += buildParamElement( + param_definitions[section][parameter_definition_name], + parameter_definition_name, + config + ); +} + +//Remove param inputs in param div container (minimum 1) +function minusElements(param) { + param_element = document.getElementById(param); + if (param_element == null) { + console.log("Unable to find " + param + " param div container"); + return 1; + } + param_input_list = param_element.getElementsByTagName("input"); + if (param_input_list.length == 0) { + console.log("Unable to find " + param + " param input/s"); + } + + //verify if input is a boolean (if so remove parent slider/switch element with input) + if ( + param_input_list[param_input_list.length - 1].parentNode.tagName === "LABEL" + ) { + param_input = param_input_list[param_input_list.length - 1].parentNode; + } else { + param_input = param_input_list[param_input_list.length - 1]; + } + + //if param is "load_peak_hour_periods", remove both start and end param inputs as well as the line break tag separating the inputs + if (param == "load_peak_hour_periods") { + if (param_input_list.length > 2) { + brs = document.getElementById(param).getElementsByTagName("br"); + param_input_list[param_input_list.length - 1].remove(); + param_input_list[param_input_list.length - 1].remove(); + brs[brs.length - 1].remove(); + } + } else if (param_input_list.length > 1) { + param_input.remove(); + } +} + +//check requirement_element inputs, +//if requirement_element doesn't match requirement_value, add the .requirement-disable class to param_element, +//else remove the class +function checkRequirements( + requirement_element, + param_element, + requirement_value +) { + //get current value of required element + if (requirement_element.type == "checkbox") { + requirement_element_value = requirement_element.checked; + } else { + requirement_element_value = requirement_element.value; + } + + if (requirement_element_value != requirement_value) { + if (!param_element.classList.contains("requirement-disable")) { + param_element.classList.add("requirement-disable"); + } + } else { + if (param_element.classList.contains("requirement-disable")) { + param_element.classList.remove("requirement-disable"); + } + } +} + +//on header input change, execute accordingly +function headerElement(element, param_definitions, config) { + //obtain section body element + section_card = element.closest(".section-card"); + if (section_card == null) { + console.log("Unable to obtain section-card"); + return 1; + } + param_container = section_card.getElementsByClassName("section-body"); + if (param_container.length > 0) { + param_container = section_card.getElementsByClassName("section-body")[0]; + } else { + console.log("Unable to obtain section-body"); + return 1; + } + + switch
(element.id) { + //if set_use_battery, add or remove battery section (inc. params) + case "set_use_battery": + if (element.checked) { + buildParamContainers("Battery", param_definitions["Battery"], config, [ + "set_use_battery", + ]); + element.checked = true; + } else { + param_container.innerHTML = ""; + } + break; + + //if number_of_deferrable_loads, the number of inputs in the "Deferrable Loads" section should add up to the number_of_deferrable_loads value in the header + case "number_of_deferrable_loads": + //get a list of params in the section + param_list = param_container.getElementsByClassName("param"); + if (param_list.length <= 0) { + console.log( + "There has been an issue counting the number of params in number_of_deferrable_loads" + ); + return 1; + } + //calculate how far off the first parameter's input element count is, compared to the number_of_deferrable_loads value + difference = + parseInt(element.value) - + param_container.firstElementChild.querySelectorAll("input").length; + //add elements based on how many elements are missing + if (difference > 0) { + for (let i = difference; i >= 1; i--) { + for (const param of param_list) { + //append element, do not pass config to obtain default parameter from definitions file + plusElements(param.id, param_definitions, "Deferrable Loads", {}); + } + } + } + //subtract elements based on how many elements it's over + if (difference < 0) { + for (let i = difference; i <= -1; i++) { + for (const param of param_list) { + minusElements(param.id); + } + } + } + break; + } +} + +//checks parameter value in config, updates value if exists +function checkConfigParam(value, config, parameter_definition_name) { + if (config !== null && config !== undefined) { + //check if parameter has a saved value + if (parameter_definition_name in config) { + value = config[parameter_definition_name]; + } + } + return value; +} + +//send all parameter input values to EMHASS, to save to config.json and param.pkl +async function saveConfiguration(param_definitions) { + //start with none + config = {}; + + //if section-cards (config sections/list) exists + config_card = document.getElementsByClassName("section-card"); + //check if page is in list or box view + config_box_element = document.getElementById("config-box"); + + //if true, in list view + if (Boolean(config_card.length)) { + //retrieve params and their input/s by looping through the param_definitions list + //loop through the sections + for (var [section_name, section_object] of Object.entries( + param_definitions + )) { + //loop through parameters + for (var [ + parameter_definition_name, + parameter_definition_object, + ] of Object.entries(section_object)) { + let param_values = []; //stores the obtained param input values + let param_array = false; + //get param container + param_element = document.getElementById(parameter_definition_name); + if (param_element == null) { + console.debug( + "unable to find " + + parameter_definition_name + + " param div container element, skipping this param" + ); + continue; + } + //extract input/s and their value/s from param container div + else { + if (param_element.tagName !== "INPUT") { + param_inputs = param_element.getElementsByClassName("param_input"); + } else { + //check if param_element is also param_input (ex.
for header parameters) + param_inputs = [param_element]; + } + + // loop through param_inputs, extract the element/s values + for (var input of param_inputs) { + switch (input.type) { + case "number": + param_values.push(parseFloat(input.value)); + break; + case "checkbox": + param_values.push(input.checked); + break; + default: + param_values.push(input.value); + break; + } + } + //obtain param input type from param_definitions, check if param should be formatted as an array + param_array = Boolean( + !parameter_definition_object["input"].search("array") + ); + + //build parameters using values extracted from param_inputs + + // If time with 2 sets (load_peak_hour_periods) + if ( + parameter_definition_object["input"] == "array.time" && + param_values.length % 2 === 0 + ) { + config[parameter_definition_name] = {}; + for (let i = 0; i < param_values.length; i++) { + config[parameter_definition_name][ + "period_hp_" + + (Object.keys(config[parameter_definition_name]).length + 1) + ] = [{ start: param_values[i] }, { end: param_values[++i] }]; + } + continue; + } + + //single value + if (param_values.length && !param_array) { + config[parameter_definition_name] = param_values[0]; + } + + //array value + else if (param_values.length) { + config[parameter_definition_name] = param_values; + } + } + } + } + } + + //if box view, extract json from box view + else if (config_box_element !== null) { + //try and parse json from box + try { + config = JSON.parse(config_box_element.value); + } catch (error) { + //if json error, show in alert box + document.getElementById("alert-text").textContent = + "\r\n" + + error + + "\r\n" + + "JSON Error: String values may not be wrapped in quotes"; + document.getElementById("alert").style.display = "block"; + document.getElementById("alert").style.textAlign = "center"; + return 0; + } + } + // else, can't find box or list view + else { + errorAlert("There has been an error verifying box or list view"); + } + + //finally, send built config to emhass + const response = await fetch(`set-config`, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(config), + }); + showChangeStatus(response.status, await response.json()); +} + +//Toggle between box (json) and list view +async function ToggleView(param_definitions, list_html, default_reset) { + let selected = ""; + config = {}; + + //find out if list or box view is active + configuration_container = document.getElementById("configuration-container"); + if (configuration_container == null) { + errorAlert("Unable to find Configuration Container element"); + } + //get yaml button + yaml_button = document.getElementById("yaml"); + if (yaml_button == null) { + console.log("Unable to obtain yaml button"); + } + + // if section-cards (config sections/list) exists + config_card = configuration_container.getElementsByClassName("section-card"); + //selected view (0 = box) + selected_view = Boolean(config_card.length); + + //if default_reset is passed do not switch views, instead reinitialize view with default config as values + if (default_reset) { + selected_view = !selected_view; + //obtain default config as config (when pressing the default button) + config = await ObtainDefaultConfig(); + } else { + //obtain latest config + config = await obtainConfig(); + } + + //if array is empty assume json box is selected + if (selected_view) { + selected = "list"; + } else { + selected = "box"; + } + //remove contents of current view + configuration_container.innerHTML = ""; + //build new
view + switch (selected) { + case "box": + //load list + loadConfigurationListView(param_definitions, config, list_html); + yaml_button.style.display = "none"; + break; + case "list": + //load box + loadConfigurationBoxPage(config); + yaml_button.style.display = "block"; + break; + } +} + +//load box (json textarea) view +async function loadConfigurationBoxPage(config) { + //get configuration container element + configuration_container = document.getElementById("configuration-container"); + if (configuration_container == null) { + errorAlert("Unable to find Configuration Container element"); + } + //append configuration container with textbox area + configuration_container.innerHTML = ` + + `; + //set created textarea box with retrieved config + document.getElementById("config-box").innerHTML = JSON.stringify( + config, + null, + 2 + ); +} + +//function in control of status icons and alert box from a fetch request +async function showChangeStatus(status, logJson) { + var loading = document.getElementById("loader"); //element showing statuses + if (loading === null) { + console.log("unable to find loader element"); + return 1; + } + if (status === 200 || status === 201) { + //if status is 200 or 201, then show a tick + loading.innerHTML = `

`; + } else { + //then show a cross + loading.classList.remove("loading"); + loading.innerHTML = `

`; //show cross icon to indicate an error + if (logJson.length != 0 && document.getElementById("alert-text") !== null) { + document.getElementById("alert-text").textContent = + "\r\n\u2022 " + logJson.join("\r\n\u2022 "); //show received log data in alert box + document.getElementById("alert").style.display = "block"; + document.getElementById("alert").style.textAlign = "left"; + } + } + //remove tick/cross after some time + setTimeout(() => { + loading.innerHTML = ""; + }, 4000); +} + +//simple function to write text to the alert box +async function errorAlert(text) { + if ( + document.getElementById("alert-text") !== null && + document.getElementById("alert") !== null + ) { + document.getElementById("alert-text").textContent = "\r\n" + text + "\r\n"; + document.getElementById("alert").style.display = "block"; + document.getElementById("alert").style.textAlign = "left"; + } + return 0; +} + +//convert yaml box into json box +async function yamlToJson() { + //get box element + config_box_element = document.getElementById("config-box"); + if (config_box_element == null) { + errorAlert("Unable to obtain config box"); + } else { + const response = await fetch(`get-json`, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: config_box_element.value, + }); + response_status = await response.status; //return status + if (response_status == 201) { + showChangeStatus(response_status, {}); + blob = await response.blob(); //get data blob + config = await new Response(blob).json(); //obtain json from blob + config_box_element.value = JSON.stringify(config, null, 2); + } else { + showChangeStatus(response_status, await response.json()); + } + } + return 0; +}
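One detail of the saveConfiguration logic above is worth spelling out before the new param_definitions.json: paired array.time inputs (load_peak_hour_periods) are folded from a flat list of time strings into the period mapping that EMHASS expects. A Python rendering of that fold, with illustrative times and a hypothetical helper name:

def pair_peak_hours(values):
    # Fold a flat [start1, end1, start2, end2, ...] list into
    # {"period_hp_1": [{"start": ...}, {"end": ...}], ...},
    # mirroring the pairing loop in saveConfiguration.
    periods = {}
    for n, i in enumerate(range(0, len(values), 2), start=1):
        periods[f"period_hp_{n}"] = [{"start": values[i]}, {"end": values[i + 1]}]
    return periods

assert pair_peak_hours(["02:54", "15:24", "17:24", "20:24"]) == {
    "period_hp_1": [{"start": "02:54"}, {"end": "15:24"}],
    "period_hp_2": [{"start": "17:24"}, {"end": "20:24"}],
}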
For example: ‘sensor.power_load_no_var_loads’", + "input": "string", + "default_value": "sensor.power_load_no_var_loads" + }, + "sensor_replace_zero": { + "friendly_name": "Sensor to replace NAN values with 0s", + "Description": "The list of retrieved variables that we would want to replace NANs (if they exist) with zeros.", + "input": "array.string", + "default_value": "sensor.power_photovoltaics" + }, + "sensor_linear_interp": { + "friendly_name": "Sensor to replace NAN values with linear interpolation", + "Description": "The list of retrieved variables that we would want to interpolate NAN values using linear interpolation.", + "input": "array.string", + "default_value": "sensor.power_photovoltaics" + }, + "continual_publish": { + "friendly_name": "Continually publish optimization results", + "Description": "Set to True to save entities to .json after an optimization run, then automatically republish the saved entities (with updated current state value) every freq minutes. Entity data is saved to data_path/entities.", + "input": "boolean", + "default_value": false + }, + "logging_level": { + "friendly_name": "Logging level", + "Description": "Set the level of detail for the EMHASS logs. The options are INFO, DEBUG, WARNING and ERROR. Defaults to INFO.", + "input": "select", + "select_options": [ + "INFO", + "DEBUG", + "WARNING", + "ERROR" + ], + "default_value": "INFO" + } + }, + "System": { + "optimization_time_step": { + "friendly_name": "Optimization time step (in minutes)", + "Description": "The time step to resample retrieved data from Home Assistant. This parameter is given in minutes. It should not be defined too low or you will run into memory problems when defining the Linear Programming optimization. Defaults to 30.", + "input": "int", + "default_value": 30 + }, + "historic_days_to_retrieve": { + "friendly_name": "Historic days to retrieve", + "Description": "We will retrieve data from now back to historic_days_to_retrieve days. Defaults to 2.", + "input": "int", + "default_value": 2 + }, + "load_negative": { + "friendly_name": "Load negative values", + "Description": "Set this parameter to True if the retrieved load variable is negative by convention. Defaults to False.", + "input": "boolean", + "default_value": false + }, + "set_zero_min": { + "friendly_name": "Remove Negatives", + "Description": "Set this parameter to True to give a special treatment for a minimum value saturation to zero for power consumption data. Values below zero are replaced by NANs. Defaults to True.", + "input": "boolean", + "default_value": true + }, + "method_ts_round": { + "friendly_name": "Timestamp rounding method", + "Description": "Set the method for timestamp rounding, options are: first, last and nearest.", + "input": "select", + "select_options": [ + "nearest", + "first", + "last" + ], + "default_value": "nearest" + }, + "delta_forecast_daily": { + "friendly_name": "Number of forecasted days", + "Description": "The number of days for forecasted data. Defaults to 1.", + "input": "int", + "default_value": 1 + }, + "load_forecast_method": { + "friendly_name": "Load forecast method", + "Description": "The load forecast method that will be used.
The options are ‘csv’ to load a CSV file or ‘naive’ for a simple 1-day persistence model.", + "input": "select", + "select_options": [ + "naive", + "csv" + ], + "default_value": "naive" + }, + "set_total_pv_sell": { + "friendly_name": "PV straight to grid", + "Description": "Set this parameter to true to consider that all the PV power produced is injected to the grid. No direct self-consumption. The default is false, for a system with direct self-consumption.", + "input": "boolean", + "default_value": false + }, + "lp_solver": { + "friendly_name": "Linear programming solver", + "Description": "Set the name of the linear programming solver that will be used. Defaults to ‘COIN_CMD’. The options are ‘PULP_CBC_CMD’, ‘GLPK_CMD’ and ‘COIN_CMD’.", + "input": "select", + "select_options": [ + "default", + "COIN_CMD", + "PULP_CBC_CMD", + "GLPK_CMD" + ], + "default_value": "COIN_CMD" + }, + "lp_solver_path": { + "friendly_name": "Linear programming solver program path", + "Description": "Set the path to the LP solver. Defaults to ‘/usr/bin/cbc’.", + "input": "text", + "default_value": "/usr/bin/cbc" + }, + "weather_forecast_method": { + "friendly_name": "Weather forecast method", + "Description": "This will define the weather forecast method that will be used. options are 'scrapper' (ClearOutside), 'Solcast', 'solar.forecast' (forecast.solar) and 'csv' to load a CSV file. When loading a CSV file this will be directly considered as the PV power forecast in Watts.", + "input": "select", + "select_options": [ + "scrapper", + "solcast", + "solar.forecast", + "csv" + ], + "default_value": "scrapper" + }, + "maximum_power_from_grid": { + "friendly_name": "Max power from grid", + "Description": "The maximum power that can be supplied by the utility grid in Watts (consumption). Defaults to 9000.", + "input": "int", + "default_value": 9000 + }, + "maximum_power_to_grid": { + "friendly_name": "Max export power to grid", + "Description": "The maximum power that can be supplied to the utility grid in Watts (injection). Defaults to 9000.", + "input": "int", + "default_value": 9000 + }, + "inverter_is_hybrid": { + "friendly_name": "Inverter is a hybrid", + "Description": "Set to True to consider that the installation inverter is hybrid for PV and batteries (Default False)", + "input": "boolean", + "default_value": false + }, + "compute_curtailment": { + "friendly_name": "Set compute curtailment (grid export limit)", + "Description": "Set to True to compute a special PV curtailment variable (Default False)", + "input": "boolean", + "default_value": false + } + }, + "Tariff": { + "load_cost_forecast_method": { + "friendly_name": "Load cost method", + "Description": "Define the method that will be used for load cost forecast. The options are ‘hp_hc_periods’ for peak and non-peak hours contracts, and ‘csv’ to load custom cost from CSV file.", + "input": "select", + "select_options": [ + "hp_hc_periods", + "csv" + ], + "default_value": "hp_hc_periods" + }, + "load_peak_hour_periods": { + "friendly_name": "List peak hour periods", + "Description": "A list of peak hour periods for load consumption from the grid. 
This is useful if you have a contract with peak and non-peak hours.", + "input": "array.time", + "default_value": { + "period_hp_template": [{"start": "02:54"},{"end": "15:24"}] + }, + "requires": { + "load_cost_forecast_method": "hp_hc_periods" + } + }, + "load_peak_hours_cost": { + "friendly_name": "Peak hours electrical energy cost", + "Description": "The cost of the electrical energy during peak hours", + "input": "float", + "requires": { + "load_cost_forecast_method": "hp_hc_periods" + }, + "default_value": 0.1907 + }, + "load_offpeak_hours_cost": { + "friendly_name": "Off-peak hours electrical energy cost", + "Description": "The cost of the electrical energy during off-peak hours", + "input": "float", + "requires": { + "load_cost_forecast_method": "hp_hc_periods" + }, + "default_value": 0.1419 + }, + "production_price_forecast_method": { + "friendly_name": "PV power production price forecast method", + "Description": "Define the method that will be used for PV power production price forecast. This is the price that is paid by the utility for energy injected into the grid. The options are ‘constant’ for a constant fixed value or ‘csv’ to load custom price forecasts from a CSV file.", + "input": "select", + "select_options": [ + "constant", + "csv" + ], + "default_value": "constant" + }, + "photovoltaic_production_sell_price": { + "friendly_name": "Constant PV power production price", + "Description": "The paid price for energy injected to the grid from excess PV production in €/kWh.", + "input": "float", + "default_value": 0.1419, + "requires": { + "production_price_forecast_method": "constant" + } + } + }, + "Solar System (PV)": { + "pv_module_model": { + "friendly_name": "PV module model name", + "Description": "The PV module model. This parameter can be a list of items to enable the simulation of mixed orientation systems.", + "input": "array.string", + "input_attributes": "_'s", + "default_value": "CSUN_Eurasia_Energy_Systems_Industry_and_Trade_CSUN295_60M" + }, + "pv_inverter_model": { + "friendly_name": "The PV inverter model name", + "Description": "The PV inverter model. This parameter can be a list of items to enable the simulation of mixed orientation systems.", + "input": "array.string", + "input_attributes": "_'s", + "default_value": "Fronius_International_GmbH__Fronius_Primo_5_0_1_208_240__240V_" + }, + "surface_tilt": { + "friendly_name": "The PV panel tilt", + "Description": "The tilt angle of your solar panels. Defaults to 30. This parameter can be a list of items to enable the simulation of mixed orientation systems.", + "input": "array.int", + "default_value": 30 + }, + "surface_azimuth": { + "friendly_name": "The PV azimuth (direction)", + "Description": "The azimuth of your PV installation. Defaults to 205. This parameter can be a list of items to enable the simulation of mixed orientation systems.", + "input": "array.int", + "default_value": 205 + }, + "modules_per_string": { + "friendly_name": "Number of modules per string", + "Description": "The number of modules per string. Defaults to 16. This parameter can be a list of items to enable the simulation of mixed orientation systems.", + "input": "array.int", + "default_value": 16 + }, + "strings_per_inverter": { + "friendly_name": "Number of strings per inverter", + "Description": "The number of used strings per inverter. Defaults to 1. 
This parameter can be a list of items to enable the simulation of mixed orientation systems.", + "input": "array.int", + "default_value": 1 + } + }, + "Deferrable Loads": { + "number_of_deferrable_loads": { + "friendly_name": "Number of deferrable loads", + "Description": "Define the number of deferrable loads (appliances to shift) to consider. Defaults to 2.", + "input": "int", + "default_value": 2 + }, + "nominal_power_of_deferrable_loads": { + "friendly_name": "Deferrable load nominal power", + "Description": "The nominal (calculated max) power for each deferrable load in Watts.", + "input": "array.float", + "default_value": 3000.0 + }, + "operating_hours_of_each_deferrable_load": { + "friendly_name": "Deferrable load operating hours", + "Description": "The total number of hours that each deferrable load should operate", + "input": "array.int", + "default_value": 0 + }, + "treat_deferrable_load_as_semi_cont": { + "friendly_name": "Deferrable load as semi-continuous (on/off) variable", + "Description": "Semi-continuous variables (True) are variables that must take a value that can be either their maximum or minimum/zero (for example On = Maximum load, Off = 0 W). Non semi-continuous (which means continuous) variables (False) can take any values between their maximum and minimum", + "input": "array.boolean", + "default_value": true + }, + "set_deferrable_load_single_constant": { + "friendly_name": "Deferrable load run single constant per optimization", + "Description": "Define if we should set each deferrable load as a constant fixed value variable with just one startup for each optimization task", + "input": "array.boolean", + "default_value": false + }, + "set_deferrable_startup_penalty": { + "friendly_name": "Set deferrable startup penalty", + "Description": "For penalty P, each time the deferrable load turns on will incur an additional cost of P * number_of_deferrable_loads * cost_of_electricity at that time", + "input": "array.float", + "default_value": 0.0 + }, + "start_timesteps_of_each_deferrable_load": { + "friendly_name": "Deferrable start timestamp", + "Description": "The timestep as from which each deferrable load is allowed to operate (if you don’t want the deferrable load to use the whole optimization time window). If you specify a value of 0 (or negative), the deferrable load will be optimized as from the beginning of the complete prediction horizon window.", + "input": "array.int", + "default_value": 0 + }, + "end_timesteps_of_each_deferrable_load": { + "friendly_name": "Deferrable end timestamp", + "Description": "The timestep before which each deferrable load should operate. The deferrable load is not allowed to operate after the specified time step. If a value of 0 (or negative) is provided, the deferrable load is allowed to operate in the complete optimization window)", + "input": "array.int", + "default_value": 0 + } + }, + "Battery": { + "set_use_battery": { + "friendly_name": "Enable Battery", + "Description": "Set to True if we should consider an energy storage device such as a Li-Ion battery. Defaults to False", + "input": "boolean", + "default_value": false + }, + "set_nocharge_from_grid": { + "friendly_name": "Forbid charging battery from grid", + "Description": "Set this to true if you want to forbid charging the battery from the grid. 
The battery will only be charged from excess PV", + "input": "boolean", + "default_value": false + }, + "set_nodischarge_to_grid": { + "friendly_name": "Forbid battery discharge to the grid", + "Description": "Set this to true if you want to forbid discharging battery power to the grid.", + "input": "boolean", + "default_value": true + }, + "set_battery_dynamic": { + "friendly_name": "Set Battery dynamic (dis)charge power limiting", + "Description": "Set a power dynamic limiting condition to the battery power. This is an additional constraint on the battery dynamic in power per unit of time (timestep), which allows you to set a percentage of the battery’s nominal full power as the maximum power allowed for (dis)charge.", + "input": "boolean", + "default_value": false + }, + "battery_dynamic_max": { + "friendly_name": "Maximum percentage of battery discharge per timestep", + "Description": "The maximum positive (for discharge) battery power dynamic. This is the allowed power variation (in percentage) of battery maximum power per unit of timestep", + "input": "float", + "default_value": 0.9, + "requires": { + "set_battery_dynamic": true + } + }, + "battery_dynamic_min": { + "friendly_name": "Maximum percentage of battery charge per timestep", + "Description": "The maximum negative (for charge) battery power dynamic. This is the allowed power variation (in percentage) of battery maximum power per timestep.", + "input": "float", + "default_value": -0.9, + "requires": { + "set_battery_dynamic": true + } + }, + "weight_battery_discharge": { + "friendly_name": "Add cost weight for battery discharge", + "Description": "An additional weight (currency/ kWh) applied in the cost function to battery usage for discharging", + "input": "float", + "default_value": 1.0 + }, + "weight_battery_charge": { + "friendly_name": "Add cost weight for battery charge", + "Description": "An additional weight (currency/ kWh) applied in the cost function to battery usage for charging", + "input": "float", + "default_value": 1.0 + }, + "battery_discharge_power_max": { + "friendly_name": "Max battery discharge power", + "Description": "The maximum discharge power in Watts", + "input": "int", + "default_value": 1000 + }, + "battery_charge_power_max": { + "friendly_name": "Max battery charge power", + "Description": "The maximum charge power in Watts", + "input": "int", + "default_value": 1000 + }, + "battery_discharge_efficiency": { + "friendly_name": "Battery discharge efficiency", + "Description": "The discharge efficiency. (percentage/100)", + "input": "float", + "default_value": 0.95 + }, + "battery_charge_efficiency": { + "friendly_name": "Battery charge efficiency", + "Description": "The charge efficiency. (percentage/100)", + "input": "float", + "default_value": 0.95 + }, + "battery_nominal_energy_capacity": { + "friendly_name": "Battery total capacity", + "Description": "The total capacity of the battery stack in Wh", + "input": "int", + "default_value": 5000 + }, + "battery_minimum_state_of_charge": { + "friendly_name": "Minimum Battery charge percentage", + "Description": "The minimum allowable battery state of charge. (percentage/100)", + "input": "float", + "default_value": 0.3 + }, + "battery_maximum_state_of_charge": { + "friendly_name": "Maximum Battery charge percentage", + "Description": "The maximum allowable battery state of charge. 
(percentage/100)", + "input": "float", + "default_value": 0.9 + }, + "battery_target_state_of_charge": { + "friendly_name": "Battery desired percentage after optimization", + "Description": "The desired battery state of charge at the end of each optimization cycle. (percentage/100)", + "input": "float", + "default_value": 0.6 + } + } +} \ No newline at end of file diff --git a/src/emhass/static/script.js b/src/emhass/static/script.js index ad02b578..294244ca 100644 --- a/src/emhass/static/script.js +++ b/src/emhass/static/script.js @@ -1,419 +1,442 @@ +//configuration for dynamically processing index page +//loads either the basic or advance html + +//used static files +//advanced.html : template html for displaying all the actions + runtime parameter input +//basic.html : template html for displaying a minimal view of actions + //on page reload get saved data window.onload = async function () { + pageSelected = await loadBasicOrAdvanced(); - pageSelected = await loadBasicOrAdvanced(); - - //add listener for basic and advanced html switch - document.getElementById("basicOrAdvanced").addEventListener("click", () => SwitchBasicOrAdvanced()); + //add listener for basic and advanced html switch + document + .getElementById("basicOrAdvanced") + .addEventListener("click", () => SwitchBasicOrAdvanced()); }; //add listeners to buttons (based on page) function loadButtons(page) { - switch (page) { - case "advanced": - [ - "dayahead-optim", - "forecast-model-fit", - "forecast-model-predict", - "forecast-model-tune", - "regressor-model-fit", - "regressor-model-predict", - "perfect-optim", - "publish-data", - "naive-mpc-optim" - ].forEach((id) => - document.getElementById(id).addEventListener("click", () => formAction(id, "advanced")) - ); - ["input-plus", "input-minus"].forEach((id) => - document.getElementById(id).addEventListener("click", () => dictInputs(id)) - ); - document.getElementById("input-select").addEventListener("change", () => getSavedData()); - document.getElementById("input-clear").addEventListener("click", () => ClearInputData()); - break; - case "basic": - document.getElementById("dayahead-optim-basic").addEventListener("click", () => formAction("dayahead-optim", "basic")); - break; - } + switch (page) { + case "advanced": + [ + "dayahead-optim", + "forecast-model-fit", + "forecast-model-predict", + "forecast-model-tune", + "regressor-model-fit", + "regressor-model-predict", + "perfect-optim", + "publish-data", + "naive-mpc-optim", + ].forEach((id) => + document + .getElementById(id) + .addEventListener("click", () => formAction(id, "advanced")) + ); + ["input-plus", "input-minus"].forEach((id) => + document + .getElementById(id) + .addEventListener("click", () => dictInputs(id)) + ); + document + .getElementById("input-select") + .addEventListener("change", () => getSavedData()); + document + .getElementById("input-clear") + .addEventListener("click", () => ClearInputData()); + break; + case "basic": + document + .getElementById("dayahead-optim-basic") + .addEventListener("click", () => formAction("dayahead-optim", "basic")); + break; + } } //on check present basic or advanced html inside form element async function loadBasicOrAdvanced(RequestedPage) { - let basicFile = "basic.html"; - let advencedFile = "advanced.html"; - var formContainer = document.getElementById("TabSelection"); //container element to house basic or advanced data - //first check any function arg - if (arguments.length == 1) { - switch (RequestedPage) { - case "basic": - htmlData = await getHTMLData(basicFile); 
- formContainer.innerHTML = htmlData; - loadButtons("basic"); //load buttons based on basic or advanced - if (testStorage()) { localStorage.setItem("TabSelection", "basic") } //remember mode (save to localStorage) - return "basic"; //return basic to get saved data - case "advanced": - htmlData = await getHTMLData(advencedFile); - formContainer.innerHTML = htmlData; - loadButtons("advanced"); - if (testStorage()) { localStorage.setItem("TabSelection", "advanced") } - getSavedData(); - return "advanced"; - default: - htmlData = await getHTMLData(advencedFile); - formContainer.innerHTML = htmlData; - loadButtons("advanced"); - getSavedData(); - return "advanced"; - } - } - //then check localStorage - if (testStorage()) { - if (localStorage.getItem("TabSelection") !== null && localStorage.getItem("TabSelection") === "advanced") { //if advance - htmlData = await getHTMLData(advencedFile); - formContainer.innerHTML = htmlData; - loadButtons("advanced"); - getSavedData(); - return "advanced"; - } - else { //else run basic (first time) - htmlData = await getHTMLData(basicFile); - formContainer.innerHTML = htmlData; - loadButtons("basic"); - return "basic"; + let basicFile = "basic.html"; + let advencedFile = "advanced.html"; + var formContainer = document.getElementById("TabSelection"); //container element to house basic or advanced data + //first check any function arg + if (arguments.length == 1) { + switch (RequestedPage) { + case "basic": + htmlData = await getHTMLData(basicFile); + formContainer.innerHTML = htmlData; + loadButtons("basic"); //load buttons based on basic or advanced + if (testStorage()) { + localStorage.setItem("TabSelection", "basic"); + } //remember mode (save to localStorage) + return "basic"; //return basic to get saved data + case "advanced": + htmlData = await getHTMLData(advencedFile); + formContainer.innerHTML = htmlData; + loadButtons("advanced"); + if (testStorage()) { + localStorage.setItem("TabSelection", "advanced"); } - } else { - //if localStorage not supported, set to advanced page + getSavedData(); + return "advanced"; + default: htmlData = await getHTMLData(advencedFile); formContainer.innerHTML = htmlData; loadButtons("advanced"); + getSavedData(); return "advanced"; } + } + //then check localStorage + if (testStorage()) { + if ( + localStorage.getItem("TabSelection") !== null && + localStorage.getItem("TabSelection") === "advanced" + ) { + //if advance + htmlData = await getHTMLData(advencedFile); + formContainer.innerHTML = htmlData; + loadButtons("advanced"); + getSavedData(); + return "advanced"; + } else { + //else run basic (first time) + htmlData = await getHTMLData(basicFile); + formContainer.innerHTML = htmlData; + loadButtons("basic"); + return "basic"; + } + } else { + //if localStorage not supported, set to advanced page + htmlData = await getHTMLData(advencedFile); + formContainer.innerHTML = htmlData; + loadButtons("advanced"); + return "advanced"; + } } //on button press, check current displayed page data and switch function SwitchBasicOrAdvanced() { - var formContainerChildID = document.getElementById("TabSelection").firstElementChild.id - if (formContainerChildID === 'basic') { - loadBasicOrAdvanced("advanced") - } - else { - loadBasicOrAdvanced("basic") - } + var formContainerChildID = + document.getElementById("TabSelection").firstElementChild.id; + if (formContainerChildID === "basic") { + loadBasicOrAdvanced("advanced"); + } else { + loadBasicOrAdvanced("basic"); + } } - //get html data from basic.html or advanced.html async function 
getHTMLData(htmlFile) { - const response = await fetch(`static/` + htmlFile); - blob = await response.blob(); //get data blob - htmlTemplateData = await new Response(blob).text(); //obtain html from blob - return await htmlTemplateData; + const response = await fetch(`static/` + htmlFile); + blob = await response.blob(); //get data blob + htmlTemplateData = await new Response(blob).text(); //obtain html from blob + return await htmlTemplateData; } //function pushing data via post, triggered by button action async function formAction(action, page) { + if (page !== "basic") { + //dont try to get input data in basic mode + var data = inputToJson(page); + } else { + var data = {}; + } //send no data - if (page !== "basic") { //dont try to get input data in basic mode - var data = inputToJson(page); - } - else { var data = {} } //send no data - - if (data !== 0) { //don't run if there is an error in the input (box/list) Json data - showChangeStatus("loading", {}); // show loading div for status - const response = await fetch(`action/` + action, { - //fetch data from webserver.py - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify(data), //note that post can only send data via strings - }); - if (response.status == 201) { - showChangeStatus(response.status, {}); - if (page !== "basic") { - saveStorage(); //save to storage if successful - } - return true - } //if successful - else { - showChangeStatus(response.status, await response.json()); - return false - } // else get Log data from response - } else { - showChangeStatus("remove"); //replace loading, show tick or cross with none - return false - } + if (data !== 0) { + //don't run if there is an error in the input (box/list) Json data + showChangeStatus("loading", {}); // show loading div for status + const response = await fetch(`action/` + action, { + //fetch data from webserver.py + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(data), //note that post can only send data via strings + }); + if (response.status == 201) { + showChangeStatus(response.status, {}); + if (page !== "basic") { + saveStorage(); //save to storage if successful + } + return true; + } //if successful + else { + showChangeStatus(response.status, await response.json()); + return false; + } // else get Log data from response + } else { + showChangeStatus("remove"); //replace loading, show tick or cross with none + return false; + } } //function in control of status icons of post above async function showChangeStatus(status, logJson) { - var loading = document.getElementById("loader"); //element showing statuses - if (status === "remove") { - //remove all - loading.innerHTML = ""; - loading.classList.remove("loading"); - } else if (status === "loading") { - //show loading logo - loading.innerHTML = ""; - loading.classList.add("loading"); //append class with loading animation styling - } else if (status === 201) { - //if status is 201, then show a tick - loading.classList.remove("loading"); - loading.innerHTML = `

`; - getTemplate(); //get updated templates - } else { - //then show a cross - loading.classList.remove("loading"); - loading.innerHTML = `

`; //show cross icon to indicate an error - if (logJson.length != 0) { - document.getElementById("alert-text").textContent = - "\r\n\u2022 " + logJson.join("\r\n\u2022 "); //show received log data in alert box - document.getElementById("alert").style.display = "block"; - document.getElementById("alert").style.textAlign = "left"; - } + var loading = document.getElementById("loader"); //element showing statuses + if (status === "remove") { + //remove all + loading.innerHTML = ""; + loading.classList.remove("loading"); + } else if (status === "loading") { + //show loading logo + loading.innerHTML = ""; + loading.classList.add("loading"); //append class with loading animation styling + } else if (status === 201) { + //if status is 201, then show a tick + loading.classList.remove("loading"); + loading.innerHTML = `

`; + getTemplate(); //get updated templates + } else { + //then show a cross + loading.classList.remove("loading"); + loading.innerHTML = `

`; //show cross icon to indicate an error + if (logJson.length != 0 && document.getElementById("alert-text") !== null) { + document.getElementById("alert-text").textContent = + "\r\n\u2022 " + logJson.join("\r\n\u2022 "); //show received log data in alert box + document.getElementById("alert").style.display = "block"; + document.getElementById("alert").style.textAlign = "left"; } + } } //get rendered html template with containing new table data async function getTemplate() { - //fetch data from webserver.py - let htmlTemplateData = ""; - response = await fetch(`template/table-template`, { - method: "GET", - }); - blob = await response.blob(); //get data blob - htmlTemplateData = await new Response(blob).text(); //obtain html from blob - templateDiv = document.getElementById("template"); //get template container element to override - templateDiv.innerHTML = htmlTemplateData; //override container inner html with new data - var scripts = Array.from(templateDiv.getElementsByTagName("script")); //replace script tags manually - for (const script of scripts) { - var TempScript = document.createElement("script"); - TempScript.innerHTML = script.innerHTML; - script.parentElement.appendChild(TempScript); - } + //fetch data from webserver.py + let htmlTemplateData = ""; + response = await fetch(`template`, { + method: "GET", + }); + blob = await response.blob(); //get data blob + htmlTemplateData = await new Response(blob).text(); //obtain html from blob + templateDiv = document.getElementById("template"); //get template container element to override + templateDiv.innerHTML = htmlTemplateData; //override container inner html with new data + var scripts = Array.from(templateDiv.getElementsByTagName("script")); //replace script tags manually + for (const script of scripts) { + var TempScript = document.createElement("script"); + TempScript.innerHTML = script.innerHTML; + script.parentElement.appendChild(TempScript); + } } //test localStorage support function testStorage() { - try { - localStorage.setItem("test", { test: "123" }); - localStorage.removeItem("test"); - return true; - } catch (error) { - return false; - } + try { + localStorage.setItem("test", { test: "123" }); + localStorage.removeItem("test"); + return true; + } catch (error) { return false; + } + return false; } //function gets saved data (if any) function getSavedData() { - dictInputs(); //check selected current (List or Box) is correct - if (testStorage()) { - //if local storage exists and works - let selectElement = document.getElementById("input-select"); // select button element - var input_container = document.getElementById("input-container"); // container div containing all dynamic input elements (Box/List) - if ( - localStorage.getItem("input_container_content") && - localStorage.getItem("input_container_content") !== "{}" - ) { - //If items already stored in local storage, then override default - if (selectElement.value == "Box") { - //if Box is selected, show saved json data into box - document.getElementById("text-area").value = localStorage.getItem( - "input_container_content" - ); - } - if (selectElement.value == "List") { - //if List is selected, show saved json data into box - storedJson = JSON.parse( - localStorage.getItem("input_container_content") - ); - if (Object.keys(storedJson).length > 0) { - input_container.innerHTML = ""; - i = 1; - for (const ikey in storedJson) { - input_container.appendChild( - createInputListDiv(ikey, JSON.stringify(storedJson[ikey])) - ); //call function to present each key as an list div 
element (with saved values) - } - } - } + dictInputs(); //check selected current (List or Box) is correct + if (testStorage()) { + //if local storage exists and works + let selectElement = document.getElementById("input-select"); // select button element + var input_container = document.getElementById("input-container"); // container div containing all dynamic input elements (Box/List) + if ( + localStorage.getItem("input_container_content") && + localStorage.getItem("input_container_content") !== "{}" + ) { + //If items already stored in local storage, then override default + if (selectElement.value == "Box") { + //if Box is selected, show saved json data into box + document.getElementById("text-area").value = localStorage.getItem( + "input_container_content" + ); + } + if (selectElement.value == "List") { + //if List is selected, show saved json data into box + storedJson = JSON.parse( + localStorage.getItem("input_container_content") + ); + if (Object.keys(storedJson).length > 0) { + input_container.innerHTML = ""; + i = 1; + for (const ikey in storedJson) { + input_container.appendChild( + createInputListDiv(ikey, JSON.stringify(storedJson[ikey])) + ); //call function to present each key as an list div element (with saved values) + } } + } } + } } //using localStorage, store json data from input-list(List)/text-area(from input-box) elements for saved state save on page refresh (will save state on successful post) function saveStorage() { - var data = JSON.stringify(inputToJson()); - if (testStorage() && data != "{}") { - //don't bother saving if empty and/or storage don't exist - localStorage.setItem("input_container_content", data); - } + var data = JSON.stringify(inputToJson()); + if (testStorage() && data != "{}") { + //don't bother saving if empty and/or storage don't exist + localStorage.setItem("input_container_content", data); + } } //function gets values from input-list/text-area(from input-box) elements and return json dict object function inputToJson() { - var input_container = document.getElementById("input-container"); //container - let inputListArr = document.getElementsByClassName("input-list"); //list - let inputTextArea = document.getElementById("text-area"); //box - let input_container_child = null; - input_container_child = input_container.firstElementChild; //work out which element is first inside container div - var jsonReturnData = {}; + var input_container = document.getElementById("input-container"); //container + let inputListArr = document.getElementsByClassName("input-list"); //list + let inputTextArea = document.getElementById("text-area"); //box + let input_container_child = null; + input_container_child = input_container.firstElementChild; //work out which element is first inside container div + var jsonReturnData = {}; - if (input_container_child == null) { - //if no elements in container then return empty - return jsonReturnData; - } - //if List return box json - if ( - input_container_child.className == "input-list" && - inputListArr.length > 0 - ) { - //if list is first and if list is greater then 0, otherwise give empty dict + if (input_container_child == null) { + //if no elements in container then return empty + return jsonReturnData; + } + //if List return box json + if ( + input_container_child.className == "input-list" && + inputListArr.length > 0 + ) { + //if list is first and if list is greater then 0, otherwise give empty dict - let jsonTempData = "{"; - for (let i = 0; i < inputListArr.length; i++) { - let key = 
inputListArr[i].getElementsByClassName("input-key")[0].value; - var value = - inputListArr[i].getElementsByClassName("input-value")[0].value; - //curate a string with list elements to parse into json later - if (key !== "") { - //key must not be empty - if (i !== 0) { - jsonTempData = jsonTempData.concat(","); - } //add comma before every parameter, exuding the first - jsonTempData = jsonTempData.concat('"' + key + '":' + value); - } - } - jsonTempData = jsonTempData.concat("}"); - try { - jsonReturnData = JSON.parse(jsonTempData); - } catch (error) { - //if json error, show in alert box - document.getElementById("alert-text").textContent = - "\r\n" + - error + - "\r\n" + - "JSON Error: String values may not be wrapped in quotes"; - document.getElementById("alert").style.display = "block"; - document.getElementById("alert").style.textAlign = "center"; - return 0; - } + let jsonTempData = "{"; + for (let i = 0; i < inputListArr.length; i++) { + let key = inputListArr[i].getElementsByClassName("input-key")[0].value; + var value = + inputListArr[i].getElementsByClassName("input-value")[0].value; + //curate a string with list elements to parse into json later + if (key !== "") { + //key must not be empty + if (i !== 0) { + jsonTempData = jsonTempData.concat(","); + } //add comma before every parameter, exuding the first + jsonTempData = jsonTempData.concat('"' + key + '":' + value); + } } - //if Box return box json - if ( - input_container_child.className == "input-box" && - inputTextArea.value != "" - ) { - //if Box is first and text is not empty, otherwise give empty dict - try { - jsonReturnData = JSON.parse(inputTextArea.value); - } catch (error) { - //if json error, show in alert box - document.getElementById("alert-text").textContent = "\r\n" + error; - document.getElementById("alert").style.display = "block"; - return 0; - } + jsonTempData = jsonTempData.concat("}"); + try { + jsonReturnData = JSON.parse(jsonTempData); + } catch (error) { + //if json error, show in alert box + document.getElementById("alert-text").textContent = + "\r\n" + + error + + "\r\n" + + "JSON Error: String values may not be wrapped in quotes"; + document.getElementById("alert").style.display = "block"; + document.getElementById("alert").style.textAlign = "center"; + return 0; } - return jsonReturnData; + } + //if Box return box json + if ( + input_container_child.className == "input-box" && + inputTextArea.value != "" + ) { + //if Box is first and text is not empty, otherwise give empty dict + try { + jsonReturnData = JSON.parse(inputTextArea.value); + } catch (error) { + //if json error, show in alert box + document.getElementById("alert-text").textContent = "\r\n" + error; + document.getElementById("alert").style.display = "block"; + return 0; + } + } + return jsonReturnData; } //function creates input list div element (and pass it values if given) function createInputListDiv(ikey, ivalue) { - let div = document.createElement("div"); - div.className = "input-list"; - div.innerHTML = ` + let div = document.createElement("div"); + div.className = "input-list"; + div.innerHTML = `

:

`; - if (ikey && ivalue) { - //if value and key is provided (from local storage) then add as elements values - div.getElementsByClassName("input-key")[0].value = String(ikey); - div.getElementsByClassName("input-value")[0].value = String(ivalue); - } + if (ikey && ivalue) { + //if value and key is provided (from local storage) then add as elements values + div.getElementsByClassName("input-key")[0].value = String(ikey); + div.getElementsByClassName("input-value")[0].value = String(ivalue); + } - return div; + return div; } //function assigned to control (add and remove) input (Box and List) elements function dictInputs(action) { - var input_container = document.getElementById("input-container"); // container div containing all dynamic input elements - let selectElement = document.getElementById("input-select"); // select button - let input_container_child = null; - let input_container_child_name = null; - if (input_container.children.length > 0) { - input_container_child = input_container.firstElementChild; // figure out what is the first element inside of container (ie: "text-area" (input-box) or "input-list" (list)) - input_container_child_name = input_container.firstElementChild.className; + var input_container = document.getElementById("input-container"); // container div containing all dynamic input elements + let selectElement = document.getElementById("input-select"); // select button + let input_container_child = null; + let input_container_child_name = null; + if (input_container.children.length > 0) { + input_container_child = input_container.firstElementChild; // figure out what is the first element inside of container (ie: "text-area" (input-box) or "input-list" (list)) + input_container_child_name = input_container.firstElementChild.className; + } + //if list is selected, remove text-area (from Box) element and replace (with input-list) + if (selectElement.value == "List") { + if (action == "input-plus" || input_container_child_name == "input-box") { + //if plus button pressed, or Box element exists + if (input_container_child_name == "input-box") { + input_container_child.remove(); + } + input_container.appendChild(createInputListDiv(false, false)); //call to createInputListDiv function to craft input-list element (with no values) and append inside container element } - //if list is selected, remove text-area (from Box) element and replace (with input-list) - if (selectElement.value == "List") { - if (action == "input-plus" || input_container_child_name == "input-box") { - //if plus button pressed, or Box element exists - if (input_container_child_name == "input-box") { - input_container_child.remove(); - } - input_container.appendChild(createInputListDiv(false, false)); //call to createInputListDiv function to craft input-list element (with no values) and append inside container element - } - if (action == "input-minus") { - //minus button pressed, remove input-list element - if (input_container.children.length > 0) { - let inputListArr = document.getElementsByClassName("input-list"); - let obj = inputListArr.item(inputListArr.length - 1); - obj.innerHTML = ""; - obj.remove(); - } - } + if (action == "input-minus") { + //minus button pressed, remove input-list element + if (input_container.children.length > 0) { + let inputListArr = document.getElementsByClassName("input-list"); + let obj = inputListArr.item(inputListArr.length - 1); + obj.innerHTML = ""; + obj.remove(); + } } - //if box is selected, remove input-list elements and replace (with text-area) - if 
(selectElement.value == "Box") { - if ( - input_container_child_name == "input-list" || - input_container_child === null - ) { - // if input list exists or no Box element - input_container.innerHTML = ""; //remove input-list list elements via erasing container innerHTML - let div = document.createElement("div"); //add input-box element - div.className = "input-box"; - div.innerHTML = ` + } + //if box is selected, remove input-list elements and replace (with text-area) + if (selectElement.value == "Box") { + if ( + input_container_child_name == "input-list" || + input_container_child === null + ) { + // if input list exists or no Box element + input_container.innerHTML = ""; //remove input-list list elements via erasing container innerHTML + let div = document.createElement("div"); //add input-box element + div.className = "input-box"; + div.innerHTML = ` `; - input_container.appendChild(div); //append inside of container element - } + input_container.appendChild(div); //append inside of container element } + } } //clear stored input data from localStorage (if any), clear input elements async function ClearInputData(id) { - if ( - testStorage() && - localStorage.getItem("input_container_content") !== null - ) { - localStorage.setItem("input_container_content", "{}"); - } - ClearInputElements(); + if ( + testStorage() && + localStorage.getItem("input_container_content") !== null + ) { + localStorage.setItem("input_container_content", "{}"); + } + ClearInputElements(); } //clear input elements async function ClearInputElements() { - let selectElement = document.getElementById("input-select"); - var input_container = document.getElementById("input-container"); - if (selectElement.value == "Box") { - document.getElementById("text-area").value = "{}"; - } - if (selectElement.value == "List") { - input_container.innerHTML = ""; - } + let selectElement = document.getElementById("input-select"); + var input_container = document.getElementById("input-container"); + if (selectElement.value == "Box") { + document.getElementById("text-area").value = "{}"; + } + if (selectElement.value == "List") { + input_container.innerHTML = ""; + } } // //Run day ahead, then publish actions // async function DayheadOptimPublish() { // response = await formAction("dayahead-optim", "basic") -// if (response) { //if successful publish data +// if (response) { //if successful publish data // formAction("publish-data", "basic") // } //} - - - - diff --git a/src/emhass/static/style.css b/src/emhass/static/style.css index fd87ba06..3a4a8627 100644 --- a/src/emhass/static/style.css +++ b/src/emhass/static/style.css @@ -1,3 +1,4 @@ +/*! EMHASS Style Sheet */ /*! 
style.css v1.0.0 (modified version) | ISC License | https://github.com/ungoldman/style.css */ /* color reference css */ @@ -570,6 +571,8 @@ button, .mystyle, .alert, .info, +.section-card, +select, table { border-radius: 7px; /* overflow: visible; */ @@ -578,6 +581,7 @@ table { button, .alert, .info, +.section-card, select { text-transform: none; border-width: 1px; @@ -599,6 +603,8 @@ select { h2 { margin-bottom: .3em; + margin-right: .3em; + margin-left: .3em; } .table_div h4 { @@ -747,12 +753,12 @@ tr:hover td:last-child { margin-right: 13px; } -#loader { +/* #loader { min-width: 3.5em; min-height: 3.5em; width: 3.5em; height: 3.5em; -} +} */ .loading { /* loading animation */ @@ -787,7 +793,7 @@ tr:hover td:last-child { .tick { /* tick symbol */ - color: #158b00; + color: #bfebbc; vertical-align: text-top; font-size: 4.0em; animation-name: fadeInOpacity; @@ -798,7 +804,7 @@ tr:hover td:last-child { .cross { /* cross symbol */ - color: #a71515; + color: #e3b7b7; vertical-align: text-top; font-size: 4.0em; animation-name: fadeInOpacity; @@ -941,7 +947,8 @@ select, .input-list input, -.input-box textarea { +.input-box textarea, +#config-box { border-collapse: collapse; border-radius: 7px; border-style: solid; @@ -970,7 +977,229 @@ select, margin: 0; } -/* */ +/* config page */ +#configuration-container { + border: 1px solid; + border-width: 1px 0px; +} + +#configuration-container, +.header-footer { + margin-bottom: 5px; + padding: 20px 0px; + border-radius: 7px; + max-width: 90%; + margin: auto; +} + + +.header-footer { + background-color: #0000; + display: flex; + justify-content: space-between; +} + +.header-footer h4, +.header-footer div, +.header-footer a { + line-height: 0; + margin: auto 0; + +} + +/* loading icons */ +.header-footer p { + margin: 20px 0; +} + + +.header-footer h4 { + color: darkblue; +} + +.header-footer .feather { + height: 3em !important; + stroke-width: 3 !important; +} + +#save, +#json { + min-width: 15%; + height: auto; + display: block; + min-height: fit-content; + margin-left: auto; +} + +/* configuration list page */ + +.section-card { + max-width: 99%; + margin-left: auto; + margin-right: auto; + margin-bottom: 1vh; +} + +.section-card h4 { + margin-top: 5px; + background-color: #0000 !important; +} + +.section-card-header { + background-color: #e1e1e15e; + display: flex; + align-items: center; + padding: 0px 10px; + border-bottom: 1px solid rgba(0, 0, 0, 0.355); + /* justify-content: center; */ +} + +.section-body { + -webkit-transition: .4s; + transition: .4s; +} + +.switch, +.section-card-header input { + position: relative; + display: inline-block; + width: 52px; + height: 27px; + margin-left: auto; +} + +.switch input { + opacity: 0; + width: 0; + height: 0; +} + +.slider { + position: absolute; + cursor: pointer; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: #ccc; + -webkit-transition: .4s; + transition: .4s; +} + +.slider:before { + position: absolute; + content: ""; + height: calc(27px - 7px); + width: calc(27px - 7px); + left: 4px; + bottom: 4px; + background-color: white; + -webkit-transition: .4s; + transition: .4s; +} + +input:checked+.slider { + background-color: darkblue; +} + +input:checked+.slider:before { + -webkit-transform: translateX(26px); + -ms-transform: translateX(26px); + transform: translateX(26px); +} + +.slider, +.slider:before { + border-radius: 7px +} + +/* param container and content styling */ +.param { + text-align: left; + padding: 5px; + border-bottom: 1px solid rgba(0, 0, 0, 0.355); + transition: 1s; 
+} + +.param input, +.section-card-header input { + background-color: #ffffff11; + border-radius: 7px; + border: solid 1px; + color: #181818; + min-width: 40%; + max-width: 70%; +} + + +.section-card-header input { + min-width: calc(27px - 7px); +} + +.param p, +.param i { + font-size: .7em; + margin-top: 4px; +} + +.param i { + font-size: .7em; + margin-bottom: 5px; +} + +.param p { + padding-right: 5px; + max-width: 777px; +} + +.param h5 { + font-size: 1em; +} + +.param h5, +p { + margin: 5px; + margin-left: 0px; + margin-bottom: 0px; +} + +.param button { + width: 20px; + height: 20px; + line-height: 0; + padding: 1px; + box-shadow: none; + margin-bottom: 5px; +} + +.param-input { + display: block; + float: left; + min-width: 100%; +} + +.param-input input { + min-width: 70%; +} + +.param-input input[type="time"] { + min-width: 35%; +} + +/* when requirement param is not met */ +.requirement-disable { + pointer-events: none; + filter: opacity(25%); +} + +/* configuration box page */ + +#config-box { + min-width: 100%; + min-height: 85vh; +} + + /* mobile only changes */ @@ -989,6 +1218,9 @@ select, } } + + + /* Dark Theme Support */ @media (prefers-color-scheme: dark) { html.adaptive { @@ -1031,6 +1263,9 @@ select, } h2, + h3, + h4, + .header-footer h4, kbd, a { background-color: #111111; @@ -1094,7 +1329,8 @@ select, } .input-list input, - .input-box textarea { + .input-box textarea, + #config-box { background-color: #282928; border-color: #e1e1e1; color: #e1e1e1 @@ -1110,5 +1346,28 @@ select, border-top: 16px solid #ccf3ff; } + input:checked+.slider { + background-color: #ccf3ff; + } -} + .param { + border-color: rgba(255, 255, 255, 0.355); + } + + .param input, + .section-card-header input { + color: #e1e1e1; + } + + .section-card-header { + background-color: #ffffff11; + } + + + #configuration-container { + background-color: #ffffff07; + border: 0; + } + + +} \ No newline at end of file diff --git a/src/emhass/templates/configuration.html b/src/emhass/templates/configuration.html new file mode 100644 index 00000000..40c1183d --- /dev/null +++ b/src/emhass/templates/configuration.html @@ -0,0 +1,75 @@ + + + + + + EMHASS: Energy Management Optimization for Home Assistant + + + + + + + + +
EMHASS: Energy Management Optimization for Home Assistant
© MIT License | Copyright (c) 2021-2023 David HERNANDEZ
\ No newline at end of file
diff --git a/src/emhass/templates/index.html b/src/emhass/templates/index.html
index 237cd328..5c416e4b 100644
--- a/src/emhass/templates/index.html
+++ b/src/emhass/templates/index.html
@@ -1,10 +1,11 @@
EMHASS: Energy Management Optimization for Home Assistant
@@ -14,19 +15,25 @@
@@ -60,7 +67,7 @@
EMHASS: Energy Management Optimization for Home Assistant
{% endfor %}
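
A note on the param_definitions.json schema added above: each parameter entry pairs a friendly_name and Description with an input type (e.g. "string", "int", "float", "boolean", "select" or an "array.*" variant), a default_value, and an optional requires object that gates the parameter on the value of another parameter. The following is a minimal sketch, not code from this diff, of how a front end could evaluate that requires rule; the helper name shouldDisplayParam is hypothetical.

//hypothetical helper (not part of this diff): evaluate a "requires" rule
//paramDef is one definition object, e.g. param_definitions["Tariff"]["load_peak_hours_cost"]
//config is the current configuration dictionary shown on the configuration page
function shouldDisplayParam(paramDef, config) {
  if (paramDef.requires === undefined) {
    return true; //no requirement, the parameter is always shown
  }
  //every key in "requires" must match the current config value,
  //e.g. {"load_cost_forecast_method": "hp_hc_periods"}
  return Object.entries(paramDef.requires).every(
    ([key, value]) => config[key] === value
  );
}

Read this way, load_peak_hours_cost is only meaningful while load_cost_forecast_method is "hp_hc_periods"; the requirement-disable class added in style.css (pointer-events: none plus an opacity filter) suggests the page greys out unmet parameters rather than removing them.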
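For reference, the Box/List inputs that inputToJson() assembles in script.js are posted as one flat JSON dictionary to action/<action-name>. The body below is purely illustrative (the values are invented), but the keys come from the definitions above:

//illustrative runtime payload for, e.g., fetch(`action/dayahead-optim`, { method: "POST", ... })
{
  "number_of_deferrable_loads": 2,
  "nominal_power_of_deferrable_loads": [3000.0, 750.0],
  "operating_hours_of_each_deferrable_load": [5, 8]
}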