diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 383f3850..e21e1e5b 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -9,10 +9,8 @@ }, "features": { "ghcr.io/devcontainers/features/common-utils:2": { - "installZsh": "true", - "configureZshAsDefaultShell": "true", - "installOhMyZsh": "false", - "installOhMyZshConfig": "false" + "installBash": "true", + "configureBashAsDefaultShell": "true" } // "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} }, @@ -46,6 +44,6 @@ } }, - "postCreateCommand": ["pip3", "install", "--break-system-packages" , ".[test]"] + "postCreateCommand": ["uv", "pip", "install", "--cache-dir", ".cache", "--verbose", ".[test]"] } diff --git a/.github/workflows/codecov.yaml b/.github/workflows/codecov.yaml index d1f5a3cf..cfa87c21 100644 --- a/.github/workflows/codecov.yaml +++ b/.github/workflows/codecov.yaml @@ -16,14 +16,16 @@ jobs: PYTHON: '3.11' steps: - uses: actions/checkout@master - - name: Setup Python - uses: actions/setup-python@master + - name: Install uv + uses: astral-sh/setup-uv@v5 + - name: Set up Python + uses: actions/setup-python@v5 with: - python-version: 3.11 + python-version-file: ".python-version" - name: Generate Report run: | - python -m pip install --upgrade pip - pip install .[test] + uv venv + uv pip install .[test] coverage run -m --source=emhass unittest coverage report coverage xml diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index db5fc192..f635dc44 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -50,7 +50,7 @@ jobs: - name: Export Debian package list run: mkdir OSV && docker run --rm --entrypoint '/bin/cat' ${{ steps.build.outputs.imageid }} /var/lib/dpkg/status >> ./OSV/${{ matrix.platform.target_arch }}.status - name: Export Python package list - run: docker run --rm --entrypoint '/usr/bin/pip3' ${{ steps.build.outputs.imageid }} freeze >> ./OSV/${{ matrix.platform.target_arch }}-requirements.txt + run: docker run --rm --entrypoint '/bin/cat' ${{ steps.build.outputs.imageid }} uv.lock >> ./OSV/${{ matrix.platform.target_arch }}.lock - name: Upload package list as digest uses: actions/upload-artifact@v4 with: @@ -77,10 +77,10 @@ jobs: with: download-artifact: "${{ matrix.platform.target_arch }}-packages" matrix-property: "${{ matrix.platform.target_arch }}-" + fail-on-vuln: false scan-args: |- --lockfile=dpkg-status:./${{ matrix.platform.target_arch }}.status - --lockfile=requirements.txt:./${{matrix.platform.target_arch }}-requirements.txt - --fail-on-vuln: false + --lockfile=poetry.lock:./${{matrix.platform.target_arch }}.lock permissions: security-events: write contents: read diff --git a/.github/workflows/publish_docker-test.yaml b/.github/workflows/publish_docker-test.yaml index f67eb0c7..c8b644fd 100644 --- a/.github/workflows/publish_docker-test.yaml +++ b/.github/workflows/publish_docker-test.yaml @@ -4,7 +4,7 @@ name: "Publish Docker test image" on: push: - branches: [master] + branches: [testing] workflow_dispatch: env: @@ -62,7 +62,7 @@ jobs: - name: Export Debian package list run: mkdir OSV && docker run --rm --entrypoint '/bin/cat' ${{ steps.cache.outputs.imageid }} /var/lib/dpkg/status >> ./OSV/${{ matrix.platform.target_arch }}.status - name: Export Python package list - run: docker run --rm --entrypoint '/usr/bin/pip3' ${{ steps.cache.outputs.imageid }} freeze >> ./OSV/${{ matrix.platform.target_arch }}-requirements.txt + run: docker 
run --rm --entrypoint '/bin/cat' ${{ steps.cache.outputs.imageid }} uv.lock >> ./OSV/${{ matrix.platform.target_arch }}.lock - name: Upload package list as digest uses: actions/upload-artifact@v4 with: @@ -120,10 +120,10 @@ jobs: with: download-artifact: "${{ matrix.platform.target_arch }}-packages" matrix-property: "${{ matrix.platform.target_arch }}-" + fail-on-vuln: false scan-args: |- --lockfile=dpkg-status:./${{ matrix.platform.target_arch }}.status - --lockfile=requirements.txt:./${{matrix.platform.target_arch }}-requirements.txt - --fail-on-vuln: false + --lockfile=poetry.lock:./${{matrix.platform.target_arch }}.lock permissions: security-events: write contents: read diff --git a/.github/workflows/publish_docker.yaml b/.github/workflows/publish_docker.yaml index aace1d32..94df23df 100644 --- a/.github/workflows/publish_docker.yaml +++ b/.github/workflows/publish_docker.yaml @@ -61,7 +61,7 @@ jobs: - name: Export Debian package list run: mkdir OSV && docker run --rm --entrypoint '/bin/cat' ${{ steps.cache.outputs.imageid }} /var/lib/dpkg/status >> ./OSV/${{ matrix.platform.target_arch }}.status - name: Export Python package list - run: docker run --rm --entrypoint '/usr/bin/pip3' ${{ steps.cache.outputs.imageid }} freeze >> ./OSV/${{ matrix.platform.target_arch }}-requirements.txt + run: docker run --rm --entrypoint '/bin/cat' ${{ steps.cache.outputs.imageid }} uv.lock >> ./OSV/${{ matrix.platform.target_arch }}.lock - name: Upload package list as digest uses: actions/upload-artifact@v4 with: @@ -119,10 +119,10 @@ jobs: with: download-artifact: "${{ matrix.platform.target_arch }}-packages" matrix-property: "${{ matrix.platform.target_arch }}-" + fail-on-vuln: false scan-args: |- --lockfile=dpkg-status:./${{ matrix.platform.target_arch }}.status - --lockfile=requirements.txt:./${{matrix.platform.target_arch }}-requirements.txt - --fail-on-vuln: false + --lockfile=poetry.lock:./${{matrix.platform.target_arch }}.lock permissions: security-events: write contents: read diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml index b78beefa..8f7a7c6f 100644 --- a/.github/workflows/python-test.yml +++ b/.github/workflows/python-test.yml @@ -12,20 +12,8 @@ permissions: jobs: # Google OSV-Scanner - scan-pr: - uses: "geoderp/osv-scanner-action/.github/workflows/osv-scanner-reusable-pr.yml@v0.0.1" - with: - scan-args: |- - --recursive - ./ - permissions: - security-events: write - contents: read - actions: read build: runs-on: ${{ matrix.os }} - needs: - - scan-pr strategy: fail-fast: false matrix: @@ -37,23 +25,36 @@ jobs: steps: - uses: actions/checkout@v4 - - - name: Set up Python ${{ matrix.python-version }} + - name: Install uv + uses: astral-sh/setup-uv@v5 + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: ${{ matrix.python-version }} - + python-version-file: ".python-version" + - name: Set up Python venv + run: uv venv - name: Special dependencies for macos run: | brew install hdf5 - pip3 install numpy==1.26.0 - pip3 install tables==3.9.1 + uv pip install numpy==1.26.0 + uv pip install tables==3.9.1 if: ${{ matrix.os == 'macos-latest' }} - - name: Install EMHASS with test dependencies run: | - pip install .[test] - + uv pip install .[test] && uv lock - name: Test with pytest run: | - pytest + uv run pytest + scan-pr: + needs: + - build + uses: "geoderp/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v0.0.1" + with: + fail-on-vuln: false + scan-args: |- + --recursive + ./ + permissions: + security-events: write + contents: 
read + actions: read \ No newline at end of file diff --git a/.github/workflows/upload-package-to-pypi.yaml b/.github/workflows/upload-package-to-pypi.yaml index 29e443f7..0f026444 100644 --- a/.github/workflows/upload-package-to-pypi.yaml +++ b/.github/workflows/upload-package-to-pypi.yaml @@ -7,40 +7,40 @@ on: jobs: # Google OSV-Scanner - osv-scan: - uses: "geoderp/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v0.0.1" - with: - scan-args: |- - --recursive - ./ - permissions: - security-events: write - contents: read - actions: read build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Setup Python + - name: Install uv + uses: astral-sh/setup-uv@v5 + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.11 - - name: Install pypa/build - run: >- - python3 -m pip install build --user + python-version-file: ".python-version" - name: Build a binary wheel and a source tarball - run: python3 -m build + run: uv build - name: Store the distribution packages uses: actions/upload-artifact@v4 with: name: python-package-distributions path: dist/ + osv-scan: + uses: "geoderp/osv-scanner-action/.github/workflows/osv-scanner-reusable.yml@v0.0.1" + needs: + - build + with: + scan-args: |- + --recursive + ./ + permissions: + security-events: write + contents: read + actions: read publish-to-pypi: name: >- Publish Python 🐍 distribution 📦 to PyPI needs: - osv-scan - - build runs-on: ubuntu-latest environment: name: pypi diff --git a/.gitignore b/.gitignore index 7879b3f6..e01d32bf 100644 --- a/.gitignore +++ b/.gitignore @@ -56,6 +56,7 @@ data/entities/*.json # Installer logs pip-log.txt pip-delete-this-directory.txt +uv.lock # Unit test / coverage reports htmlcov/ diff --git a/.python-version b/.python-version new file mode 100644 index 00000000..2c073331 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.11 diff --git a/.vscode/settings.json b/.vscode/settings.json index a3b632e7..2ac1c0db 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -10,6 +10,7 @@ ], "python.testing.pytestEnabled": false, "python.testing.unittestEnabled": true, + "python.defaultInterpreterPath": ".venv/bin/python", "cSpell.words": [ "automations" ] diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 9188cd10..03e373ee 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -3,18 +3,13 @@ "tasks": [ { "label": "EMHASS install", - "command": "pip3", + "command": "uv", "group": { "kind": "build", "isDefault": true }, "args": [ - "install", - "--no-deps", - "--force-reinstall", - "--break-system-packages", - "--editable", - "." + "pip", "install", "--force-reinstall", ".[test]" ], "presentation": { "echo": true, @@ -24,13 +19,13 @@ }, { "label": "EMHASS install with dependencies", - "command": "pip3", + "command": "uv", "group": { "kind": "build", "isDefault": true }, "args": [ - "install", "--break-system-packages", "--force-reinstall", "." 
+ "pip", "install", "--reinstall", ".[test]" ], "presentation": { "echo": true, diff --git a/Dockerfile b/Dockerfile index d501a2dc..72f1b379 100755 --- a/Dockerfile +++ b/Dockerfile @@ -16,49 +16,60 @@ ARG TARGETARCH ENV TARGETARCH=${TARGETARCH:?} WORKDIR /app -COPY requirements.txt /app/ +COPY pyproject.toml /app/ +COPY .python-version /app/ +COPY gunicorn.conf.py /app/ -# apt package install RUN apt-get update \ && apt-get install -y --no-install-recommends \ - libffi-dev \ - python3.11 \ - python3-pip \ - python3.11-dev \ - git \ - gcc \ - patchelf \ - cmake \ - meson \ - ninja-build \ - build-essential \ - libhdf5-dev \ - libhdf5-serial-dev \ - pkg-config \ - gfortran \ - netcdf-bin \ - libnetcdf-dev \ - coinor-cbc \ - coinor-libcbc-dev \ - libglpk-dev \ - glpk-utils \ + # Numpy + libgfortran5 \ + libopenblas0-pthread \ + libopenblas-dev \ libatlas3-base \ libatlas-base-dev \ - libopenblas-dev \ - libopenblas0-pthread \ - libgfortran5 \ + # h5py / tables libsz2 \ libaec0 \ libhdf5-hl-100 \ - libhdf5-103-1 + libhdf5-103-1 \ + libhdf5-dev \ + libhdf5-serial-dev + # # cbc + # coinor-cbc \ + # coinor-libcbc-dev + + # libffi-dev \ + # gfortran \ + # netcdf-bin \ + # libnetcdf-dev \ + # libglpk-dev \ + # glpk-utils \ + # libatlas3-base \ + # libatlas-base-dev \ + # libopenblas-dev \ + # libopenblas0-pthread \ + # libgfortran5 \ + +# add build packadges (just in case wheel does not exist) +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + gcc \ + patchelf \ + cmake \ + meson \ + ninja-build + +# Install uv (pip alternative) +RUN curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="/usr/local/bin" sh +# Install python (version based on .python-version) +RUN uv python install + # specify hdf5 RUN ln -s /usr/include/hdf5/serial /usr/include/hdf5/include && export HDF5_DIR=/usr/include/hdf5 # note, its a good idea to remove the "llvm-dev" package and "LLVM_CONFIG=/usr/bin/llvm-config pip3 install 'llvmlite>=0.43'" once the llvmlite package has been fixed in piwheels -RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && apt-get update && apt-get install -y --no-install-recommends llvm-dev && LLVM_CONFIG=/usr/bin/llvm-config pip3 install --no-cache-dir --break-system-packages 'llvmlite>=0.43' || echo "skipping llvm-dev install" - -# install packages from pip, use piwheels if arm 32bit -RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && pip3 install --index-url=https://www.piwheels.org/simple --no-cache-dir --break-system-packages -r requirements.txt || pip3 install --no-cache-dir --break-system-packages -r requirements.txt +RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && apt-get update && apt-get install -y --no-install-recommends llvm-dev && LLVM_CONFIG=/usr/bin/llvm-config uv pip install --break-system-packages --no-cache-dir --system 'llvmlite>=0.43' || echo "skipping llvm-dev install" # try, symlink apt cbc, to pulp cbc, in python directory (for 32bit) RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && ln -sf /usr/bin/cbc /usr/local/lib/python3.11/dist-packages/pulp/solverdir/cbc/linux/32/cbc || echo "cbc symlink didnt work/not required" @@ -66,20 +77,6 @@ RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && ln -sf / # if armv7, try install libatomic1 to fix scipy issue RUN [[ "${TARGETARCH}" == "armv7" ]] && apt-get update && apt-get install libatomic1 || echo "libatomic1 cant be installed" -# remove build only packages -RUN apt-get purge -y --auto-remove \ - gcc \ 
- patchelf \ - cmake \ - meson \ - ninja-build \ - build-essential \ - pkg-config \ - gfortran \ - netcdf-bin \ - libnetcdf-dev \ - && rm -rf /var/lib/apt/lists/* - # make sure data directory exists RUN mkdir -p /app/data/ @@ -121,11 +118,25 @@ LABEL \ org.opencontainers.image.description="EMHASS python package and requirements, in Home Assistant Debian container." # build EMHASS -RUN pip3 install --no-cache-dir --break-system-packages --no-deps --force-reinstall . -ENTRYPOINT [ "python3", "-m", "emhass.web_server"] +RUN uv venv && . .venv/bin/activate +RUN [[ "${TARGETARCH}" == "armhf" || "${TARGETARCH}" == "armv7" ]] && uv pip install --verbose --extra-index-url https://www.piwheels.org/simple . || uv pip install --verbose . +RUN uv lock + +# remove build only packages +RUN apt-get remove --purge -y --auto-remove \ + gcc \ + patchelf \ + cmake \ + meson \ + ninja-build \ + && rm -rf /var/lib/apt/lists/* + +ENTRYPOINT [ "uv", "run", "gunicorn", "emhass.web_server:create_app()" ] +# old +# ENTRYPOINT [ "uv", "run", "--link-mode=copy", "--allow-insecure-host=localhost:5000", "--frozen", "-m", "emhass.web_server"] # for running Unittest #COPY tests/ /app/tests #RUN apt-get update && apt-get install python3-requests-mock -y #COPY data/ /app/data/ -#ENTRYPOINT ["python3","-m","unittest","discover","-s","./tests","-p","test_*.py"] +#ENTRYPOINT ["uv","run","unittest","discover","-s","./tests","-p","test_*.py"] diff --git a/README.md b/README.md index 76f3c038..d585a67f 100644 --- a/README.md +++ b/README.md @@ -528,6 +528,7 @@ Here is the list of the other additional dictionary keys that can be passed at r - `nominal_power_of_deferrable_loads` for the nominal power for each deferrable load in Watts. - `operating_hours_of_each_deferrable_load` for the total number of hours that each deferrable load should operate. + - Alternatively, you can pass `operating_timesteps_of_each_deferrable_load` to set the total number of timesteps for each deferrable load. *(the preferable parameter when operating periods are shorter than 1 hour)* - `start_timesteps_of_each_deferrable_load` for the timestep from which each deferrable load is allowed to operate (if you don't want the deferrable load to use the whole optimization timewindow).
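For context on the README addition above, a minimal usage sketch of the new `operating_timesteps_of_each_deferrable_load` key passed at runtime, assuming a local EMHASS instance on port 5000 and the documented `/action/dayahead-optim` endpoint; the timestep counts are illustrative:

```python
import requests

# Illustrative values: with a 30 min optimization step, 3 timesteps means
# 1.5 h of operation for load 0 and 1 timestep means 30 min for load 1,
# a resolution that operating_hours_of_each_deferrable_load cannot express.
runtimeparams = {"operating_timesteps_of_each_deferrable_load": [3, 1]}

# Assumed host/port; adjust to your deployment.
response = requests.post(
    "http://localhost:5000/action/dayahead-optim",
    json=runtimeparams,
    timeout=60,
)
response.raise_for_status()
```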
diff --git a/docs/requirements.txt b/docs/requirements.txt index d365c1c3..5f563b0b 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,6 @@ -wheel -sphinx==7.2.6 -sphinx-rtd-theme==2.0.0 -docutils==0.18.1 -attrs==21.4.0 -myst-parser==3.0.1 \ No newline at end of file +wheel>=0.45.0 +sphinx==8.1.1 +sphinx-rtd-theme==3.0.2 +docutils==0.21.1 +attrs==24.3.0 +myst-parser==4.0.0 \ No newline at end of file diff --git a/gunicorn.conf.py b/gunicorn.conf.py new file mode 100755 index 00000000..fb54825a --- /dev/null +++ b/gunicorn.conf.py @@ -0,0 +1,11 @@ +import os +from distutils.util import strtobool + +bind = f"{os.getenv('IP', '0.0.0.0')}:{os.getenv('PORT', '5000')}" + +workers = int(os.getenv("WEB_CONCURRENCY", 1)) +threads = int(os.getenv("PYTHON_MAX_THREADS", 8)) + +reload = bool(strtobool(os.getenv("WEB_RELOAD", "true"))) + +timeout = int(os.getenv("WEB_TIMEOUT", 240)) diff --git a/pyproject.toml b/pyproject.toml index 4121623b..37d579ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,8 @@ [build-system] # These are the assumed default build requirements from pip: # https://pip.pypa.io/en/stable/reference/pip/#pep-517-and-518-support -requires = ["setuptools>=62.0.0", "wheel"] -build-backend = "setuptools.build_meta" +requires = ["hatchling"] +build-backend = "hatchling.build" [project] name = "emhass" @@ -21,30 +21,33 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Operating System :: OS Independent", ] - dependencies = [ - "numpy==1.26.4", - "scipy==1.12.0", - "pandas<=2.0.3", - "pvlib>=0.10.2", - "protobuf>=3.0.0", - "pytz>=2021.1", + "numpy>=2.0.0, <2.3.0", + "scipy>=1.14.0", + "pandas>=2.1.1", + "pvlib>=0.10.3", + "protobuf>=5.29.1", + "pytz>=2023.4", "requests>=2.25.1", - "beautifulsoup4>=4.9.3", - "h5py==3.12.1", - "pulp>=2.4", - "pyyaml>=5.4.1", - "tables<=3.9.1", - "skforecast==0.14.0", - "flask>=2.0.3", - "waitress>=2.1.1", - "plotly>=5.6.0", + "beautifulsoup4>=4.12.3", + "h5py>=3.12.1", + "pulp>=2.8.0", + "pyyaml>=6.0.1", + "tables>=3.9.1", + "skforecast>=0.9.0", + "flask>=3.1.0", + "waitress>=3.0.2", + "plotly>=6.0.0rc0", + "gunicorn>=23.0.0", ] [project.optional-dependencies] docs = ["sphinx", "sphinx-rtd-theme", "myst-parser"] test = ["requests_mock", "pytest", "coverage", "snakeviz", "ruff", "tabulate"] +[tool.uv.workspace] +members = ["emhass"] + [tool.setuptools.packages] find = { where = ["src"] } diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index a3a41dff..00000000 --- a/requirements.txt +++ /dev/null @@ -1,17 +0,0 @@ -numpy>=1.22 -scipy==1.12.0 -pandas>=1.5 -pvlib>=0.10.2 -protobuf>=5.29.1 -pytz>=2021.1 -requests>=2.32.3 -beautifulsoup4>=4.9.3 -h5py==3.12.1 -pulp>=2.4 -pyyaml>=5.4.1 -tables<=3.9.1 -skforecast==0.14.0 -# web server packages -flask>=3.1.0 -waitress>=3.0.2 -plotly>=5.6.0 \ No newline at end of file diff --git a/scripts/load_clustering.py b/scripts/load_clustering.py index ef847f73..83448925 100644 --- a/scripts/load_clustering.py +++ b/scripts/load_clustering.py @@ -7,15 +7,12 @@ import pandas as pd import numpy as np -import plotly.express as px import plotly.io as pio import plotly.graph_objects as go pio.renderers.default = "browser" pd.options.plotting.backend = "plotly" -from sklearn.cluster import KMeans -from sklearn.metrics import silhouette_score # from skopt.space import Categorical, Real, Integer # from tslearn.clustering import TimeSeriesKMeans diff --git a/src/emhass/command_line.py b/src/emhass/command_line.py index 1e57fabd..8dde7e3d 100644 --- 
a/src/emhass/command_line.py +++ b/src/emhass/command_line.py @@ -11,7 +11,6 @@ import re import time from datetime import datetime, timezone -from distutils.util import strtobool from importlib.metadata import version from typing import Optional, Tuple @@ -646,9 +645,12 @@ def naive_mpc_optim( prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"] soc_init = input_data_dict["params"]["passed_data"]["soc_init"] soc_final = input_data_dict["params"]["passed_data"]["soc_final"] - def_total_hours = input_data_dict["params"]["optim_conf"][ - "operating_hours_of_each_deferrable_load" - ] + def_total_hours = input_data_dict["params"]["optim_conf"].get( + "operating_hours_of_each_deferrable_load", None + ) + def_total_timestep = input_data_dict["params"]["optim_conf"].get( + "operating_timesteps_of_each_deferrable_load", None + ) def_start_timestep = input_data_dict["params"]["optim_conf"][ "start_timesteps_of_each_deferrable_load" ] @@ -663,6 +665,7 @@ def naive_mpc_optim( soc_init, soc_final, def_total_hours, + def_total_timestep, def_start_timestep, def_end_timestep, ) @@ -1515,8 +1518,8 @@ def main(): ) parser.add_argument( "--log2file", - type=strtobool, - default="False", + type=bool, + default=False, help="Define if we should log to a file or not", ) parser.add_argument( @@ -1532,7 +1535,10 @@ def main(): help="Pass runtime optimization parameters as dictionnary", ) parser.add_argument( - "--debug", type=strtobool, default="False", help="Use True for testing purposes" + "--debug", + type=bool, + default=False, + help="Use True for testing purposes", ) args = parser.parse_args() diff --git a/src/emhass/data/cec_inverters.pbz2 b/src/emhass/data/cec_inverters.pbz2 index 4031693f..787c1f1f 100644 Binary files a/src/emhass/data/cec_inverters.pbz2 and b/src/emhass/data/cec_inverters.pbz2 differ diff --git a/src/emhass/data/cec_modules.pbz2 b/src/emhass/data/cec_modules.pbz2 index 47fa782a..a138ed43 100644 Binary files a/src/emhass/data/cec_modules.pbz2 and b/src/emhass/data/cec_modules.pbz2 differ diff --git a/src/emhass/data/emhass_inverters.csv b/src/emhass/data/emhass_inverters.csv index b0a254e1..56bf5653 100644 --- a/src/emhass/data/emhass_inverters.csv +++ b/src/emhass/data/emhass_inverters.csv @@ -1,6 +1,8 @@ -Name,Vac,Pso,Paco,Pdco,Vdco,C0,C1,C2,C3,Pnt,Vdcmax,Idcmax,Mppt_low,Mppt_high,CEC_Date,CEC_hybrid -Units,V,W,W,W,V,1/W,1/V,1/V,1/V,W,V,A,V,V,, -[0],inv_snl_ac_voltage,inv_snl_pso,inv_snl_paco,inv_snl_pdco,inv_snl_vdco,inv_snl_c0,inv_snl_c1,inv_snl_c2,inv_snl_c3,inv_snl_pnt,inv_snl_vdcmax,inv_snl_idcmax,inv_snl_mppt_low,inv_snl_mppt_hi,inv_cec_date,inv_cec_hybrid -Sungrow SH20T, 400, 2.5, 20000, 21000.0, 600, 0.0011, 0.0105, 0.0023, 0.00055, 2.5, 1100, 40.0, 200, 1000, 01/01/2025, Y -Sungrow SH25T, 400, 2.5, 25000, 26000.0, 600, 0.0012, 0.011, 0.0025, 0.0006, 2.5, 1100, 45.0, 200, 1000, 01/01/2025, Y -Sungrow SH15T, 400, 2.5, 15000, 15700.0, 600, 0.001, 0.01, 0.002, 0.0005, 2.5, 1100, 30.0, 200, 1000, 01/01/2025, Y \ No newline at end of file +Name,Vac,Pso,Paco,Pdco,Vdco,C0,C1,C2,C3,Pnt,Vdcmax,Idcmax,Mppt_low,Mppt_high,CEC_Date,CEC_hybrid,CEC_Type +Units,V,W,W,W,V,1/W,1/V,1/V,1/V,W,V,A,V,V,,, +[0],inv_snl_ac_voltage,inv_snl_pso,inv_snl_paco,inv_snl_pdco,inv_snl_vdco,inv_snl_c0,inv_snl_c1,inv_snl_c2,inv_snl_c3,inv_snl_pnt,inv_snl_vdcmax,inv_snl_idcmax,inv_snl_mppt_low,inv_snl_mppt_hi,inv_cec_date,inv_cec_hybrid,inv_cec_type +Sungrow: SH25T,400,250,27500,26000,600,-0.0000828,-0.000759,-0.0001722,-0.0000414,25,1100,80,200,950,01/01/2025,Y,Hybrid +Sungrow: 
SH20T,400,200,22000,21000,600,-0.00007584,-0.0007245,-0.0001587,-0.00003834,25,1100,80,200,950,01/01/2025,Y,Hybrid +Sungrow: SH15T,400,150,16500,15700,600,-0.000069,-0.00069,-0.000138,-0.0000345,25,1100,80,200,950,01/01/2025,Y,Hybrid +Sungrow: SH10RT,400,100,11000,10467,600,-0.00005892,-0.0006555,-0.0001173,-0.00002934,25,1100,80,200,950,01/01/2025,Y,Hybrid +Sungrow: SH10RS,400,100,10000,9500,600,-0.000065,-0.00075,-0.00013,-0.000032,25,1000,25,250,850,01/01/2025,N,Grid-Tie diff --git a/src/emhass/data/emhass_modules.csv b/src/emhass/data/emhass_modules.csv index 20944c33..ef947e54 100644 --- a/src/emhass/data/emhass_modules.csv +++ b/src/emhass/data/emhass_modules.csv @@ -1,3 +1,6 @@ Name,Manufacturer,Technology,Bifacial,STC,PTC,A_c,Length,Width,N_s,I_sc_ref,V_oc_ref,I_mp_ref,V_mp_ref,alpha_sc,beta_oc,T_NOCT,a_ref,I_L_ref,I_o_ref,R_s,R_sh_ref,Adjust,gamma_pmp,BIPV,Version,Date Units,,,,,,m2,m,m,,A,V,A,V,A/K,V/K,C,V,A,A,Ohm,Ohm,%,%/K,,, -[0],lib_manufacturer,cec_material,lib_is_bifacial,,,cec_area,lib_length,lib_width,cec_n_s,cec_i_sc_ref,cec_v_oc_ref,cec_i_mp_ref,cec_v_mp_ref,cec_alpha_sc,cec_beta_oc,cec_t_noct,cec_a_ref,cec_i_l_ref,cec_i_o_ref,cec_r_s,cec_r_sh_ref,cec_adjust,cec_gamma_pmp,,, \ No newline at end of file +[0],lib_manufacturer,cec_material,lib_is_bifacial,,,cec_area,lib_length,lib_width,cec_n_s,cec_i_sc_ref,cec_v_oc_ref,cec_i_mp_ref,cec_v_mp_ref,cec_alpha_sc,cec_beta_oc,cec_t_noct,cec_a_ref,cec_i_l_ref,cec_i_o_ref,cec_r_s,cec_r_sh_ref,cec_adjust,cec_gamma_pmp,,, +Jinko Solar JKM475N-60HL4-V,Jinko Solar,Mono-c-Si,0,475.0,432.0,2.16,1.903,1.134,120,14.23,42.54,13.49,35.21,0.046,-0.17,45.0,1.3,14.23,2e-7,0.2,300.0,1,-0.003,N,2021.08.01,01/03/2021 +Hanwha Q CELLS Q.PEAK L-G5 300W,Hanwha Q CELLS,Mono-c-Si,N,300.0,273.0,1.67,1.67,1.0,72,9.57,39.58,9.01,33.29,0.0006,-0.33,45.0,1.2,9.58,1e-10,0.5,1500,1,-0.38,N,2019.08.01,01/03/2019 +Risen RSM40-8-390M,Risen,Mono-c-Si,0,390.0,355.0,1.82,1.755,1.038,80,13.98,40.70,13.19,31.8,0.048,-0.15,45.0,1.25,13.98,1.9e-7,0.38,445.0,1,-0.35,N,2024.01.01,01/04/2024 diff --git a/src/emhass/forecast.py b/src/emhass/forecast.py index 25c68485..f507e0f0 100644 --- a/src/emhass/forecast.py +++ b/src/emhass/forecast.py @@ -916,33 +916,35 @@ def resample_data(data, freq, current_freq): # Upsampling # Use 'asfreq' to create empty slots, then interpolate resampled_data = data.resample(freq).asfreq() - resampled_data = resampled_data.interpolate(method='time') + resampled_data = resampled_data.interpolate(method="time") else: # No resampling needed resampled_data = data.copy() return resampled_data - + @staticmethod def get_typical_load_forecast(data, forecast_date): r""" Forecast the load profile for the next day based on historic data. - :param data: A DataFrame with a DateTimeIndex containing the historic load data. + :param data: A DataFrame with a DateTimeIndex containing the historic load data. Must include a 'load' column. :type data: pd.DataFrame :param forecast_date: The date for which the forecast will be generated. :type forecast_date: pd.Timestamp - :return: A Series with the forecasted load profile for the next day and a list of days used + :return: A Series with the forecasted load profile for the next day and a list of days used to calculate the forecast. 
:rtype: tuple (pd.Series, list) """ # Ensure the 'load' column exists - if 'load' not in data.columns: + if "load" not in data.columns: raise ValueError("Data must have a 'load' column.") # Filter historic data for the same month and day of the week month = forecast_date.month day_of_week = forecast_date.dayofweek - historic_data = data[(data.index.month == month) & (data.index.dayofweek == day_of_week)] + historic_data = data[ + (data.index.month == month) & (data.index.dayofweek == day_of_week) + ] used_days = np.unique(historic_data.index.date) # Align all historic data to the forecast day aligned_data = [] @@ -950,7 +952,11 @@ def get_typical_load_forecast(data, forecast_date): daily_data = data[data.index.date == pd.Timestamp(day).date()] aligned_daily_data = daily_data.copy() aligned_daily_data.index = aligned_daily_data.index.map( - lambda x: x.replace(year=forecast_date.year, month=forecast_date.month, day=forecast_date.day) + lambda x: x.replace( + year=forecast_date.year, + month=forecast_date.month, + day=forecast_date.day, + ) ) aligned_data.append(aligned_daily_data) # Combine all aligned historic data into a single DataFrame @@ -958,7 +964,7 @@ def get_typical_load_forecast(data, forecast_date): # Compute the mean load for each timestamp forecast = combined_data.groupby(combined_data.index).mean() return forecast, used_days - + def get_load_forecast( self, days_min_load_forecast: Optional[int] = 3, @@ -1051,14 +1057,18 @@ def get_load_forecast( ): return False df = rh.df_final.copy()[[self.var_load_new]] - if method == "typical": # using typical statistical data from a household power consumption + if ( + method == "typical" + ): # using typical statistical data from a household power consumption # Loading data from history file model_type = "load_clustering" - data_path = self.emhass_conf["data_path"] / str("data_train_" + model_type + ".pkl") + data_path = self.emhass_conf["data_path"] / str( + "data_train_" + model_type + ".pkl" + ) with open(data_path, "rb") as fid: data, _ = pickle.load(fid) # Resample the data if needed - current_freq = pd.Timedelta('30min') + current_freq = pd.Timedelta("30min") if self.freq != current_freq: data = Forecast.resample_data(data, self.freq, current_freq) # Generate forecast @@ -1067,20 +1077,28 @@ def get_load_forecast( forecast = pd.DataFrame() for date in dates_list: forecast_date = pd.Timestamp(date) - data.columns = ['load'] - forecast_tmp, used_days = Forecast.get_typical_load_forecast(data, forecast_date) - self.logger.debug(f"Using {len(used_days)} days of data to generate the forecast.") + data.columns = ["load"] + forecast_tmp, used_days = Forecast.get_typical_load_forecast( + data, forecast_date + ) + self.logger.debug( + f"Using {len(used_days)} days of data to generate the forecast." 
+ ) # Normalize the forecast - forecast_tmp = forecast_tmp*self.plant_conf['maximum_power_from_grid']/9000 + forecast_tmp = ( + forecast_tmp * self.plant_conf["maximum_power_from_grid"] / 9000 + ) data_list.extend(forecast_tmp.values.ravel().tolist()) if len(forecast) == 0: forecast = forecast_tmp else: forecast = pd.concat([forecast, forecast_tmp], axis=0) forecast.index = forecast.index.tz_convert(self.time_zone) - forecast_out = forecast.loc[forecast.index.intersection(self.forecast_dates)] - forecast_out.index.name = 'ts' - forecast_out = forecast_out.rename(columns={'load': 'yhat'}) + forecast_out = forecast.loc[ + forecast.index.intersection(self.forecast_dates) + ] + forecast_out.index.name = "ts" + forecast_out = forecast_out.rename(columns={"load": "yhat"}) elif method == "naive": # using a naive approach mask_forecast_out = ( df.index > days_list[-1] - self.optim_conf["delta_forecast_daily"] diff --git a/src/emhass/machine_learning_forecaster.py b/src/emhass/machine_learning_forecaster.py index 331b015d..e4b6ae4c 100644 --- a/src/emhass/machine_learning_forecaster.py +++ b/src/emhass/machine_learning_forecaster.py @@ -1,22 +1,22 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -import logging import copy +import logging import time +import warnings from typing import Optional, Tuple -import pandas as pd -import numpy as np -from sklearn.linear_model import LinearRegression -from sklearn.linear_model import ElasticNet -from sklearn.neighbors import KNeighborsRegressor +import numpy as np +import pandas as pd +from skforecast.ForecasterAutoreg import ForecasterAutoreg +from skforecast.model_selection import ( + backtesting_forecaster, + bayesian_search_forecaster, +) +from sklearn.linear_model import ElasticNet, LinearRegression from sklearn.metrics import r2_score - -from skforecast.recursive import ForecasterRecursive -from skforecast.model_selection import bayesian_search_forecaster, backtesting_forecaster, TimeSeriesFold - -import warnings +from sklearn.neighbors import KNeighborsRegressor warnings.filterwarnings("ignore", category=DeprecationWarning) @@ -169,10 +169,7 @@ def fit( ) base_model = KNeighborsRegressor() # Define the forecaster object - self.forecaster = ForecasterRecursive( - regressor = base_model, - lags = self.num_lags - ) + self.forecaster = ForecasterAutoreg(regressor=base_model, lags=self.num_lags) # Fit and time it self.logger.info("Training a " + self.sklearn_model + " model") start_time = time.time() @@ -201,22 +198,18 @@ def fit( # Using backtesting tool to evaluate the model self.logger.info("Performing simple backtesting of fitted model") start_time = time.time() - cv = TimeSeriesFold( - steps = self.num_lags, - initial_train_size = None, - fixed_train_size = False, - gap = 0, - allow_incomplete_fold = True, - refit = False - ) metric, predictions_backtest = backtesting_forecaster( - forecaster = self.forecaster, - y = self.data_train[self.var_model], - exog = self.data_train.drop(self.var_model, axis=1), - cv = cv, - metric = MLForecaster.neg_r2_score, - verbose = False, - show_progress = True + forecaster=self.forecaster, + y=self.data_train[self.var_model], + exog=self.data_train.drop(self.var_model, axis=1), + steps=self.num_lags, + initial_train_size=None, + allow_incomplete_fold=True, + gap=0, + metric=MLForecaster.neg_r2_score, + verbose=False, + refit=False, + show_progress=True, ) self.logger.info(f"Elapsed backtesting time: {time.time() - start_time}") self.logger.info(f"Backtest R2 score: {-metric}") @@ -356,25 +349,24 @@ def 
search_space(trial): # The optimization routine call self.logger.info("Bayesian hyperparameter optimization with backtesting") start_time = time.time() - cv = TimeSeriesFold( - steps = num_lags, - initial_train_size = len(self.data_exo.loc[:self.date_train]), - fixed_train_size = True, - gap = 0, - skip_folds = None, - allow_incomplete_fold = True, - refit = refit - ) - self.optimize_results, self.optimize_results_object = bayesian_search_forecaster( - forecaster = self.forecaster, - y = self.data_train[self.var_model], - exog = self.data_train.drop(self.var_model, axis=1), - cv = cv, - search_space = search_space, - metric = MLForecaster.neg_r2_score, - n_trials = 10, - random_state = 123, - return_best = True + self.optimize_results, self.optimize_results_object = ( + bayesian_search_forecaster( + forecaster=self.forecaster, + y=self.data_train[self.var_model], + exog=self.data_train.drop(self.var_model, axis=1), + search_space=search_space, + metric=MLForecaster.neg_r2_score, + n_trials=10, + random_state=123, + steps=num_lags, + initial_train_size=len(self.data_exo.loc[: self.date_train]), + return_best=True, + fixed_train_size=True, + gap=0, + allow_incomplete_fold=True, + skip_folds=None, + refit=refit, + ) ) self.logger.info(f"Elapsed time: {time.time() - start_time}") self.is_tuned = True diff --git a/src/emhass/optimization.py b/src/emhass/optimization.py index acee4a15..6506e384 100644 --- a/src/emhass/optimization.py +++ b/src/emhass/optimization.py @@ -4,7 +4,6 @@ import bz2 import copy import logging -import pathlib import pickle as cPickle from math import ceil from typing import Optional, Tuple @@ -85,7 +84,7 @@ def __init__( self.var_load = self.retrieve_hass_conf["sensor_power_load_no_var_loads"] self.var_load_new = self.var_load + "_positive" self.costfun = costfun - # self.emhass_conf = emhass_conf + self.emhass_conf = emhass_conf self.logger = logger self.var_load_cost = var_load_cost self.var_prod_price = var_prod_price @@ -120,6 +119,7 @@ def perform_optimization( soc_init: Optional[float] = None, soc_final: Optional[float] = None, def_total_hours: Optional[list] = None, + def_total_timestep: Optional[list] = None, def_start_timestep: Optional[list] = None, def_end_timestep: Optional[list] = None, debug: Optional[bool] = False, @@ -152,6 +152,9 @@ def perform_optimization( :param def_total_hours: The functioning hours for this iteration for each deferrable load. \ (For continuous deferrable loads: functioning hours at nominal power) :type def_total_hours: list + :param def_total_timestep: The functioning timesteps for this iteration for each deferrable load. \ + (For continuous deferrable loads: functioning timesteps at nominal power) + :type def_total_timestep: list :param def_start_timestep: The timestep as from which each deferrable load is allowed to operate. :type def_start_timestep: list :param def_end_timestep: The timestep before which each deferrable load should operate. 
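To make the new parameter concrete before the constraint changes that follow, here is a minimal sketch of the energy budget the two formulations imply for one deferrable load, assuming `time_step_h` is the optimization step length in hours (0.5 for a 30 min step); the function name is illustrative, not part of the module's API:

```python
def deferrable_energy_wh(
    nominal_power_w: float,
    time_step_h: float,
    total_hours: float = 0.0,
    total_timesteps: int = 0,
) -> float:
    """Energy (Wh) that the LP equality constraint forces the load to consume."""
    if total_timesteps > 0:
        # Timestep-based budget: number of steps * step length * nominal power,
        # mirroring rhs = (timeStep * def_total_timestep[k]) * nominal_power.
        return total_timesteps * time_step_h * nominal_power_w
    # Hour-based budget: operating hours * nominal power,
    # mirroring rhs = def_total_hours[k] * nominal_power.
    return total_hours * nominal_power_w


# A 3 kW load required to run for 3 timesteps of 30 min must consume 4.5 kWh:
assert deferrable_energy_wh(3000.0, 0.5, total_timesteps=3) == 4500.0
```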
@@ -173,8 +176,13 @@ perform_optimization( soc_final = soc_init else: soc_final = self.plant_conf["battery_target_state_of_charge"] - if def_total_hours is None: + + # If def_total_timestep is set, bypass def_total_hours + if def_total_timestep is not None: + def_total_hours = [0 if x != 0 else x for x in def_total_hours] + elif def_total_hours is None: def_total_hours = self.optim_conf["operating_hours_of_each_deferrable_load"] + if def_start_timestep is None: def_start_timestep = self.optim_conf[ "start_timesteps_of_each_deferrable_load" ] @@ -462,7 +470,8 @@ perform_optimization( for i in range(len(self.plant_conf["pv_inverter_model"])): if type(self.plant_conf["pv_inverter_model"][i]) == str: cec_inverters = bz2.BZ2File( - pathlib.Path(__file__).parent / "data/cec_inverters.pbz2", "rb" + self.emhass_conf["root_path"] / "data" / "cec_inverters.pbz2", + "rb", ) cec_inverters = cPickle.load(cec_inverters) inverter = cec_inverters[self.plant_conf["pv_inverter_model"][i]] @@ -472,7 +481,7 @@ else: if type(self.plant_conf["pv_inverter_model"][i]) == str: cec_inverters = bz2.BZ2File( - pathlib.Path(__file__).parent / "data/cec_inverters.pbz2", "rb" + self.emhass_conf["root_path"] / "data" / "cec_inverters.pbz2", "rb" ) cec_inverters = cPickle.load(cec_inverters) inverter = cec_inverters[self.plant_conf["pv_inverter_model"]] @@ -699,8 +708,7 @@ def create_matrix(input_list, n): predicted_temps[k] = predicted_temp else: - if def_total_hours[k] > 0: - # Total time of deferrable load + if def_total_timestep and def_total_timestep[k] > 0: constraints.update( { "constraint_defload{}_energy".format(k): plp.LpConstraint( e=plp.lpSum( P_deferrable[k][i] * self.timeStep for i in set_I ), sense=plp.LpConstraintEQ, - rhs=def_total_hours[k] + rhs=(self.timeStep * def_total_timestep[k]) * self.optim_conf["nominal_power_of_deferrable_loads"][ k ], ) } ) + else: + if def_total_hours[k] > 0: + # Total time of deferrable load + constraints.update( + { + "constraint_defload{}_energy".format( + k + ): plp.LpConstraint( + e=plp.lpSum( + P_deferrable[k][i] * self.timeStep + for i in set_I + ), + sense=plp.LpConstraintEQ, + rhs=def_total_hours[k] + * self.optim_conf[ + "nominal_power_of_deferrable_loads" + ][k], + ) + } + ) # Ensure deferrable loads consume energy between def_start_timestep & def_end_timestep self.logger.debug( k, def_start_timestep[k], def_end_timestep[k] ) ) - def_start, def_end, warning = Optimization.validate_def_timewindow( - def_start_timestep[k], - def_end_timestep[k], - ceil(def_total_hours[k] / self.timeStep), - n, - ) + if def_total_timestep and def_total_timestep[k] > 0: + def_start, def_end, warning = Optimization.validate_def_timewindow( + def_start_timestep[k], + def_end_timestep[k], + ceil( + (60 / ((self.freq.seconds / 60) * def_total_timestep[k])) + / self.timeStep + ), + n, + ) + else: + def_start, def_end, warning = Optimization.validate_def_timewindow( + def_start_timestep[k], + def_end_timestep[k], + ceil(def_total_hours[k] / self.timeStep), + n, + ) if warning is not None: self.logger.warning("Deferrable load {} : {}".format(k, warning)) self.logger.debug( @@ -837,15 +876,35 @@ def create_matrix(input_list, n): } ) # P_def_bin2 must be 1 for exactly the correct number of timesteps.
- constraints.update( - { - "constraint_pdef{}_start5".format(k): plp.LpConstraint( - e=plp.lpSum(P_def_bin2[k][i] for i in set_I), - sense=plp.LpConstraintEQ, - rhs=def_total_hours[k] / self.timeStep, - ) - } - ) + if def_total_timestep and def_total_timestep[k] > 0: + constraints.update( + { + "constraint_pdef{}_start5".format(k): plp.LpConstraint( + e=plp.lpSum(P_def_bin2[k][i] for i in set_I), + sense=plp.LpConstraintEQ, + rhs=( + ( + 60 + / ( + (self.freq.seconds / 60) + * def_total_timestep[k] + ) + ) + / self.timeStep + ), + ) + } + ) + else: + constraints.update( + { + "constraint_pdef{}_start5".format(k): plp.LpConstraint( + e=plp.lpSum(P_def_bin2[k][i] for i in set_I), + sense=plp.LpConstraintEQ, + rhs=def_total_hours[k] / self.timeStep, + ) + } + ) # Treat deferrable load as a semi-continuous variable if self.optim_conf["treat_deferrable_load_as_semi_cont"][k]: @@ -1336,6 +1395,7 @@ def perform_naive_mpc_optim( soc_init: Optional[float] = None, soc_final: Optional[float] = None, def_total_hours: Optional[list] = None, + def_total_timestep: Optional[list] = None, def_start_timestep: Optional[list] = None, def_end_timestep: Optional[list] = None, ) -> pd.DataFrame: @@ -1362,6 +1422,9 @@ def perform_naive_mpc_optim( :param soc_final: The final battery SOC for the optimization. This parameter \ is optional, if not given soc_init = soc_final = soc_target from the configuration file. :type soc_final: + :param def_total_timestep: The functioning timesteps for this iteration for each deferrable load. \ + (For continuous deferrable loads: functioning timesteps at nominal power) + :type def_total_timestep: list :param def_total_hours: The functioning hours for this iteration for each deferrable load. \ (For continuous deferrable loads: functioning hours at nominal power) :type def_total_hours: list @@ -1395,6 +1458,7 @@ def perform_naive_mpc_optim( soc_init=soc_init, soc_final=soc_final, def_total_hours=def_total_hours, + def_total_timestep=def_total_timestep, def_start_timestep=def_start_timestep, def_end_timestep=def_end_timestep, ) diff --git a/src/emhass/retrieve_hass.py b/src/emhass/retrieve_hass.py index 361b7221..dfe22fb2 100644 --- a/src/emhass/retrieve_hass.py +++ b/src/emhass/retrieve_hass.py @@ -80,6 +80,7 @@ def __init__( self.emhass_conf = emhass_conf self.logger = logger self.get_data_from_file = get_data_from_file + self.var_list = [] def get_ha_config(self): """ @@ -286,6 +287,7 @@ def get_data( + str(self.freq) ) return False + self.var_list = var_list return True def prepare_data( @@ -336,7 +338,21 @@ def prepare_data( "sensor.power_photovoltaics and sensor.power_load_no_var_loads should not be the same" ) return False - if set_zero_min: # Apply minimum values + # Confirm var_replace_zero & var_interp contain only sensors contained in var_list + if isinstance(var_replace_zero, list) and all( + item in var_replace_zero for item in self.var_list + ): + pass + else: + var_replace_zero = [] + if isinstance(var_interp, list) and all( + item in var_interp for item in self.var_list + ): + pass + else: + var_interp = [] + # Apply minimum values + if set_zero_min: self.df_final.clip(lower=0.0, inplace=True, axis=1) self.df_final.replace(to_replace=0.0, value=np.nan, inplace=True) new_var_replace_zero = [] @@ -347,6 +363,12 @@ def prepare_data( new_string = string.replace(var_load, var_load + "_positive") new_var_replace_zero.append(new_string) else: + self.logger.warning( + "Unable to find all the sensors in sensor_replace_zero parameter" + ) + self.logger.warning( + "Confirm 
that all sensors in sensor_replace_zero are sensor_power_photovoltaics and/or sensor_power_load_no_var_loads " + ) new_var_replace_zero = None if var_interp is not None: for string in var_interp: new_string = string.replace(var_load, var_load + "_positive") new_var_interp.append(new_string) else: new_var_interp = None + self.logger.warning( + "Unable to find all the sensors in sensor_linear_interp parameter" + ) + self.logger.warning( + "Confirm all sensors in sensor_linear_interp are sensor_power_photovoltaics and/or sensor_power_load_no_var_loads " + ) # Treating NaN replacement: either by zeros or by linear interpolation if new_var_replace_zero is not None: self.df_final[new_var_replace_zero] = self.df_final[ diff --git a/src/emhass/utils.py b/src/emhass/utils.py index c7ef0c92..db1de811 100644 --- a/src/emhass/utils.py +++ b/src/emhass/utils.py @@ -161,52 +161,52 @@ def update_params_with_ha_config( params = json.loads(params) # Update params currency_to_symbol = { - 'EUR': '€', - 'USD': '$', - 'GBP': '£', - 'YEN': '¥', - 'JPY': '¥', - 'AUD': 'A$', - 'CAD': 'C$', - 'CHF': 'CHF', # Swiss Franc has no special symbol - 'CNY': '¥', - 'INR': '₹', - 'CZK': 'Kč', - 'BGN': 'лв', - 'DKK': 'kr', - 'HUF': 'Ft', - 'PLN': 'zł', - 'RON': 'Leu', - 'SEK': 'kr', - 'TRY': 'Lira', - 'VEF': 'Bolivar', - 'VND': 'Dong', - 'THB': 'Baht', - 'SGD': 'S$', - 'IDR': 'Roepia', - 'ZAR': 'Rand', + "EUR": "€", + "USD": "$", + "GBP": "£", + "YEN": "¥", + "JPY": "¥", + "AUD": "A$", + "CAD": "C$", + "CHF": "CHF", # Swiss Franc has no special symbol + "CNY": "¥", + "INR": "₹", + "CZK": "Kč", + "BGN": "лв", + "DKK": "kr", + "HUF": "Ft", + "PLN": "zł", + "RON": "Leu", + "SEK": "kr", + "TRY": "Lira", + "VEF": "Bolivar", + "VND": "Dong", + "THB": "Baht", + "SGD": "S$", + "IDR": "Roepia", + "ZAR": "Rand", # Add more as needed } - if 'currency' in ha_config.keys(): - ha_config['currency'] = currency_to_symbol.get(ha_config['currency'], 'Unknown') + if "currency" in ha_config.keys(): + ha_config["currency"] = currency_to_symbol.get(ha_config["currency"], "Unknown") else: - ha_config['currency'] = '€' - if 'unit_system' not in ha_config.keys(): - ha_config['unit_system'] = {'temperature': '°C'} - + ha_config["currency"] = "€" + if "unit_system" not in ha_config.keys(): + ha_config["unit_system"] = {"temperature": "°C"} + number_of_deferrable_loads = params["optim_conf"]["number_of_deferrable_loads"] - if 'num_def_loads' in params['passed_data'].keys(): - number_of_deferrable_loads = params['passed_data']['num_def_loads'] - if 'number_of_deferrable_loads' in params['passed_data'].keys(): - number_of_deferrable_loads = params['passed_data']['number_of_deferrable_loads'] - + if "num_def_loads" in params["passed_data"].keys(): + number_of_deferrable_loads = params["passed_data"]["num_def_loads"] + if "number_of_deferrable_loads" in params["passed_data"].keys(): + number_of_deferrable_loads = params["passed_data"]["number_of_deferrable_loads"] + for k in range(number_of_deferrable_loads): - params['passed_data']['custom_predicted_temperature_id'][k].update( - {"unit_of_measurement": ha_config['unit_system']['temperature']} + params["passed_data"]["custom_predicted_temperature_id"][k].update( + {"unit_of_measurement": ha_config["unit_system"]["temperature"]} ) updated_passed_dict = { "custom_cost_fun_id": { - "unit_of_measurement": ha_config['currency'], + "unit_of_measurement": ha_config["currency"], }, "custom_unit_load_cost_id": { "unit_of_measurement": f"{ha_config['currency']}/kWh", }, @@ -268,9 +268,9 @@
params["plant_conf"].update(plant_conf) # Check defaults on HA retrieved config - default_currency_unit = '€' - default_temperature_unit = '°C' - + default_currency_unit = "€" + default_temperature_unit = "°C" + # Some default data needed custom_deferrable_forecast_id = [] custom_predicted_temperature_id = [] @@ -481,10 +481,17 @@ def treat_runtimeparams( else: soc_final = runtimeparams["soc_final"] params["passed_data"]["soc_final"] = soc_final - - params["passed_data"]["operating_hours_of_each_deferrable_load"] = params[ - "optim_conf" - ].get("operating_hours_of_each_deferrable_load", None) + if "operating_timesteps_of_each_deferrable_load" in runtimeparams.keys(): + params["passed_data"]["operating_timesteps_of_each_deferrable_load"] = ( + runtimeparams["operating_timesteps_of_each_deferrable_load"] + ) + params["optim_conf"]["operating_timesteps_of_each_deferrable_load"] = ( + runtimeparams["operating_timesteps_of_each_deferrable_load"] + ) + if "operating_hours_of_each_deferrable_load" in params["optim_conf"].keys(): + params["passed_data"]["operating_hours_of_each_deferrable_load"] = ( + params["optim_conf"]["operating_hours_of_each_deferrable_load"] + ) params["passed_data"]["start_timesteps_of_each_deferrable_load"] = params[ "optim_conf" ].get("start_timesteps_of_each_deferrable_load", None) diff --git a/src/emhass/web_server.py b/src/emhass/web_server.py old mode 100644 new mode 100755 index dc975326..03715117 --- a/src/emhass/web_server.py +++ b/src/emhass/web_server.py @@ -8,9 +8,9 @@ import pickle import re import threading -from distutils.util import strtobool from importlib.metadata import PackageNotFoundError, version from pathlib import Path +from typing import Optional import yaml from flask import Flask, make_response, request @@ -46,6 +46,21 @@ # Define the Flask instance app = Flask(__name__) emhass_conf = {} +entity_path = Path +params_secrets = {} +continual_publish_thread = [] +injection_dict = {} + + +def create_app(settings_override=None): + """ + Create a Flask application. + :param settings_override: Override settings + :return: Flask app + """ + global app + main() + return app def checkFileLog(refString=None) -> bool: @@ -153,6 +168,10 @@ def configuration(): """ app.logger.info("serving configuration.html...") + # get params + if (emhass_conf["data_path"] / "params.pkl").exists(): + with open(str(emhass_conf["data_path"] / "params.pkl"), "rb") as fid: + emhass_conf["config_path"], params = pickle.load(fid) # Load HTML template file_loader = PackageLoader("emhass", "templates") env = Environment(loader=file_loader) @@ -328,7 +347,7 @@ def parameter_set(): with open(str(emhass_conf["data_path"] / "params.pkl"), "wb") as fid: pickle.dump( ( - config_path, + emhass_conf["config_path"], build_params(emhass_conf, params_secrets, config, app.logger), ), fid, @@ -349,6 +368,9 @@ def action_call(action_name): :type action_name: String """ + global continual_publish_thread + global injection_dict + # Setting up parameters # Params ActionStr = " >> Obtaining params: " @@ -524,31 +546,17 @@ def action_call(action_name): return make_response(msg, 400) -if __name__ == "__main__": - # Parsing arguments - parser = argparse.ArgumentParser() - parser.add_argument( - "--url", - type=str, - help="The URL to your Home Assistant instance, ex the external_url in your hass configuration", - ) - parser.add_argument( - "--key", - type=str, - help="Your access key. 
If using EMHASS in standalone this should be a Long-Lived Access Token", - ) - parser.add_argument( - "--no_response", - type=strtobool, - default="False", - help="This is set if json response errors occur", - ) - args = parser.parse_args() - +def main( + args: Optional[dict] = None, +): + global continual_publish_thread + global emhass_conf + global entity_path + global injection_dict # Pre formatted config parameters config = {} # Secrets - params_secrets = {} + global params_secrets # Built parameters (formatted config + secrets) params = None @@ -601,11 +609,16 @@ def action_call(action_name): app.logger.setLevel(logging.DEBUG) ## Secrets + # Argument argument = {} - if args.url: - argument["url"] = args.url - if args.key: - argument["key"] = args.key + no_response = False + if args is not None: + if args.get("url", None): + argument["url"] = args["url"] + if args.get("key", None): + argument["key"] = args["key"] + if args.get("no_response", None): + no_response = args["no_response"] # Combine secrets from ENV, Arguments/ARG, Secrets file (secrets_emhass.yaml), options (options.json from addon configuration file) and/or Home Assistant Standalone API (if exist) emhass_conf, secrets = build_secrets( emhass_conf, @@ -613,7 +626,7 @@ def action_call(action_name): argument, options_path, os.getenv("SECRETS_PATH", default="/app/secrets_emhass.yaml"), - bool(args.no_response), + bool(no_response), ) params_secrets.update(secrets) @@ -706,4 +719,33 @@ def action_call(action_name): app.logger.info("Using core emhass version: " + version("emhass")) except PackageNotFoundError: app.logger.info("Using development emhass version") + + return server_ip, port + + +if __name__ == "__main__": + # Parsing arguments + parser = argparse.ArgumentParser() + parser.add_argument( + "--url", + type=str, + help="The URL to your Home Assistant instance, ex the external_url in your hass configuration", + ) + parser.add_argument( + "--key", + type=str, + help="Your access key. 
If using EMHASS in standalone this should be a Long-Lived Access Token", + ) + parser.add_argument( + "--no_response", + type=bool, + default=False, + help="This is set if json response errors occur", + ) + args = parser.parse_args() + + server_ip, port = main(vars(args)) + os.environ["IP"] = str(server_ip) + os.environ["PORT"] = str(port) + serve(app, host=server_ip, port=port, threads=8) diff --git a/tests/test_forecast.py b/tests/test_forecast.py index 6d0b115f..d80e6bfd 100644 --- a/tests/test_forecast.py +++ b/tests/test_forecast.py @@ -8,11 +8,11 @@ import os import pathlib import pickle +import re import unittest import pandas as pd import requests_mock -import re from emhass import utils from emhass.command_line import set_input_data_dict @@ -77,7 +77,9 @@ def setUp(self): # Obtain sensor values from saved file if self.get_data_from_file: with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp: - self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = pickle.load(inp) + self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = ( + pickle.load(inp) + ) self.retrieve_hass_conf["sensor_power_load_no_var_loads"] = str( self.var_list[0] ) @@ -334,7 +336,9 @@ def test_get_weather_forecast_solcast_multiroofs_method_mock(self): } self.fcst.retrieve_hass_conf["solcast_api_key"] = "123456" self.fcst.retrieve_hass_conf["solcast_rooftop_id"] = "111111,222222,333333" - roof_ids = re.split(r"[,\s]+", self.fcst.retrieve_hass_conf["solcast_rooftop_id"].strip()) + roof_ids = re.split( + r"[,\s]+", self.fcst.retrieve_hass_conf["solcast_rooftop_id"].strip() + ) if os.path.isfile(emhass_conf["data_path"] / "weather_forecast_data.pkl"): os.rename( emhass_conf["data_path"] / "weather_forecast_data.pkl", @@ -343,13 +347,14 @@ def test_get_weather_forecast_solcast_multiroofs_method_mock(self): with requests_mock.mock() as m: for roof_id in roof_ids: data = bz2.BZ2File( - str(emhass_conf["data_path"] / "test_response_solcast_get_method.pbz2"), + str( + emhass_conf["data_path"] + / "test_response_solcast_get_method.pbz2" + ), "rb", ) data = cPickle.load(data) - get_url = ( - f"https://api.solcast.com.au/rooftop_sites/{roof_id}/forecasts?hours=24" - ) + get_url = f"https://api.solcast.com.au/rooftop_sites/{roof_id}/forecasts?hours=24" m.get(get_url, json=data.json()) df_weather_scrap = self.fcst.get_weather_forecast(method="solcast") self.assertIsInstance(df_weather_scrap, type(pd.DataFrame())) @@ -959,7 +964,7 @@ def test_get_load_forecast_mlforecaster(self): # Test load forecast with typical statistics method def test_get_load_forecast_typical(self): - P_load_forecast = self.fcst.get_load_forecast(method='typical') + P_load_forecast = self.fcst.get_load_forecast(method="typical") self.assertIsInstance(P_load_forecast, pd.core.series.Series) self.assertIsInstance( P_load_forecast.index, pd.core.indexes.datetimes.DatetimeIndex diff --git a/tests/test_machine_learning_forecaster.py b/tests/test_machine_learning_forecaster.py index 43f7613d..418b539f 100644 --- a/tests/test_machine_learning_forecaster.py +++ b/tests/test_machine_learning_forecaster.py @@ -9,7 +9,7 @@ import numpy as np import pandas as pd -from skforecast.recursive import ForecasterRecursive +from skforecast.ForecasterAutoreg import ForecasterAutoreg from emhass import utils from emhass.command_line import set_input_data_dict @@ -100,16 +100,18 @@ def setUp(self): ) # Open and extract saved sensor data to test against with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp: - 
self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = pickle.load(inp) + self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = ( + pickle.load(inp) + ) def test_fit(self): df_pred, df_pred_backtest = self.mlf.fit() - self.assertIsInstance(self.mlf.forecaster, ForecasterRecursive) + self.assertIsInstance(self.mlf.forecaster, ForecasterAutoreg) self.assertIsInstance(df_pred, pd.DataFrame) self.assertTrue(df_pred_backtest == None) # Refit with backtest evaluation df_pred, df_pred_backtest = self.mlf.fit(perform_backtest=True) - self.assertIsInstance(self.mlf.forecaster, ForecasterRecursive) + self.assertIsInstance(self.mlf.forecaster, ForecasterAutoreg) self.assertIsInstance(df_pred, pd.DataFrame) self.assertIsInstance(df_pred_backtest, pd.DataFrame) diff --git a/tests/test_optimization.py b/tests/test_optimization.py index 5aa1d8cb..2cdee51c 100644 --- a/tests/test_optimization.py +++ b/tests/test_optimization.py @@ -73,7 +73,9 @@ def setUp(self): # Obtain sensor values from saved file if get_data_from_file: with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp: - self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = pickle.load(inp) + self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = ( + pickle.load(inp) + ) self.retrieve_hass_conf["sensor_power_load_no_var_loads"] = str( self.var_list[0] ) @@ -512,6 +514,7 @@ def test_perform_naive_mpc_optim(self): soc_init=soc_init, soc_final=soc_final, def_total_hours=def_total_hours, + def_total_timestep=None, def_start_timestep=def_start_timestep, def_end_timestep=def_end_timestep, ) @@ -543,6 +546,7 @@ def test_perform_naive_mpc_optim(self): soc_init=soc_init, soc_final=soc_final, def_total_hours=def_total_hours, + def_total_timestep=None, def_start_timestep=def_start_timestep, def_end_timestep=def_end_timestep, ) @@ -651,6 +655,7 @@ def run_penalty_test_forecast(self): self.P_load_forecast, prediction_horizon, def_total_hours=def_total_hours, + def_total_timestep=None, def_start_timestep=def_start_timestep, def_end_timestep=def_end_timestep, ) diff --git a/tests/test_retrieve_hass.py b/tests/test_retrieve_hass.py index 30a59f5a..27941838 100644 --- a/tests/test_retrieve_hass.py +++ b/tests/test_retrieve_hass.py @@ -92,7 +92,9 @@ def setUp(self): # Obtain sensor values from saved file if self.get_data_from_file: with open(emhass_conf["data_path"] / "test_df_final.pkl", "rb") as inp: - self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = pickle.load(inp) + self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config = ( + pickle.load(inp) + ) # Else obtain sensor values from HA else: self.days_list = get_days_list( @@ -110,28 +112,33 @@ def setUp(self): ) # Mocking retrieve of ha_config using: self.rh.get_ha_config() self.rh.ha_config = { - 'country': 'FR', - 'currency': 'EUR', - 'elevation': 4807, - 'latitude': 48.83, - 'longitude': 6.86, - 'time_zone': 'Europe/Paris', - 'unit_system': { - 'length': 'km', - 'accumulated_precipitation': 'mm', - 'area': 'm²', - 'mass': 'g', - 'pressure': 'Pa', - 'temperature': '°C', - 'volume': 'L', - 'wind_speed': 'm/s' - } + "country": "FR", + "currency": "EUR", + "elevation": 4807, + "latitude": 48.83, + "longitude": 6.86, + "time_zone": "Europe/Paris", + "unit_system": { + "length": "km", + "accumulated_precipitation": "mm", + "area": "m²", + "mass": "g", + "pressure": "Pa", + "temperature": "°C", + "volume": "L", + "wind_speed": "m/s", + }, } # Check to save updated data to file if save_data_to_file: with 
open(emhass_conf["data_path"] / "test_df_final.pkl", "wb") as outp: pickle.dump( - (self.rh.df_final, self.days_list, self.var_list, self.rh.ha_config), + ( + self.rh.df_final, + self.days_list, + self.var_list, + self.rh.ha_config, + ), outp, pickle.HIGHEST_PROTOCOL, ) diff --git a/tests/test_utils.py b/tests/test_utils.py index d9dae94d..5276d46e 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -455,38 +455,253 @@ def test_update_params_with_ha_config(self): logger, emhass_conf, ) - ha_config = { - 'currency': 'USD', - 'unit_system': {'temperature': '°F'} - } + ha_config = {"currency": "USD", "unit_system": {"temperature": "°F"}} params_with_ha_config_json = utils.update_params_with_ha_config( params, ha_config, ) params_with_ha_config = json.loads(params_with_ha_config_json) - self.assertTrue(params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][0]["unit_of_measurement"] == "°F") - self.assertTrue(params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][1]["unit_of_measurement"] == "°F") - self.assertTrue(params_with_ha_config["passed_data"]["custom_cost_fun_id"]["unit_of_measurement"] == '$') - self.assertTrue(params_with_ha_config["passed_data"]["custom_unit_load_cost_id"]["unit_of_measurement"] == '$/kWh') - self.assertTrue(params_with_ha_config["passed_data"]["custom_unit_prod_price_id"]["unit_of_measurement"] == '$/kWh') + self.assertTrue( + params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][0][ + "unit_of_measurement" + ] + == "°F" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][1][ + "unit_of_measurement" + ] + == "°F" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_cost_fun_id"][ + "unit_of_measurement" + ] + == "$" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_unit_load_cost_id"][ + "unit_of_measurement" + ] + == "$/kWh" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_unit_prod_price_id"][ + "unit_of_measurement" + ] + == "$/kWh" + ) def test_update_params_with_ha_config_special_case(self): # Test special passed runtime params runtimeparams = { - 'prediction_horizon': 28, - 'pv_power_forecast': [523, 873, 1059, 1195, 1291, 1352, 1366, 1327, 1254, 1150, 1004, 813, 589, 372, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 153, 228, 301, 363, 407, 438, 456, 458, 443, 417, 381, 332, 269, 195, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'num_def_loads': 2, 'P_deferrable_nom': [0, 0], 'def_total_hours': [0, 0], 'treat_def_as_semi_cont': [1, 1], 'set_def_constant': [1, 1], - 'def_start_timestep': [0, 0], 'def_end_timestep': [0, 0], 'soc_init': 0.64, 'soc_final': 0.9, - 'load_cost_forecast': [0.2751, 0.2751, 0.2729, 0.2729, 0.2748, 0.2748, 0.2746, 0.2746, 0.2815, 0.2815, 0.2841, 0.2841, 0.282, 0.282, 0.288, 0.288, 0.29, 0.29, 0.2841, 0.2841, 0.2747, 0.2747, 0.2677, 0.2677, 0.2628, 0.2628, 0.2532, 0.2532], - 'prod_price_forecast': [0.1213, 0.1213, 0.1192, 0.1192, 0.121, 0.121, 0.1208, 0.1208, 0.1274, 0.1274, 0.1298, 0.1298, 0.1278, 0.1278, 0.1335, 0.1335, 0.1353, 0.1353, 0.1298, 0.1298, 0.1209, 0.1209, 0.1143, 0.1143, 0.1097, 0.1097, 0.1007, 0.1007], - 'alpha': 1, 'beta': 0, - 'load_power_forecast': [399, 300, 400, 600, 300, 200, 200, 200, 200, 300, 300, 200, 400, 200, 200, 400, 400, 400, 300, 300, 300, 600, 800, 500, 400, 400, 500, 500, 2400, 2300, 2400, 2400, 2300, 2400, 2400, 2400, 2300, 2400, 2400, 200, 200, 300, 300, 300, 300, 300, 300, 300]} + 
"prediction_horizon": 28, + "pv_power_forecast": [ + 523, + 873, + 1059, + 1195, + 1291, + 1352, + 1366, + 1327, + 1254, + 1150, + 1004, + 813, + 589, + 372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 153, + 228, + 301, + 363, + 407, + 438, + 456, + 458, + 443, + 417, + 381, + 332, + 269, + 195, + 123, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + "num_def_loads": 2, + "P_deferrable_nom": [0, 0], + "def_total_hours": [0, 0], + "treat_def_as_semi_cont": [1, 1], + "set_def_constant": [1, 1], + "def_start_timestep": [0, 0], + "def_end_timestep": [0, 0], + "soc_init": 0.64, + "soc_final": 0.9, + "load_cost_forecast": [ + 0.2751, + 0.2751, + 0.2729, + 0.2729, + 0.2748, + 0.2748, + 0.2746, + 0.2746, + 0.2815, + 0.2815, + 0.2841, + 0.2841, + 0.282, + 0.282, + 0.288, + 0.288, + 0.29, + 0.29, + 0.2841, + 0.2841, + 0.2747, + 0.2747, + 0.2677, + 0.2677, + 0.2628, + 0.2628, + 0.2532, + 0.2532, + ], + "prod_price_forecast": [ + 0.1213, + 0.1213, + 0.1192, + 0.1192, + 0.121, + 0.121, + 0.1208, + 0.1208, + 0.1274, + 0.1274, + 0.1298, + 0.1298, + 0.1278, + 0.1278, + 0.1335, + 0.1335, + 0.1353, + 0.1353, + 0.1298, + 0.1298, + 0.1209, + 0.1209, + 0.1143, + 0.1143, + 0.1097, + 0.1097, + 0.1007, + 0.1007, + ], + "alpha": 1, + "beta": 0, + "load_power_forecast": [ + 399, + 300, + 400, + 600, + 300, + 200, + 200, + 200, + 200, + 300, + 300, + 200, + 400, + 200, + 200, + 400, + 400, + 400, + 300, + 300, + 300, + 600, + 800, + 500, + 400, + 400, + 500, + 500, + 2400, + 2300, + 2400, + 2400, + 2300, + 2400, + 2400, + 2400, + 2300, + 2400, + 2400, + 200, + 200, + 300, + 300, + 300, + 300, + 300, + 300, + 300, + ], + } params_ = json.loads(self.params_json) - params_['passed_data'].update(runtimeparams) - + params_["passed_data"].update(runtimeparams) + runtimeparams_json = json.dumps(runtimeparams) params_json = json.dumps(params_) - + retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse( params_json, logger ) @@ -501,32 +716,243 @@ def test_update_params_with_ha_config_special_case(self): logger, emhass_conf, ) - ha_config = { - 'currency': 'USD', - 'unit_system': {'temperature': '°F'} - } + ha_config = {"currency": "USD", "unit_system": {"temperature": "°F"}} params_with_ha_config_json = utils.update_params_with_ha_config( params, ha_config, ) params_with_ha_config = json.loads(params_with_ha_config_json) - self.assertTrue(params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][0]["unit_of_measurement"] == "°F") - self.assertTrue(params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][1]["unit_of_measurement"] == "°F") - self.assertTrue(params_with_ha_config["passed_data"]["custom_cost_fun_id"]["unit_of_measurement"] == '$') - self.assertTrue(params_with_ha_config["passed_data"]["custom_unit_load_cost_id"]["unit_of_measurement"] == '$/kWh') - self.assertTrue(params_with_ha_config["passed_data"]["custom_unit_prod_price_id"]["unit_of_measurement"] == '$/kWh') + self.assertTrue( + params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][0][ + "unit_of_measurement" + ] + == "°F" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_predicted_temperature_id"][1][ + "unit_of_measurement" + ] + == "°F" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_cost_fun_id"][ + "unit_of_measurement" + ] + == "$" + ) + self.assertTrue( + 
params_with_ha_config["passed_data"]["custom_unit_load_cost_id"][ + "unit_of_measurement" + ] + == "$/kWh" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_unit_prod_price_id"][ + "unit_of_measurement" + ] + == "$/kWh" + ) # Test with 0 deferrable loads runtimeparams = { - 'prediction_horizon': 28, - 'pv_power_forecast': [523, 873, 1059, 1195, 1291, 1352, 1366, 1327, 1254, 1150, 1004, 813, 589, 372, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 153, 228, 301, 363, 407, 438, 456, 458, 443, 417, 381, 332, 269, 195, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - 'num_def_loads': 0, - 'def_start_timestep': [0, 0], 'def_end_timestep': [0, 0], 'soc_init': 0.64, 'soc_final': 0.9, - 'load_cost_forecast': [0.2751, 0.2751, 0.2729, 0.2729, 0.2748, 0.2748, 0.2746, 0.2746, 0.2815, 0.2815, 0.2841, 0.2841, 0.282, 0.282, 0.288, 0.288, 0.29, 0.29, 0.2841, 0.2841, 0.2747, 0.2747, 0.2677, 0.2677, 0.2628, 0.2628, 0.2532, 0.2532], - 'prod_price_forecast': [0.1213, 0.1213, 0.1192, 0.1192, 0.121, 0.121, 0.1208, 0.1208, 0.1274, 0.1274, 0.1298, 0.1298, 0.1278, 0.1278, 0.1335, 0.1335, 0.1353, 0.1353, 0.1298, 0.1298, 0.1209, 0.1209, 0.1143, 0.1143, 0.1097, 0.1097, 0.1007, 0.1007], - 'alpha': 1, 'beta': 0, - 'load_power_forecast': [399, 300, 400, 600, 300, 200, 200, 200, 200, 300, 300, 200, 400, 200, 200, 400, 400, 400, 300, 300, 300, 600, 800, 500, 400, 400, 500, 500, 2400, 2300, 2400, 2400, 2300, 2400, 2400, 2400, 2300, 2400, 2400, 200, 200, 300, 300, 300, 300, 300, 300, 300]} + "prediction_horizon": 28, + "pv_power_forecast": [ + 523, + 873, + 1059, + 1195, + 1291, + 1352, + 1366, + 1327, + 1254, + 1150, + 1004, + 813, + 589, + 372, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 153, + 228, + 301, + 363, + 407, + 438, + 456, + 458, + 443, + 417, + 381, + 332, + 269, + 195, + 123, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ], + "num_def_loads": 0, + "def_start_timestep": [0, 0], + "def_end_timestep": [0, 0], + "soc_init": 0.64, + "soc_final": 0.9, + "load_cost_forecast": [ + 0.2751, + 0.2751, + 0.2729, + 0.2729, + 0.2748, + 0.2748, + 0.2746, + 0.2746, + 0.2815, + 0.2815, + 0.2841, + 0.2841, + 0.282, + 0.282, + 0.288, + 0.288, + 0.29, + 0.29, + 0.2841, + 0.2841, + 0.2747, + 0.2747, + 0.2677, + 0.2677, + 0.2628, + 0.2628, + 0.2532, + 0.2532, + ], + "prod_price_forecast": [ + 0.1213, + 0.1213, + 0.1192, + 0.1192, + 0.121, + 0.121, + 0.1208, + 0.1208, + 0.1274, + 0.1274, + 0.1298, + 0.1298, + 0.1278, + 0.1278, + 0.1335, + 0.1335, + 0.1353, + 0.1353, + 0.1298, + 0.1298, + 0.1209, + 0.1209, + 0.1143, + 0.1143, + 0.1097, + 0.1097, + 0.1007, + 0.1007, + ], + "alpha": 1, + "beta": 0, + "load_power_forecast": [ + 399, + 300, + 400, + 600, + 300, + 200, + 200, + 200, + 200, + 300, + 300, + 200, + 400, + 200, + 200, + 400, + 400, + 400, + 300, + 300, + 300, + 600, + 800, + 500, + 400, + 400, + 500, + 500, + 2400, + 2300, + 2400, + 2400, + 2300, + 2400, + 2400, + 2400, + 2300, + 2400, + 2400, + 200, + 200, + 300, + 300, + 300, + 300, + 300, + 300, + 300, + ], + } params_ = json.loads(self.params_json) - params_['passed_data'].update(runtimeparams) + params_["passed_data"].update(runtimeparams) runtimeparams_json = json.dumps(runtimeparams) params_json = json.dumps(params_) retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse( @@ -543,18 +969,30 @@ def 
test_update_params_with_ha_config_special_case(self): logger, emhass_conf, ) - ha_config = { - 'currency': 'USD', - 'unit_system': {'temperature': '°F'} - } + ha_config = {"currency": "USD", "unit_system": {"temperature": "°F"}} params_with_ha_config_json = utils.update_params_with_ha_config( params, ha_config, ) params_with_ha_config = json.loads(params_with_ha_config_json) - self.assertTrue(params_with_ha_config["passed_data"]["custom_cost_fun_id"]["unit_of_measurement"] == '$') - self.assertTrue(params_with_ha_config["passed_data"]["custom_unit_load_cost_id"]["unit_of_measurement"] == '$/kWh') - self.assertTrue(params_with_ha_config["passed_data"]["custom_unit_prod_price_id"]["unit_of_measurement"] == '$/kWh') + self.assertTrue( + params_with_ha_config["passed_data"]["custom_cost_fun_id"][ + "unit_of_measurement" + ] + == "$" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_unit_load_cost_id"][ + "unit_of_measurement" + ] + == "$/kWh" + ) + self.assertTrue( + params_with_ha_config["passed_data"]["custom_unit_prod_price_id"][ + "unit_of_measurement" + ] + == "$/kWh" + ) def test_build_secrets(self): # Test the build_secrets defaults from get_test_params()
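On the `--no_response` flag added to web_server.py above: argparse's `type=bool` applies Python's `bool()` to the raw string, so any non-empty value, including the literal string "False", parses as `True`. If true/false strings are expected on the command line, a converter along these lines is the usual workaround (`str2bool` here is a hypothetical helper for illustration, not part of EMHASS):

```python
import argparse


def str2bool(value: str) -> bool:
    # bool("False") is True, so type=bool cannot parse "true"/"false" strings;
    # this converter maps the common spellings explicitly.
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError(f"boolean value expected, got {value!r}")


parser = argparse.ArgumentParser()
parser.add_argument("--no_response", type=str2bool, default=False,
                    help="Set this if JSON response errors occur")

print(parser.parse_args(["--no_response", "False"]).no_response)  # False
print(parser.parse_args(["--no_response", "true"]).no_response)   # True
```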
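The multi-rooftop Solcast test in test_forecast.py relies on two pieces: `re.split(r"[,\s]+", ...)` tolerates comma- and/or whitespace-separated rooftop IDs, and one mocked GET endpoint is registered per rooftop before `get_weather_forecast(method="solcast")` runs. A self-contained sketch of that mocking pattern, with a stub payload in place of the bz2-compressed pickled response the real test replays:

```python
import re

import requests
import requests_mock

# Mixed separators are accepted, exactly as in the test's regex split.
rooftop_ids = re.split(r"[,\s]+", "111111, 222222 333333".strip())

with requests_mock.Mocker() as m:
    for roof_id in rooftop_ids:
        # Same endpoint shape as the test registers for each rooftop site.
        url = f"https://api.solcast.com.au/rooftop_sites/{roof_id}/forecasts?hours=24"
        m.get(url, json={"forecasts": []})

    for roof_id in rooftop_ids:
        url = f"https://api.solcast.com.au/rooftop_sites/{roof_id}/forecasts?hours=24"
        assert requests.get(url).json() == {"forecasts": []}
```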
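The import change in test_machine_learning_forecaster.py moves from `skforecast.recursive.ForecasterRecursive` back to `skforecast.ForecasterAutoreg.ForecasterAutoreg`, the pre-0.14 skforecast layout (0.14 renamed the class to `ForecasterRecursive`), so this diff implies pinning skforecast below 0.14. If both layouts ever had to coexist, a compatibility import is one option; this is a sketch, not something the PR itself does:

```python
try:
    # skforecast >= 0.14 layout
    from skforecast.recursive import ForecasterRecursive as Forecaster
except ImportError:
    # skforecast < 0.14 layout, which this diff targets
    from skforecast.ForecasterAutoreg import ForecasterAutoreg as Forecaster

print(Forecaster.__name__)
```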
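The test_optimization.py hunks add an explicit `def_total_timestep=None` next to `def_total_hours` in every `perform_naive_mpc_optim` call, which suggests a deferrable load's total runtime can now be given either in hours or directly in optimization timesteps. Assuming the timestep count, when provided, takes precedence and hours otherwise convert at the optimization frequency, the relationship would look like the hypothetical helper below; the real logic lives inside EMHASS's optimization module:

```python
import pandas as pd


def deferrable_runtime_steps(def_total_hours: float,
                             def_total_timestep: int | None,
                             freq: pd.Timedelta = pd.Timedelta("30min")) -> int:
    # Assumed precedence: an explicit timestep count wins; otherwise
    # hours are converted at the optimization frequency.
    if def_total_timestep is not None:
        return int(def_total_timestep)
    return round(def_total_hours * 3600 / freq.total_seconds())


print(deferrable_runtime_steps(5, None))  # 10 steps of 30 min
print(deferrable_runtime_steps(5, 8))     # 8 steps, hours ignored
```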
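Several `setUp` methods in these diffs unpack the same four-element tuple, `(df_final, days_list, var_list, ha_config)`, from `test_df_final.pkl`, and test_retrieve_hass.py regenerates that file with `pickle.HIGHEST_PROTOCOL` when `save_data_to_file` is set. The dump and load sites must agree on element order and count; a minimal round-trip sketch with placeholder data:

```python
import pickle
import tempfile

import pandas as pd

df_final = pd.DataFrame({"sensor.power_load_no_var_loads": [120.0, 350.0]})
days_list = pd.date_range("2024-06-01", periods=2, freq="D")
var_list = ["sensor.power_load_no_var_loads"]
ha_config = {"currency": "EUR", "time_zone": "Europe/Paris"}

with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as outp:
    # Same tuple shape and protocol as the tests' save path.
    pickle.dump((df_final, days_list, var_list, ha_config), outp,
                pickle.HIGHEST_PROTOCOL)
    path = outp.name

with open(path, "rb") as inp:
    # Unpacking must mirror the dumped tuple exactly.
    df2, days2, vars2, ha2 = pickle.load(inp)

assert ha2["currency"] == "EUR" and list(vars2) == var_list
```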
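The test_utils.py assertions pin down the contract of `utils.update_params_with_ha_config`: a Home Assistant `currency` of "USD" must surface as "$" on `custom_cost_fun_id` and as "$/kWh" on the load-cost and production-price entities, and the configured temperature unit ("°F" here) must propagate to every `custom_predicted_temperature_id` entry. A toy reduction of that mapping for illustration; `apply_ha_units` is hypothetical, and the real function parses and re-serializes the full EMHASS params JSON:

```python
def apply_ha_units(passed_data: dict, ha_config: dict) -> dict:
    # Assumed reduction of the behaviour the assertions exercise.
    symbol = {"USD": "$", "EUR": "€"}.get(ha_config.get("currency", "EUR"), "€")
    temp_unit = ha_config.get("unit_system", {}).get("temperature", "°C")

    passed_data["custom_cost_fun_id"]["unit_of_measurement"] = symbol
    passed_data["custom_unit_load_cost_id"]["unit_of_measurement"] = f"{symbol}/kWh"
    passed_data["custom_unit_prod_price_id"]["unit_of_measurement"] = f"{symbol}/kWh"
    for entity in passed_data.get("custom_predicted_temperature_id", []):
        entity["unit_of_measurement"] = temp_unit
    return passed_data


out = apply_ha_units(
    {
        "custom_cost_fun_id": {},
        "custom_unit_load_cost_id": {},
        "custom_unit_prod_price_id": {},
        "custom_predicted_temperature_id": [{}, {}],
    },
    {"currency": "USD", "unit_system": {"temperature": "°F"}},
)
assert out["custom_cost_fun_id"]["unit_of_measurement"] == "$"
assert out["custom_predicted_temperature_id"][0]["unit_of_measurement"] == "°F"
```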