diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml
index e8831d525e..2adce37686 100644
--- a/.github/workflows/build-python.yml
+++ b/.github/workflows/build-python.yml
@@ -57,28 +57,39 @@ jobs:
         os: [ubuntu-latest]
         pydantic_v1: [false]
         pandas_v1: [false]
+        langchain_minimal: [false]
         # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources
         include:
           - python-version: "3.10"
             os: windows-2019
             pydantic_v1: false
             pandas_v1: false
+            langchain_minimal: false
           - python-version: "3.10"
             os: windows-2022
             pydantic_v1: false
             pandas_v1: false
+            langchain_minimal: false
           - python-version: "3.10"
             os: macos-latest
             pydantic_v1: false
             pandas_v1: false
+            langchain_minimal: false
           - python-version: "3.10"
             os: ubuntu-latest
             pydantic_v1: true
             pandas_v1: false
+            langchain_minimal: false
           - python-version: "3.10"
             os: ubuntu-latest
             pydantic_v1: false
             pandas_v1: true
+            langchain_minimal: false
+          - python-version: "3.10"
+            os: ubuntu-latest
+            pydantic_v1: false
+            pandas_v1: false
+            langchain_minimal: true
     continue-on-error: false # https://ncorti.com/blog/howto-github-actions-build-matrix
     steps:
       - name: Checkout code
@@ -115,6 +126,18 @@ jobs:
           pdm run pip uninstall pydantic pydantic_core -y
           pdm run pip install "pydantic>=1,<2"
 
+      - name: Install langchain minimal version
+        if: ${{ matrix.langchain_minimal }}
+        run: |
+          pdm run pip uninstall langchain -y
+          pdm run pip install "langchain==0.0.275"
+
+      - name: Check langchain installed version
+        if: ${{ matrix.langchain_minimal }}
+        run: |
+          pdm run pip freeze | grep '^langchain'
+          pdm run pip freeze | grep -q '^langchain==0.0.275'
+
       - name: Check Pydantic installed version
         run: |
           pdm run pip freeze | grep '^pydantic'
diff --git a/pdm.lock b/pdm.lock
index 89856131d3..c045e58d04 100644
--- a/pdm.lock
+++ b/pdm.lock
@@ -6,7 +6,7 @@ groups = ["default", "dev", "doc", "llm", "ml_runtime", "server", "test"]
 cross_platform = true
 static_urls = false
 lock_version = "4.3"
-content_hash = "sha256:35884f991e684f142bffe4568a240fd082e4cdea85e9397019d190eaefa0b64f"
+content_hash = "sha256:8ff0b91745da2639c74332dedaed4d484c5d0c0ab8f349dd27861ec776614468"
 
 [[package]]
 name = "absl-py"
@@ -1337,6 +1337,7 @@ files = [
     {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b641161c302efbb860ae6b081f406839a8b7d5573f20a455539823802c655f63"},
     {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d57e20ba591727da0c230ab2c3f200ac9d6d333860d85348816e1dca4cc4792e"},
     {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5805e71e5b570d490938d55552f5a9e10f477c19400c38bf1d5190d760691846"},
+    {file = "greenlet-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:52e93b28db27ae7d208748f45d2db8a7b6a380e0d703f099c949d0f0d80b70e9"},
     {file = "greenlet-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f7bfb769f7efa0eefcd039dd19d843a4fbfbac52f1878b1da2ed5793ec9b1a65"},
     {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e6c7db42638dc45cf2e13c73be16bf83179f7859b07cfc139518941320be96"},
     {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1757936efea16e3f03db20efd0cd50a1c86b06734f9f7338a90c4ba85ec2ad5a"},
@@ -1345,6 +1346,7 @@ files = [
     {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2847e5d7beedb8d614186962c3d774d40d3374d580d2cbdab7f184580a39d234"},
     {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:97e7ac860d64e2dcba5c5944cfc8fa9ea185cd84061c623536154d5a89237884"},
     {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b2c02d2ad98116e914d4f3155ffc905fd0c025d901ead3f6ed07385e19122c94"},
+    {file = "greenlet-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:22f79120a24aeeae2b4471c711dcf4f8c736a2bb2fabad2a67ac9a55ea72523c"},
     {file = "greenlet-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ce85c43ae54845272f6f9cd8320d034d7a946e9773c693b27d620edec825e376"},
     {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ee2e967bd7ff85d84a2de09df10e021c9b38c7d91dead95b406ed6350c6997"},
     {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87c8ceb0cf8a5a51b8008b643844b7f4a8264a2c13fcbcd8a8316161725383fe"},
@@ -1353,6 +1355,8 @@ files = [
     {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85d2b77e7c9382f004b41d9c72c85537fac834fb141b0296942d52bf03fe4a3d"},
     {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:696d8e7d82398e810f2b3622b24e87906763b6ebfd90e361e88eb85b0e554dc8"},
     {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:329c5a2e5a0ee942f2992c5e3ff40be03e75f745f48847f118a3cfece7a28546"},
+    {file = "greenlet-3.0.1-cp39-cp39-win32.whl", hash = "sha256:cf868e08690cb89360eebc73ba4be7fb461cfbc6168dd88e2fbbe6f31812cd57"},
+    {file = "greenlet-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ac4a39d1abae48184d420aa8e5e63efd1b75c8444dd95daa3e03f6c6310e9619"},
     {file = "greenlet-3.0.1.tar.gz", hash = "sha256:816bd9488a94cba78d93e1abb58000e8266fa9cc2aa9ccdd6eb0696acb24005b"},
 ]
 
diff --git a/pyproject.toml b/pyproject.toml
index 42988cb31a..1be60ba9c9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,7 +59,7 @@ dev = [
     "pydantic>=2",
 ]
 ml_runtime = [
-    "langchain>=0.0.187",
+    "langchain>=0.0.275",
     "nltk>=3.8.1",
     "xgboost>=1.7.5",
     "lightgbm>=3.3.5",
diff --git a/tests/llm/test_talk.py b/tests/llm/test_talk.py
index 106b713081..4f2ab85314 100644
--- a/tests/llm/test_talk.py
+++ b/tests/llm/test_talk.py
@@ -4,11 +4,28 @@
 import pytest
 
 from langchain.agents import AgentExecutor
 from langchain.llms import FakeListLLM
+from langchain.prompts.chat import ChatPromptTemplate
 
 from giskard import llm_config
 from giskard.llm.talk.talk import ModelSpec
 
 
+def test_create_prompt_langchain():
+    ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                """
+                Your task is to criticize an article based on its summary.
+                Please remember to always give unbiased and respectful criticism.
+                You should always start with: "I believe this article is really ..."
+                """,
+            ),
+            ("user", "Article: {article_summary}"),
+        ]
+    )
+
+
 def test_predict(german_credit_test_data, german_credit_model):
     model_spec = ModelSpec(model=german_credit_model)
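For reference, the new test only builds the chat template. Below is a minimal sketch of how the same prompt can be rendered with the (role, template) tuple form of ChatPromptTemplate.from_messages that the langchain>=0.0.275 floor provides; the prompt wording and the article summary are illustrative examples, not part of the diff.

# Sketch: render a critique prompt like the one built in test_create_prompt_langchain.
# Assumes langchain>=0.0.275; the summary text below is made up for illustration.
from langchain.prompts.chat import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Your task is to criticize an article based on its summary."),
        ("user", "Article: {article_summary}"),
    ]
)

# format_messages fills in the template variables and returns a list such as
# [SystemMessage(...), HumanMessage(...)], ready to pass to a chat model.
messages = prompt.format_messages(article_summary="A short study on LLM robustness testing.")
for message in messages:
    print(type(message).__name__, ":", message.content)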