diff --git a/examples/image_generation_crewai.py b/examples/image_generation_crewai.py
index 32c0de22..b584fdc9 100644
--- a/examples/image_generation_crewai.py
+++ b/examples/image_generation_crewai.py
@@ -1,5 +1,3 @@
-import os.path
-
 from dotenv import load_dotenv
 from motleycrew import MotleyCrew, Task
 
@@ -10,7 +8,7 @@
 load_dotenv()
 
 configure_logging(verbose=True)
 
-image_generator_tool = DallEImageGeneratorTool(os.path.realpath("./images"))
+image_generator_tool = DallEImageGeneratorTool()
 # For saving images locally use the line below
 # image_generator_tool = DallEImageGeneratorTool(images_directory="images")
diff --git a/motleycrew/tool/mermaid_evaluator_tool.py b/motleycrew/tool/mermaid_evaluator_tool.py
new file mode 100644
index 00000000..534aabd1
--- /dev/null
+++ b/motleycrew/tool/mermaid_evaluator_tool.py
@@ -0,0 +1,102 @@
+# Requires Node.js (https://nodejs.org/en/download) and the Mermaid CLI:
+# npm install -g @mermaid-js/mermaid-cli
+import io
+import os
+import subprocess
+import tempfile
+from typing import Optional, Union
+
+from langchain_core.pydantic_v1 import create_model, Field
+from langchain_core.tools import Tool
+
+from motleycrew.tool import MotleyTool
+
+
+class MermaidEvaluatorTool(MotleyTool):
+    def __init__(self, format: Optional[str] = "svg"):
+        def eval_mermaid_partial(mermaid_code: str):
+            return eval_mermaid(mermaid_code, format)
+
+        langchain_tool = Tool.from_function(
+            func=eval_mermaid_partial,
+            name="Mermaid Evaluator Tool",
+            description="Renders Mermaid code and returns the resulting image "
+            "as a BytesIO object, or an error message string if rendering fails.",
+            args_schema=create_model(
+                "MermaidEvaluatorToolInput",
+                mermaid_code=(str, Field(description="The Mermaid code to evaluate.")),
+            ),
+        )
+        super().__init__(langchain_tool)
+
+
+def eval_mermaid(mermaid_code: str, format: Optional[str] = "svg") -> Union[io.BytesIO, str]:
+    with tempfile.NamedTemporaryFile(delete=True, mode="w+", suffix=".mmd") as temp_in:
+        temp_in.write(mermaid_code)
+        temp_in.flush()  # Ensure all data is written to disk
+
+        if format in ["md", "markdown"]:
+            raise NotImplementedError(
+                "Markdown format is not yet supported in this wrapper."
+            )
+        assert format in [
+            "svg",
+            "png",
+            "pdf",
+        ], "Invalid format specified, must be svg, png, or pdf."
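+        # The `mmdc` binary from @mermaid-js/mermaid-cli (see the install
+        # notes at the top of this file) must be on PATH; the rendered file
+        # is written to the current working directory and removed in the
+        # `finally` block below.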
+ out_file = f"output.{format}" + + # Prepare the command to call the mermaid CLI + full_code = f"mmdc -i {temp_in.name} -o {out_file} -b transparent" + + try: + # Execute the command + subprocess.run( + full_code, + shell=True, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + # If the process succeeds, read the output file into BytesIO + with open(out_file, "rb") as f: + output_bytes = io.BytesIO(f.read()) + return output_bytes + except subprocess.CalledProcessError as e: + # If the process fails, print the error message + return e.stderr.decode() + finally: + # Clean up the output file if it exists + try: + os.remove(out_file) + except FileNotFoundError: + pass + + +# Define the Mermaid code for the flowchart + +if __name__ == "__main__": + mermaid_code = """ + graph TD; + A[Start] --> B[Decision] + B -- Yes --> C[Option 1] + B -- No --> D[Option 2] + C --> E[End] + D --> E + E[End] --> F[End] + + [[ + """ + + out1 = eval_mermaid(mermaid_code) + output_file_path = "output_file.bin" + if isinstance(out1, str): + print(out1) + exit(1) + # Ensure the pointer is at the beginning of the BytesIO object + out1.seek(0) + + # Open the output file in binary write mode and write the contents of the BytesIO object + with open(output_file_path, "wb") as file_output: + file_output.write(out1.read()) + tool = MermaidEvaluatorTool() + out2 = tool.invoke({"mermaid_code": mermaid_code}) + print(out2) diff --git a/poetry.lock b/poetry.lock index ab3325f2..709fdb41 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1343,13 +1343,13 @@ llama-index-llms-openai = ">=0.1.1,<0.2.0" [[package]] name = "llama-index-core" -version = "0.10.32" +version = "0.10.33" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_core-0.10.32-py3-none-any.whl", hash = "sha256:215f7389dadb78f2df13c20312a3e1e03c41f23e3063907469c4bae67bfd458c"}, - {file = "llama_index_core-0.10.32.tar.gz", hash = "sha256:0078c06d9143390e14c86a40e69716c88c7828533341559edd15e52249ede65a"}, + {file = "llama_index_core-0.10.33-py3-none-any.whl", hash = "sha256:943114fb02dfe62fec5d882d749ad8adf113081aadcb0d4cb2c083b2c9052ed0"}, + {file = "llama_index_core-0.10.33.tar.gz", hash = "sha256:21b98b2c45e0c6b673aa505c7add1e8b730f472ad58d4572b909a34f4a22c36c"}, ] [package.dependencies] @@ -1401,18 +1401,18 @@ llama-index-core = ">=0.10.1,<0.11.0" [[package]] name = "llama-index-indices-managed-llama-cloud" -version = "0.1.5" +version = "0.1.6" description = "llama-index indices llama-cloud integration" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_indices_managed_llama_cloud-0.1.5-py3-none-any.whl", hash = "sha256:79f636cb6f4fabb12fec153564110f7f4dfda3cacb087793a5fec988484d7d2c"}, - {file = "llama_index_indices_managed_llama_cloud-0.1.5.tar.gz", hash = "sha256:47cdde9f06bbddd508f0efcf41de425e85171ac2c8fda8a5fb2a89673e1c8c71"}, + {file = "llama_index_indices_managed_llama_cloud-0.1.6-py3-none-any.whl", hash = "sha256:cba33e1a3677b2a2ae7f239119acbf6dc3818f105edc92315729842b56fbc949"}, + {file = "llama_index_indices_managed_llama_cloud-0.1.6.tar.gz", hash = "sha256:74b3b0e9ebf9d348d3054f9fc0c657031acceb9351c31116ad8d5a7ae4729f5c"}, ] [package.dependencies] llama-index-core = ">=0.10.0,<0.11.0" -llamaindex-py-client = ">=0.1.13,<0.2.0" +llamaindex-py-client = ">=0.1.19,<0.2.0" [[package]] name = "llama-index-legacy" @@ -1565,13 +1565,13 @@ llama-index-core = ">=0.10.29" [[package]] name = "llamaindex-py-client" -version = 
"0.1.18" +version = "0.1.19" description = "" optional = false python-versions = "<4,>=3.8" files = [ - {file = "llamaindex_py_client-0.1.18-py3-none-any.whl", hash = "sha256:5417e41666504a77ecf5bdd9b403ffff1d714880ee30d49e234fb7686177eeeb"}, - {file = "llamaindex_py_client-0.1.18.tar.gz", hash = "sha256:091ee49a92592e3894777ade12516c2137093f9d6441a549f406461917ce9b7e"}, + {file = "llamaindex_py_client-0.1.19-py3-none-any.whl", hash = "sha256:fd9416fd78b97209bf323bc3c7fab314499778563e7274f10853ad560563d10e"}, + {file = "llamaindex_py_client-0.1.19.tar.gz", hash = "sha256:73f74792bb8c092bae6dc626627a09ac13a099fa8d10f8fcc83e17a2b332cca7"}, ] [package.dependencies] @@ -2518,13 +2518,13 @@ image = ["Pillow (>=8.0.0)"] [[package]] name = "pytest" -version = "8.1.1" +version = "8.2.0" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.1.1-py3-none-any.whl", hash = "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7"}, - {file = "pytest-8.1.1.tar.gz", hash = "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044"}, + {file = "pytest-8.2.0-py3-none-any.whl", hash = "sha256:1733f0620f6cda4095bbf0d9ff8022486e91892245bb9e7d5542c018f612f233"}, + {file = "pytest-8.2.0.tar.gz", hash = "sha256:d507d4482197eac0ba2bae2e9babf0672eb333017bcedaa5fb1a3d42c1174b3f"}, ] [package.dependencies] @@ -2532,11 +2532,11 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=1.4,<2.0" +pluggy = ">=1.5,<2.0" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-cov"