From 06127ec09966e8df2fcd4f03a1b53ec566b4a43d Mon Sep 17 00:00:00 2001
From: Jerry Liu
Date: Wed, 11 Oct 2023 08:20:31 -0700
Subject: [PATCH] add multi-document agents v1 (#8065)

---
 .../query_modules/query_engine/modules.md |   1 +
 .../agent/multi_document_agents-v1.ipynb  | 836 ++++++++++++++++++
 llama_index/objects/base.py               |   3 +
 3 files changed, 840 insertions(+)
 create mode 100644 docs/examples/agent/multi_document_agents-v1.ipynb

diff --git a/docs/core_modules/query_modules/query_engine/modules.md b/docs/core_modules/query_modules/query_engine/modules.md
index 255edb7f8f069..e4133573a2549 100644
--- a/docs/core_modules/query_modules/query_engine/modules.md
+++ b/docs/core_modules/query_modules/query_engine/modules.md
@@ -60,6 +60,7 @@ maxdepth: 1
 /examples/query_engine/sub_question_query_engine.ipynb
 /examples/query_engine/recursive_retriever_agents.ipynb
 /examples/agent/multi_document_agents.ipynb
+/examples/agent/multi_document_agents-v1.ipynb
 ```

diff --git a/docs/examples/agent/multi_document_agents-v1.ipynb b/docs/examples/agent/multi_document_agents-v1.ipynb
new file mode 100644
index 0000000000000..7ec4aedf1c329
--- /dev/null
+++ b/docs/examples/agent/multi_document_agents-v1.ipynb
@@ -0,0 +1,836 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "43497beb-817d-4366-9156-f4d7f0d44942",
+ "metadata": {
+ "tags": []
+ },
+ "source": [
+ "# Multi-Document Agents (V1)\n",
+ "\n",
+ "In this guide, we show you how to set up a multi-document agent over the LlamaIndex documentation.\n",
+ "\n",
+ "This is an extension of the V0 multi-document agents, with two additional features:\n",
+ "- Reranking during document (tool) retrieval\n",
+ "- A query planning tool that the agent can use to plan\n",
+ "\n",
+ "\n",
+ "We do this with the following architecture:\n",
+ "\n",
+ "- set up a \"document agent\" over each document: each doc agent can do QA/summarization within its own document\n",
+ "- set up a top-level agent over this set of document agents: it performs tool retrieval and then uses chain-of-thought reasoning over the retrieved set of tools to answer a question."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "1f0e47ac-ec6d-48eb-93a3-0e1fcab22112",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%load_ext autoreload\n",
+ "%autoreload 2"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9be00aba-b6c5-4940-9825-81c5d2cd2f0b",
+ "metadata": {},
+ "source": [
+ "## Setup and Download Data\n",
+ "\n",
+ "In this section, we'll load in the LlamaIndex documentation.\n",
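+ "\n",
+ "Note: beyond `llama-index` itself, this notebook relies on a few extra packages: `llama-hub` (for the `UnstructuredReader` used below), `unstructured`, and `cohere` (for the reranker used later on). Install these first if you haven't already."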
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49893d69-c106-4169-92c3-6b5b751066e9", + "metadata": {}, + "outputs": [], + "source": [ + "domain = \"docs.llamaindex.ai\"\n", + "docs_url = \"https://docs.llamaindex.ai/en/latest/\"\n", + "!wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "c661cb62-1e18-410c-bc2e-e707b66596a3", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_hub.file.unstructured.base import UnstructuredReader\n", + "from pathlib import Path\n", + "from llama_index.llms import OpenAI\n", + "from llama_index import ServiceContext" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "dd2452e8-061b-45b0-990d-36aa39ae02a4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package punkt to /Users/jerryliu/nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n", + "[nltk_data] Downloading package averaged_perceptron_tagger to\n", + "[nltk_data] /Users/jerryliu/nltk_data...\n", + "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n", + "[nltk_data] date!\n" + ] + } + ], + "source": [ + "reader = UnstructuredReader()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "44feebd5-0430-4d73-9cb1-a3de73c1f13e", + "metadata": {}, + "outputs": [], + "source": [ + "all_files_gen = Path(\"./docs.llamaindex.ai/\").rglob(\"*\")\n", + "all_files = [f.resolve() for f in all_files_gen]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "3d837b4b-130c-493c-b62e-6662904c20ca", + "metadata": {}, + "outputs": [], + "source": [ + "all_html_files = [f for f in all_files if f.suffix.lower() == \".html\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3cddf0f5-3c5f-4d42-868d-54bedb12d02b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "418" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(all_html_files)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a1dd0cf-5da2-4ac0-bfd1-8f48921518c5", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index import Document\n", + "\n", + "# TODO: set to higher value if you want more docs\n", + "doc_limit = 100\n", + "\n", + "docs = []\n", + "for idx, f in enumerate(all_html_files):\n", + " if idx > doc_limit:\n", + " break\n", + " print(f\"Idx {idx}/{len(all_html_files)}\")\n", + " loaded_docs = reader.load_data(file=f, split_documents=True)\n", + " # Hardcoded Index. 
Everything before this is the ToC that appears on every page\n",
+ " start_idx = 72\n",
+ " loaded_doc = Document(\n",
+ " text=\"\\n\\n\".join([d.get_content() for d in loaded_docs[start_idx:]]),\n",
+ " metadata={\"path\": str(f)},\n",
+ " )\n",
+ " print(loaded_doc.metadata[\"path\"])\n",
+ " docs.append(loaded_doc)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6189aaf4-2eb7-40bc-9e83-79ce4f221b4b",
+ "metadata": {},
+ "source": [
+ "Define the LLM + Service Context"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 137,
+ "id": "dd6e5e48-91b9-4701-a85d-d98c92323350",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
+ "service_context = ServiceContext.from_defaults(llm=llm)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4eeef31a-fc25-4367-a5ba-945f81d04cf9",
+ "metadata": {},
+ "source": [
+ "## Building Multi-Document Agents\n",
+ "\n",
+ "In this section we show you how to construct the multi-document agent. We first build a document agent for each document, and then define the top-level parent agent with an object index."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 141,
+ "id": "c9c3c2a9-c546-410d-9fbd-1a76f8da4ecc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from llama_index import VectorStoreIndex, SummaryIndex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 142,
+ "id": "e2a26f16-cfe9-4221-b169-57df91b197da",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nest_asyncio\n",
+ "\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "976cd798-2e8d-474c-922a-51b12c5c6f36",
+ "metadata": {},
+ "source": [
+ "### Build Document Agent for each Document\n",
+ "\n",
+ "In this section we define a \"document agent\" for each document.\n",
+ "\n",
+ "We define both a vector index (for semantic search) and a summary index (for summarization) for each document. The two query engines are then converted into tools that are passed to an OpenAI function calling agent.\n",
+ "\n",
+ "This document agent can dynamically choose to perform semantic search or summarization within a given document.\n",
+ "\n",
+ "We create a separate document agent for each document."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 143,
+ "id": "eacdf3a7-cfe3-4c2b-9037-b28a065ed148",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "from llama_index.agent import OpenAIAgent\n",
+ "from llama_index import load_index_from_storage, StorageContext\n",
+ "from llama_index.tools import QueryEngineTool, ToolMetadata\n",
+ "from llama_index.node_parser import SimpleNodeParser\n",
+ "import os\n",
+ "from tqdm.notebook import tqdm\n",
+ "import pickle\n",
+ "\n",
+ "\n",
+ "async def build_agent_per_doc(nodes, file_base):\n",
+ " print(file_base)\n",
+ "\n",
+ " vi_out_path = f\"./data/llamaindex_docs/{file_base}\"\n",
+ " summary_out_path = f\"./data/llamaindex_docs/{file_base}_summary.pkl\"\n",
+ " if not os.path.exists(vi_out_path):\n",
+ " Path(\"./data/llamaindex_docs/\").mkdir(parents=True, exist_ok=True)\n",
+ " # build vector index\n",
+ " vector_index = VectorStoreIndex(nodes, service_context=service_context)\n",
+ " vector_index.storage_context.persist(persist_dir=vi_out_path)\n",
+ " else:\n",
+ " vector_index = load_index_from_storage(\n",
+ " StorageContext.from_defaults(persist_dir=vi_out_path),\n",
+ " service_context=service_context,\n",
+ " )\n",
+ "\n",
+ " # build summary index\n",
+ " summary_index = SummaryIndex(nodes, service_context=service_context)\n",
+ "\n",
+ " # define query engines\n",
+ " vector_query_engine = vector_index.as_query_engine()\n",
+ " summary_query_engine = summary_index.as_query_engine(response_mode=\"tree_summarize\")\n",
+ "\n",
+ " # extract a summary (cached on disk so reruns are cheap)\n",
+ " if not os.path.exists(summary_out_path):\n",
+ " Path(summary_out_path).parent.mkdir(parents=True, exist_ok=True)\n",
+ " summary = str(\n",
+ " await summary_query_engine.aquery(\n",
+ " \"Extract a concise 1-2 line summary of this document\"\n",
+ " )\n",
+ " )\n",
+ " with open(summary_out_path, \"wb\") as fp:\n",
+ " pickle.dump(summary, fp)\n",
+ " else:\n",
+ " with open(summary_out_path, \"rb\") as fp:\n",
+ " summary = pickle.load(fp)\n",
+ "\n",
+ " # define tools\n",
+ " query_engine_tools = [\n",
+ " QueryEngineTool(\n",
+ " query_engine=vector_query_engine,\n",
+ " metadata=ToolMetadata(\n",
+ " name=f\"vector_tool_{file_base}\",\n",
+ " description=\"Useful for questions related to specific facts\",\n",
+ " ),\n",
+ " ),\n",
+ " QueryEngineTool(\n",
+ " query_engine=summary_query_engine,\n",
+ " metadata=ToolMetadata(\n",
+ " name=f\"summary_tool_{file_base}\",\n",
+ " description=\"Useful for summarization questions\",\n",
+ " ),\n",
+ " ),\n",
+ " ]\n",
+ "\n",
+ " # build agent\n",
+ " function_llm = OpenAI(model=\"gpt-4\")\n",
+ " agent = OpenAIAgent.from_tools(\n",
+ " query_engine_tools,\n",
+ " llm=function_llm,\n",
+ " verbose=True,\n",
+ " system_prompt=f\"\"\"\\\n",
+ "You are a specialized agent designed to answer queries about the `{file_base}.html` part of the LlamaIndex docs.\n",
+ "You must ALWAYS use at least one of the tools provided when answering a question; do NOT rely on prior knowledge.\\\n",
+ "\"\"\",\n",
+ " )\n",
+ "\n",
+ " return agent, summary\n",
+ "\n",
+ "\n",
+ "async def build_agents(docs):\n",
+ " node_parser = SimpleNodeParser.from_defaults()\n",
+ "\n",
+ " # Build agents dictionary\n",
+ " agents_dict = {}\n",
+ " extra_info_dict = {}\n",
+ "\n",
+ " # # this is for the baseline\n",
+ " # all_nodes = []\n",
+ "\n",
+ " for idx, doc in enumerate(tqdm(docs)):\n",
+ " nodes = node_parser.get_nodes_from_documents([doc])\n",
+ " # all_nodes.extend(nodes)\n",
+ "\n",
+ " # agent ID is the parent dir stem + the file stem\n",
+ " file_path = Path(doc.metadata[\"path\"])\n",
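+ " # e.g. \"docs.llamaindex.ai/en/latest/index.html\" maps to \"latest_index\"\n",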
+ " file_base = str(file_path.parent.stem) + \"_\" + str(file_path.stem)\n", + " agent, summary = await build_agent_per_doc(nodes, file_base)\n", + "\n", + " agents_dict[file_base] = agent\n", + " extra_info_dict[file_base] = {\"summary\": summary, \"nodes\": nodes}\n", + "\n", + " return agents_dict, extra_info_dict" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44748b46-dd6b-4d4f-bc70-7022ae96413f", + "metadata": {}, + "outputs": [], + "source": [ + "agents_dict, extra_info_dict = await build_agents(docs)" + ] + }, + { + "cell_type": "markdown", + "id": "899ca55b-0c02-429b-a765-8e4f806d503f", + "metadata": {}, + "source": [ + "### Build Retriever-Enabled OpenAI Agent\n", + "\n", + "We build a top-level agent that can orchestrate across the different document agents to answer any user query.\n", + "\n", + "This `RetrieverOpenAIAgent` performs tool retrieval before tool use (unlike a default agent that tries to put all tools in the prompt).\n", + "\n", + "**Improvements from V0**: We make the following improvements compared to the \"base\" version in V0.\n", + "\n", + "- Adding in reranking: we use Cohere reranker to better filter the candidate set of documents.\n", + "- Adding in a query planning tool: we add an explicit query planning tool that's dynamically created based on the set of retrieved tools.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 145, + "id": "6884ff15-bf40-4bdd-a1e3-58cbd056a12a", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# define tool for each document agent\n", + "all_tools = []\n", + "for file_base, agent in agents_dict.items():\n", + " summary = extra_info_dict[file_base][\"summary\"]\n", + " doc_tool = QueryEngineTool(\n", + " query_engine=agent,\n", + " metadata=ToolMetadata(\n", + " name=f\"tool_{file_base}\",\n", + " description=summary,\n", + " ),\n", + " )\n", + " all_tools.append(doc_tool)" + ] + }, + { + "cell_type": "code", + "execution_count": 146, + "id": "346ed0e1-b96f-446b-a768-4f11a9a1a7f6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ToolMetadata(description='LlamaIndex is a data framework that allows LLM applications to ingest, structure, and access private or domain-specific data by providing tools such as data connectors, data indexes, engines, data agents, and application integrations. It is designed for beginners, advanced users, and everyone in between, and offers both high-level and lower-level APIs for customization. LlamaIndex can be installed using pip and has detailed documentation and tutorials available. It is available on GitHub and PyPi, and there is also a Typescript package available. 
The LlamaIndex community can be joined on Twitter and Discord.', name='tool_latest_index', fn_schema=<class 'llama_index.tools.types.DefaultToolFnSchema'>)\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(all_tools[0].metadata)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 147,
+ "id": "b266ad43-c3fd-41cb-9e3b-4cb2bb2c2e5f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# define an \"object\" index and retriever over these tools\n",
+ "from llama_index import VectorStoreIndex\n",
+ "from llama_index.objects import ObjectIndex, SimpleToolNodeMapping, ObjectRetriever\n",
+ "from llama_index.retrievers import BaseRetriever\n",
+ "from llama_index.indices.postprocessor import CohereRerank\n",
+ "from llama_index.query_engine import SubQuestionQueryEngine\n",
+ "from llama_index.llms import OpenAI\n",
+ "\n",
+ "llm = OpenAI(model=\"gpt-4-0613\")\n",
+ "\n",
+ "tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)\n",
+ "obj_index = ObjectIndex.from_objects(\n",
+ " all_tools,\n",
+ " tool_mapping,\n",
+ " VectorStoreIndex,\n",
+ ")\n",
+ "vector_node_retriever = obj_index.as_node_retriever(similarity_top_k=10)\n",
+ "\n",
+ "\n",
+ "# define a custom retriever with reranking\n",
+ "class CustomRetriever(BaseRetriever):\n",
+ " def __init__(self, vector_retriever, postprocessor=None):\n",
+ " self._vector_retriever = vector_retriever\n",
+ " self._postprocessor = postprocessor or CohereRerank(top_n=5)\n",
+ "\n",
+ " def _retrieve(self, query_bundle):\n",
+ " retrieved_nodes = self._vector_retriever.retrieve(query_bundle)\n",
+ " filtered_nodes = self._postprocessor.postprocess_nodes(\n",
+ " retrieved_nodes, query_bundle=query_bundle\n",
+ " )\n",
+ "\n",
+ " return filtered_nodes\n",
+ "\n",
+ "\n",
+ "# define a custom object retriever that adds in a query planning tool\n",
+ "class CustomObjectRetriever(ObjectRetriever):\n",
+ " def __init__(self, retriever, object_node_mapping, all_tools, llm=None):\n",
+ " self._retriever = retriever\n",
+ " self._object_node_mapping = object_node_mapping\n",
+ " self._llm = llm or OpenAI(model=\"gpt-4-0613\")\n",
+ "\n",
+ " def retrieve(self, query_bundle):\n",
+ " nodes = self._retriever.retrieve(query_bundle)\n",
+ " tools = [self._object_node_mapping.from_node(n.node) for n in nodes]\n",
+ "\n",
+ " sub_question_sc = ServiceContext.from_defaults(llm=self._llm)\n",
+ " sub_question_engine = SubQuestionQueryEngine.from_defaults(\n",
+ " query_engine_tools=tools, service_context=sub_question_sc\n",
+ " )\n",
+ " sub_question_description = \"\"\"\\\n",
+ "Useful for any queries that involve comparing multiple documents. ALWAYS use this tool for comparison queries - make sure to call this \\\n",
+ "tool with the original query.
Do NOT use the other tools for any queries involving multiple documents.\n",
+ "\"\"\"\n",
+ " sub_question_tool = QueryEngineTool(\n",
+ " query_engine=sub_question_engine,\n",
+ " metadata=ToolMetadata(\n",
+ " name=\"compare_tool\", description=sub_question_description\n",
+ " ),\n",
+ " )\n",
+ "\n",
+ " return tools + [sub_question_tool]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 148,
+ "id": "0ba0d1a6-e324-4faa-b72b-d340904e65b2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "custom_node_retriever = CustomRetriever(vector_node_retriever)\n",
+ "\n",
+ "# wrap it with ObjectRetriever to return objects\n",
+ "custom_obj_retriever = CustomObjectRetriever(\n",
+ " custom_node_retriever, tool_mapping, all_tools, llm=llm\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 149,
+ "id": "8654ce2a-cce7-44fc-8445-8bbcfdf7ee91",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "6\n"
+ ]
+ }
+ ],
+ "source": [
+ "tmps = custom_obj_retriever.retrieve(\"hello\")\n",
+ "print(len(tmps))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 150,
+ "id": "fed38942-1e37-4c61-89fa-d2ef41151831",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from llama_index.agent import FnRetrieverOpenAIAgent, ReActAgent\n",
+ "\n",
+ "top_agent = FnRetrieverOpenAIAgent.from_retriever(\n",
+ " custom_obj_retriever,\n",
+ " system_prompt=\"\"\" \\\n",
+ "You are an agent designed to answer queries about the documentation.\n",
+ "Please always use the tools provided to answer a question. Do not rely on prior knowledge.\\\n",
+ "\n",
+ "\"\"\",\n",
+ " llm=llm,\n",
+ " verbose=True,\n",
+ ")\n",
+ "\n",
+ "# top_agent = ReActAgent.from_tools(\n",
+ "# tool_retriever=custom_obj_retriever,\n",
+ "# system_prompt=\"\"\" \\\n",
+ "# You are an agent designed to answer queries about the documentation.\n",
+ "# Please always use the tools provided to answer a question. Do not rely on prior knowledge.\\\n",
+ "\n",
+ "# \"\"\",\n",
+ "# llm=llm,\n",
+ "# verbose=True,\n",
+ "# )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "aa32b97c-6779-4b60-823d-6ca3be6f358a",
+ "metadata": {},
+ "source": [
+ "### Define Baseline Vector Store Index\n",
+ "\n",
+ "As a point of comparison, we define a \"naive\" RAG pipeline that dumps all docs into a single vector index collection.\n",
+ "\n",
+ "We set `similarity_top_k = 4`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 129,
+ "id": "f2f54834-1597-46ce-b0d3-0456bfa0d368",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "all_nodes = [n for extra_info in extra_info_dict.values() for n in extra_info[\"nodes\"]]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 130,
+ "id": "60dfc88f-6f47-4ef2-9ae6-74abde06a485",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "base_index = VectorStoreIndex(all_nodes)\n",
+ "base_query_engine = base_index.as_query_engine(similarity_top_k=4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8dedb927-a992-4f21-a0fb-4ce4361adcb3",
+ "metadata": {},
+ "source": [
+ "## Running Example Queries\n",
+ "\n",
+ "Let's run some example queries, ranging from QA/summarization over a single document to QA/summarization over multiple documents. Since the top-level agent was built with `verbose=True`, the intermediate tool calls are printed below."
+ ] + }, + { + "cell_type": "code", + "execution_count": 151, + "id": "8e743c62-7dd8-4ac9-85a5-f1cbc112a79c", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Calling Function ===\n", + "Calling function: tool_api_reference_evaluation with args: {\n", + " \"input\": \"types of evaluation\"\n", + "}\n", + "=== Calling Function ===\n", + "Calling function: vector_tool_api_reference_evaluation with args: {\n", + " \"input\": \"types of evaluation\"\n", + "}\n", + "Got output: The types of evaluation can include correctness evaluation, faithfulness evaluation, guideline evaluation, hit rate evaluation, MRR (Mean Reciprocal Rank) evaluation, pairwise comparison evaluation, relevancy evaluation, and response evaluation.\n", + "========================\n", + "Got output: The types of evaluation mentioned in the `api_reference_evaluation.html` part of the LlamaIndex docs include:\n", + "\n", + "1. Correctness Evaluation\n", + "2. Faithfulness Evaluation\n", + "3. Guideline Evaluation\n", + "4. Hit Rate Evaluation\n", + "5. MRR (Mean Reciprocal Rank) Evaluation\n", + "6. Pairwise Comparison Evaluation\n", + "7. Relevancy Evaluation\n", + "8. Response Evaluation\n", + "========================\n" + ] + } + ], + "source": [ + "response = top_agent.query(\n", + " \"Tell me about the different types of evaluation in LlamaIndex\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 152, + "id": "a4ce2a76-5779-4acf-9337-69109dae7fd6", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "There are several types of evaluation in LlamaIndex:\n", + "\n", + "1. Correctness Evaluation: This type of evaluation measures the accuracy of the retrieval results. It checks if the retrieved documents are correct and relevant to the query.\n", + "\n", + "2. Faithfulness Evaluation: Faithfulness evaluation measures how faithfully the retrieved documents represent the original data. It checks if the retrieved documents accurately reflect the information in the original documents.\n", + "\n", + "3. Guideline Evaluation: Guideline evaluation involves comparing the retrieval results against a set of guidelines or ground truth. It checks if the retrieval results align with the expected or desired outcomes.\n", + "\n", + "4. Hit Rate Evaluation: Hit rate evaluation measures the percentage of queries that return at least one relevant document. It is a binary evaluation metric that indicates the effectiveness of the retrieval system in finding relevant documents.\n", + "\n", + "5. MRR (Mean Reciprocal Rank) Evaluation: MRR evaluation measures the average rank of the first relevant document in the retrieval results. It provides a single value that represents the effectiveness of the retrieval system in ranking relevant documents.\n", + "\n", + "6. Pairwise Comparison Evaluation: Pairwise comparison evaluation involves comparing the retrieval results of different systems or algorithms. It helps determine which system performs better in terms of retrieval accuracy and relevance.\n", + "\n", + "7. Relevancy Evaluation: Relevancy evaluation measures the relevance of the retrieved documents to the query. It can be done using various metrics such as precision, recall, and F1 score.\n", + "\n", + "8. Response Evaluation: Response evaluation measures the quality of the response generated by the retrieval system. 
It checks if the response is informative, accurate, and helpful to the user.\n", + "\n", + "These evaluation types help assess the performance and effectiveness of the retrieval system in LlamaIndex.\n" + ] + } + ], + "source": [ + "print(response)" + ] + }, + { + "cell_type": "code", + "execution_count": 101, + "id": "af28b422-fb73-4b59-9e77-3ba3afa87795", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LlamaIndex utilizes various types of evaluation methods to assess its performance and effectiveness. These evaluation methods include RelevancyEvaluator, RetrieverEvaluator, SemanticSimilarityEvaluator, PairwiseComparisonEvaluator, CorrectnessEvaluator, FaithfulnessEvaluator, and GuidelineEvaluator. Each of these evaluators serves a specific purpose in evaluating different aspects of the LlamaIndex system.\n" + ] + } + ], + "source": [ + "# baseline\n", + "response = base_query_engine.query(\n", + " \"Tell me about the different types of evaluation in LlamaIndex\"\n", + ")\n", + "print(str(response))" + ] + }, + { + "cell_type": "code", + "execution_count": 153, + "id": "ee6ef20c-3ccc-46c3-ad87-667138d78d5d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "=== Calling Function ===\n", + "Calling function: compare_tool with args: {\n", + " \"input\": \"content in the contributions page vs. index page\"\n", + "}\n", + "Generated 2 sub questions.\n", + "\u001b[1;3;38;2;237;90;200m[tool_development_contributing] Q: What is the content of the contributions page?\n", + "\u001b[0m\u001b[1;3;38;2;90;149;237m[tool_latest_index] Q: What is the content of the index page?\n", + "\u001b[0m=== Calling Function ===\n", + "Calling function: summary_tool_development_contributing with args: {\n", + " \"input\": \"development_contributing.html\"\n", + "}\n", + "=== Calling Function ===\n", + "Calling function: vector_tool_latest_index with args: {\n", + " \"input\": \"content of the index page\"\n", + "}\n", + "Got output: The development_contributing.html file provides information on how to contribute to LlamaIndex. It includes guidelines on what to work on, such as extending core modules, fixing bugs, adding usage examples, adding experimental features, and improving code quality and documentation. The file also provides details on each module, including data loaders, node parsers, text splitters, document/index/KV stores, managed index, vector stores, retrievers, query engines, query transforms, token usage optimizers, node postprocessors, and output parsers. Additionally, the file includes a development guideline section that covers environment setup, validating changes, formatting/linting, testing, creating example notebooks, and creating a pull request.\n", + "========================\n", + "Got output: The content of the index page provides information about LlamaIndex, a data framework for LLM applications. It explains why LlamaIndex is useful for augmenting LLM models with private or domain-specific data that may be distributed across different applications and data stores. LlamaIndex offers tools such as data connectors, data indexes, engines, and data agents to ingest, structure, and access data. It is designed for beginners as well as advanced users who can customize and extend its modules. 
The page also provides installation instructions, tutorials, and links to the LlamaIndex ecosystem and associated projects.\n", + "========================\n", + "\u001b[1;3;38;2;90;149;237m[tool_latest_index] A: The content of the `latest_index.html` page provides comprehensive information about LlamaIndex, a data framework for LLM applications. It explains the utility of LlamaIndex in augmenting LLM models with private or domain-specific data that may be distributed across different applications and data stores. \n", + "\n", + "The page details the tools offered by LlamaIndex, such as data connectors, data indexes, engines, and data agents, which are used to ingest, structure, and access data. It is designed to cater to both beginners and advanced users, with the flexibility to customize and extend its modules.\n", + "\n", + "Additionally, the page provides installation instructions and tutorials for users. It also includes links to the LlamaIndex ecosystem and associated projects for further exploration and understanding.\n", + "\u001b[0m\u001b[1;3;38;2;237;90;200m[tool_development_contributing] A: The `development_contributing.html` page of the LlamaIndex docs provides comprehensive information on how to contribute to the project. It includes guidelines on the areas to focus on, such as extending core modules, fixing bugs, adding usage examples, adding experimental features, and improving code quality and documentation.\n", + "\n", + "The page also provides detailed information on each module, including data loaders, node parsers, text splitters, document/index/KV stores, managed index, vector stores, retrievers, query engines, query transforms, token usage optimizers, node postprocessors, and output parsers.\n", + "\n", + "In addition, there is a development guideline section that covers various aspects of the development process, including environment setup, validating changes, formatting/linting, testing, creating example notebooks, and creating a pull request.\n", + "\u001b[0mGot output: The content in the contributions page of the LlamaIndex documentation provides comprehensive information on how to contribute to the project, including guidelines on areas to focus on and detailed information on each module. It also covers various aspects of the development process. \n", + "\n", + "On the other hand, the content in the index page of the LlamaIndex documentation provides comprehensive information about LlamaIndex itself, explaining its utility in augmenting LLM models with private or domain-specific data. It details the tools offered by LlamaIndex and provides installation instructions, tutorials, and links to the LlamaIndex ecosystem and associated projects.\n", + "========================\n" + ] + } + ], + "source": [ + "response = top_agent.query(\n", + " \"Compare the content in the contributions page vs. index page.\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 154, + "id": "cfe1dd4c-8bfd-43d0-99bc-ca60861dc418", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The contributions page of the LlamaIndex documentation provides guidelines for contributing to LlamaIndex, including extending core modules, fixing bugs, adding usage examples, adding experimental features, and improving code quality and documentation. 
It also includes information on the environment setup, validating changes, formatting and linting, testing, creating example notebooks, and creating a pull request.\n", + "\n", + "On the other hand, the index page of the LlamaIndex documentation provides information about LlamaIndex itself. It explains that LlamaIndex is a data framework that allows LLM applications to ingest, structure, and access private or domain-specific data. It provides tools such as data connectors, data indexes, engines, data agents, and application integrations. The index page also mentions that LlamaIndex is designed for beginners, advanced users, and everyone in between, and offers both high-level and lower-level APIs for customization. It provides installation instructions, links to the GitHub and PyPi repositories, and information about the LlamaIndex community on Twitter and Discord.\n", + "\n", + "In summary, the contributions page focuses on contributing to LlamaIndex, while the index page provides an overview of LlamaIndex and its features.\n" + ] + } + ], + "source": [ + "print(response)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a8d97266-8e22-43a8-adfe-b9a7f833c06d", + "metadata": {}, + "outputs": [], + "source": [ + "response = top_agent.query(\n", + " \"Can you compare the tree index and list index at a very high-level?\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 162, + "id": "7401a80c-3cc7-4c72-9c45-82ffc1bd6816", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "At a high level, the Tree Index and List Index are two different types of indexes used in the system. \n", + "\n", + "The Tree Index is a tree-structured index that is built specifically for each query. It allows for the construction of a query-specific tree from leaf nodes to return a response. The Tree Index is designed to provide a more optimized and efficient way of retrieving nodes based on a query.\n", + "\n", + "On the other hand, the List Index is a keyword table index that supports operations such as inserting and deleting documents, retrieving nodes based on a query, and refreshing the index with updated documents. The List Index is a simpler index that uses a keyword table approach for retrieval.\n", + "\n", + "Both indexes have their own advantages and use cases. 
The choice between them depends on the specific requirements and constraints of the system.\n" + ] + } + ], + "source": [ + "print(str(response))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0bd95a16-606c-4d6c-a01d-872afb572774", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llama_index_v2", + "language": "python", + "name": "llama_index_v2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/llama_index/objects/base.py b/llama_index/objects/base.py index 01cc0d174a3db..7730eb1186bf8 100644 --- a/llama_index/objects/base.py +++ b/llama_index/objects/base.py @@ -61,3 +61,6 @@ def as_retriever(self, **kwargs: Any) -> ObjectRetriever: retriever=self._index.as_retriever(**kwargs), object_node_mapping=self._object_node_mapping, ) + + def as_node_retriever(self, **kwargs: Any) -> BaseRetriever: + return self._index.as_retriever(**kwargs)