diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 000000000..e69de29bb
diff --git a/404.html b/404.html
new file mode 100644
index 000000000..64ab95875
--- /dev/null
+++ b/404.html
@@ -0,0 +1,3726 @@
+ Cheshire Cat AI docs
404 - Not found

\ No newline at end of file
diff --git a/API_Documentation/SUMMARY/index.html b/API_Documentation/SUMMARY/index.html
new file mode 100644
index 000000000..205d974b8
--- /dev/null
+++ b/API_Documentation/SUMMARY/index.html
@@ -0,0 +1,3791 @@
+ SUMMARY - Cheshire Cat AI docs
\ No newline at end of file
diff --git a/API_Documentation/agents/base_agent/index.html b/API_Documentation/agents/base_agent/index.html
new file mode 100644
index 000000000..695ae4f07
--- /dev/null
+++ b/API_Documentation/agents/base_agent/index.html
@@ -0,0 +1,3902 @@
+ base_agent - Cheshire Cat AI docs

base_agent

\ No newline at end of file
diff --git a/API_Documentation/agents/form_agent/index.html b/API_Documentation/agents/form_agent/index.html
new file mode 100644
index 000000000..7299b38a1
--- /dev/null
+++ b/API_Documentation/agents/form_agent/index.html
@@ -0,0 +1,3902 @@
+ form_agent - Cheshire Cat AI docs

form_agent

\ No newline at end of file
diff --git a/API_Documentation/agents/main_agent/index.html b/API_Documentation/agents/main_agent/index.html
new file mode 100644
index 000000000..a17c5c4e5
--- /dev/null
+++ b/API_Documentation/agents/main_agent/index.html
@@ -0,0 +1,5090 @@
+ main_agent - Cheshire Cat AI docs

main_agent


+ MainAgent
+
+ Bases: BaseAgent
+
+ Main Agent. This class manages sub-agents that in turn use the LLM.

+ Source code in cat/agents/main_agent.py
class MainAgent(BaseAgent):
+    """Main Agent.
+    This class manages sub-agents that in turn use the LLM.
+    """
+
+    def __init__(self):
+        self.mad_hatter = MadHatter()
+
+        if get_env("CCAT_LOG_LEVEL") in ["DEBUG", "INFO"]:
+            self.verbose = True
+        else:
+            self.verbose = False
+
+    async def execute(self, stray) -> AgentOutput:
+        """Execute the agents.
+
+        Returns
+        -------
+        agent_output : AgentOutput
+            Reply of the agent, instance of AgentOutput.
+        """
+
+        # prepare input to be passed to the agent.
+        #   Info will be extracted from working memory
+        # Note: agent_input works both as a dict and as an object
+        agent_input : BaseModelDict = self.format_agent_input(stray)
+        agent_input = self.mad_hatter.execute_hook(
+            "before_agent_starts", agent_input, cat=stray
+        )
+
+        # store the agent input inside the working memory
+        stray.working_memory.agent_input = agent_input
+
+        # should we run the default agents?
+        fast_reply = {}
+        fast_reply = self.mad_hatter.execute_hook(
+            "agent_fast_reply", fast_reply, cat=stray
+        )
+        if isinstance(fast_reply, AgentOutput):
+            return fast_reply
+        if isinstance(fast_reply, dict) and "output" in fast_reply:
+            return AgentOutput(**fast_reply)
+
+        # obtain prompt parts from plugins
+        prompt_prefix = self.mad_hatter.execute_hook(
+            "agent_prompt_prefix", prompts.MAIN_PROMPT_PREFIX, cat=stray
+        )
+        prompt_suffix = self.mad_hatter.execute_hook(
+            "agent_prompt_suffix", prompts.MAIN_PROMPT_SUFFIX, cat=stray
+        )
+
+        # run tools and forms
+        procedures_agent = ProceduresAgent()
+        procedures_agent_out : AgentOutput = await procedures_agent.execute(stray)
+        if procedures_agent_out.return_direct:
+            return procedures_agent_out
+
+        # we run memory agent if:
+        # - no procedures were recalled or selected or
+        # - procedures have all return_direct=False
+        memory_agent = MemoryAgent()
+        memory_agent_out : AgentOutput = await memory_agent.execute(
+            # TODO: should all agents only receive stray?
+            stray, prompt_prefix, prompt_suffix
+        )
+
+        memory_agent_out.intermediate_steps += procedures_agent_out.intermediate_steps
+
+        return memory_agent_out
+
+    def format_agent_input(self, stray):
+        """Format the input for the Agent.
+
+        The method formats the strings of recalled memories and chat history that will be provided to the Langchain
+        Agent and inserted in the prompt.
+
+        Returns
+        -------
+        BaseModelDict
+            Formatted output to be parsed by the Agent executor. Works both as a dict and as an object.
+
+        Notes
+        -----
+    The context of memories and conversation history is properly formatted before being parsed by the agent
+    and, hence, inserted in the main prompt.
+        All the formatting pipeline is hookable and memories can be edited.
+
+        See Also
+        --------
+        agent_prompt_episodic_memories
+        agent_prompt_declarative_memories
+        agent_prompt_chat_history
+        """
+
+        # format memories to be inserted in the prompt
+        episodic_memory_formatted_content = self.agent_prompt_episodic_memories(
+            stray.working_memory.episodic_memories
+        )
+        declarative_memory_formatted_content = self.agent_prompt_declarative_memories(
+            stray.working_memory.declarative_memories
+        )
+
+        # format conversation history to be inserted in the prompt
+        # TODOV2: take away
+        conversation_history_formatted_content = stray.stringify_chat_history()
+
+        return BaseModelDict(**{
+            "episodic_memory": episodic_memory_formatted_content,
+            "declarative_memory": declarative_memory_formatted_content,
+            "tools_output": "",
+            "input": stray.working_memory.user_message_json.text,  # TODOV2: take away
+            "chat_history": conversation_history_formatted_content, # TODOV2: take away
+        })
+
+    def agent_prompt_episodic_memories(
+        self, memory_docs: List[Tuple[Document, float]]
+    ) -> str:
+        """Formats episodic memories to be inserted into the prompt.
+
+        Parameters
+        ----------
+        memory_docs : List[Tuple[Document, float]]
+            List of Langchain `Document` and similarity score tuples retrieved from the episodic memory.
+
+        Returns
+        -------
+        memory_content : str
+            String of retrieved context from the episodic memory.
+        """
+
+        # convert docs to simple text
+        memory_texts = [m[0].page_content.replace("\n", ". ") for m in memory_docs]
+
+        # add time information (e.g. "2 days ago")
+        memory_timestamps = []
+        for m in memory_docs:
+            # Get Time information in the Document metadata
+            timestamp = m[0].metadata["when"]
+
+            # Get Current Time - Time when memory was stored
+            delta = timedelta(seconds=(time.time() - timestamp))
+
+            # Convert and Save timestamps to Verbal (e.g. "2 days ago")
+            memory_timestamps.append(f" ({verbal_timedelta(delta)})")
+
+        # Join Document text content with related temporal information
+        memory_texts = [a + b for a, b in zip(memory_texts, memory_timestamps)]
+
+        # Format the memories for the output
+        memories_separator = "\n  - "
+        memory_content = (
+            "## Context of things the Human said in the past: "
+            + memories_separator
+            + memories_separator.join(memory_texts)
+        )
+
+        # if no data is retrieved from memory don't write anything in the prompt
+        if len(memory_texts) == 0:
+            memory_content = ""
+
+        return memory_content
+
+    def agent_prompt_declarative_memories(
+        self, memory_docs: List[Tuple[Document, float]]
+    ) -> str:
+        """Formats the declarative memories for the prompt context.
+        Such context is placed in the `agent_prompt_prefix` in the place held by {declarative_memory}.
+
+        Parameters
+        ----------
+        memory_docs : List[Tuple[Document, float]]
+            List of Langchain `Document` and similarity score tuples retrieved from the declarative memory.
+
+        Returns
+        -------
+        memory_content : str
+            String of retrieved context from the declarative memory.
+        """
+
+        # convert docs to simple text
+        memory_texts = [m[0].page_content.replace("\n", ". ") for m in memory_docs]
+
+        # add source information (e.g. "extracted from file.txt")
+        memory_sources = []
+        for m in memory_docs:
+            # Get and save the source of the memory
+            source = m[0].metadata["source"]
+            memory_sources.append(f" (extracted from {source})")
+
+        # Join Document text content with related source information
+        memory_texts = [a + b for a, b in zip(memory_texts, memory_sources)]
+
+        # Format the memories for the output
+        memories_separator = "\n  - "
+
+        memory_content = (
+            "## Context of documents containing relevant information: "
+            + memories_separator
+            + memories_separator.join(memory_texts)
+        )
+
+        # if no data is retrieved from memory don't write anything in the prompt
+        if len(memory_texts) == 0:
+            memory_content = ""
+
+        return memory_content
+
+
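+
+ The `before_agent_starts` and `agent_fast_reply` hooks used above are the main plugin entry points into this pipeline. A minimal sketch of a plugin overriding them, assuming the standard `hook` decorator from `cat.mad_hatter.decorators`; the function bodies are illustrative:
+
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_agent_starts(agent_input, cat):
+    # runs before the prompt is assembled; agent_input is the BaseModelDict
+    # built by MainAgent.format_agent_input
+    agent_input["episodic_memory"] = ""  # e.g. mute episodic context for this turn
+    return agent_input
+
+@hook
+def agent_fast_reply(fast_reply, cat):
+    # returning a dict with an "output" key short-circuits procedures and memory agents
+    if cat.working_memory.user_message_json.text.lower() == "ping":
+        fast_reply["output"] = "pong"
+    return fast_reply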

+ agent_prompt_declarative_memories(memory_docs)
+
+ Formats the declarative memories for the prompt context. Such context is placed in the `agent_prompt_prefix` in the place held by {declarative_memory}.
+
+ Parameters:
+
+   memory_docs : List[Tuple[Document, float]]
+       List of Langchain `Document` and similarity score tuples retrieved from the declarative memory. (required)
+
+ Returns:
+
+   memory_content : str
+       String of retrieved context from the declarative memory.
+
+ Source code in cat/agents/main_agent.py
def agent_prompt_declarative_memories(
+    self, memory_docs: List[Tuple[Document, float]]
+) -> str:
+    """Formats the declarative memories for the prompt context.
+    Such context is placed in the `agent_prompt_prefix` in the place held by {declarative_memory}.
+
+    Parameters
+    ----------
+    memory_docs : List[Tuple[Document, float]]
+        List of Langchain `Document` and similarity score tuples retrieved from the declarative memory.
+
+    Returns
+    -------
+    memory_content : str
+        String of retrieved context from the declarative memory.
+    """
+
+    # convert docs to simple text
+    memory_texts = [m[0].page_content.replace("\n", ". ") for m in memory_docs]
+
+    # add source information (e.g. "extracted from file.txt")
+    memory_sources = []
+    for m in memory_docs:
+        # Get and save the source of the memory
+        source = m[0].metadata["source"]
+        memory_sources.append(f" (extracted from {source})")
+
+    # Join Document text content with related source information
+    memory_texts = [a + b for a, b in zip(memory_texts, memory_sources)]
+
+    # Format the memories for the output
+    memories_separator = "\n  - "
+
+    memory_content = (
+        "## Context of documents containing relevant information: "
+        + memories_separator
+        + memories_separator.join(memory_texts)
+    )
+
+    # if no data is retrieved from memory don't write anything in the prompt
+    if len(memory_texts) == 0:
+        memory_content = ""
+
+    return memory_content
+
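+
+ For reference, a sketch of the transformation this method performs on one recalled point, assuming langchain's `Document` (the exact import path varies across langchain versions):
+
+from langchain.docstore.document import Document
+
+docs = [
+    (Document(page_content="The Cat is a framework.", metadata={"source": "readme.md"}), 0.87),
+]
+# agent_prompt_declarative_memories(docs) would yield:
+# ## Context of documents containing relevant information:
+#   - The Cat is a framework. (extracted from readme.md)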
+
+

+ agent_prompt_episodic_memories(memory_docs)
+
+ Formats episodic memories to be inserted into the prompt.
+
+ Parameters:
+
+   memory_docs : List[Tuple[Document, float]]
+       List of Langchain `Document` and similarity score tuples retrieved from the episodic memory. (required)
+
+ Returns:
+
+   memory_content : str
+       String of retrieved context from the episodic memory.
+
+ Source code in cat/agents/main_agent.py
def agent_prompt_episodic_memories(
+    self, memory_docs: List[Tuple[Document, float]]
+) -> str:
+    """Formats episodic memories to be inserted into the prompt.
+
+    Parameters
+    ----------
+    memory_docs : List[Tuple[Document, float]]
+        List of Langchain `Document` and similarity score tuples retrieved from the episodic memory.
+
+    Returns
+    -------
+    memory_content : str
+        String of retrieved context from the episodic memory.
+    """
+
+    # convert docs to simple text
+    memory_texts = [m[0].page_content.replace("\n", ". ") for m in memory_docs]
+
+    # add time information (e.g. "2 days ago")
+    memory_timestamps = []
+    for m in memory_docs:
+        # Get Time information in the Document metadata
+        timestamp = m[0].metadata["when"]
+
+        # Get Current Time - Time when memory was stored
+        delta = timedelta(seconds=(time.time() - timestamp))
+
+        # Convert and Save timestamps to Verbal (e.g. "2 days ago")
+        memory_timestamps.append(f" ({verbal_timedelta(delta)})")
+
+    # Join Document text content with related temporal information
+    memory_texts = [a + b for a, b in zip(memory_texts, memory_timestamps)]
+
+    # Format the memories for the output
+    memories_separator = "\n  - "
+    memory_content = (
+        "## Context of things the Human said in the past: "
+        + memories_separator
+        + memories_separator.join(memory_texts)
+    )
+
+    # if no data is retrieved from memory don't write anything in the prompt
+    if len(memory_texts) == 0:
+        memory_content = ""
+
+    return memory_content
+
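+
+ And the episodic counterpart, which appends a verbal time delta instead of a source (same `Document` assumption; the exact wording of the delta comes from `verbal_timedelta`):
+
+import time
+from langchain.docstore.document import Document
+
+docs = [
+    (Document(page_content="I like tuna.", metadata={"when": time.time() - 2 * 86400}), 0.91),
+]
+# agent_prompt_episodic_memories(docs) would yield something like:
+# ## Context of things the Human said in the past:
+#   - I like tuna. (2 days ago)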
+
+

+ execute(stray) async
+
+ Execute the agents.
+
+ Returns:
+
+   agent_output : AgentOutput
+       Reply of the agent, instance of AgentOutput.
+
+ Source code in cat/agents/main_agent.py
async def execute(self, stray) -> AgentOutput:
+    """Execute the agents.
+
+    Returns
+    -------
+    agent_output : AgentOutput
+        Reply of the agent, instance of AgentOutput.
+    """
+
+    # prepare input to be passed to the agent.
+    #   Info will be extracted from working memory
+    # Note: agent_input works both as a dict and as an object
+    agent_input : BaseModelDict = self.format_agent_input(stray)
+    agent_input = self.mad_hatter.execute_hook(
+        "before_agent_starts", agent_input, cat=stray
+    )
+
+    # store the agent input inside the working memory
+    stray.working_memory.agent_input = agent_input
+
+    # should we run the default agents?
+    fast_reply = {}
+    fast_reply = self.mad_hatter.execute_hook(
+        "agent_fast_reply", fast_reply, cat=stray
+    )
+    if isinstance(fast_reply, AgentOutput):
+        return fast_reply
+    if isinstance(fast_reply, dict) and "output" in fast_reply:
+        return AgentOutput(**fast_reply)
+
+    # obtain prompt parts from plugins
+    prompt_prefix = self.mad_hatter.execute_hook(
+        "agent_prompt_prefix", prompts.MAIN_PROMPT_PREFIX, cat=stray
+    )
+    prompt_suffix = self.mad_hatter.execute_hook(
+        "agent_prompt_suffix", prompts.MAIN_PROMPT_SUFFIX, cat=stray
+    )
+
+    # run tools and forms
+    procedures_agent = ProceduresAgent()
+    procedures_agent_out : AgentOutput = await procedures_agent.execute(stray)
+    if procedures_agent_out.return_direct:
+        return procedures_agent_out
+
+    # we run memory agent if:
+    # - no procedures were recalled or selected or
+    # - procedures have all return_direct=False
+    memory_agent = MemoryAgent()
+    memory_agent_out : AgentOutput = await memory_agent.execute(
+        # TODO: should all agents only receive stray?
+        stray, prompt_prefix, prompt_suffix
+    )
+
+    memory_agent_out.intermediate_steps += procedures_agent_out.intermediate_steps
+
+    return memory_agent_out
+
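+
+ A sketch of how this entry point is driven (normally the StrayCat session object does this internally; `stray` here stands for such a session, and the call must run inside an async context):
+
+main_agent = MainAgent()
+agent_output = await main_agent.execute(stray)
+# agent_output.output holds the reply text,
+# agent_output.intermediate_steps the tool/form calls made along the way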
+
+
+ +
+ +
+ + +

+ format_agent_input(stray)
+
+ Format the input for the Agent.
+
+ The method formats the strings of recalled memories and chat history that will be provided to the Langchain Agent and inserted in the prompt.
+
+ Returns:
+
+   BaseModelDict
+       Formatted output to be parsed by the Agent executor. Works both as a dict and as an object.
+
+ Notes
+
+ The context of memories and conversation history is properly formatted before being parsed by the agent and, hence, inserted in the main prompt. All the formatting pipeline is hookable and memories can be edited.
+
+ See Also
+
+ agent_prompt_episodic_memories
+ agent_prompt_declarative_memories
+ agent_prompt_chat_history
+
+ Source code in cat/agents/main_agent.py
def format_agent_input(self, stray):
+    """Format the input for the Agent.
+
+    The method formats the strings of recalled memories and chat history that will be provided to the Langchain
+    Agent and inserted in the prompt.
+
+    Returns
+    -------
+    BaseModelDict
+        Formatted output to be parsed by the Agent executor. Works both as a dict and as an object.
+
+    Notes
+    -----
+    The context of memories and conversation history is properly formatted before being parsed by the agent
+    and, hence, inserted in the main prompt.
+    All the formatting pipeline is hookable and memories can be edited.
+
+    See Also
+    --------
+    agent_prompt_episodic_memories
+    agent_prompt_declarative_memories
+    agent_prompt_chat_history
+    """
+
+    # format memories to be inserted in the prompt
+    episodic_memory_formatted_content = self.agent_prompt_episodic_memories(
+        stray.working_memory.episodic_memories
+    )
+    declarative_memory_formatted_content = self.agent_prompt_declarative_memories(
+        stray.working_memory.declarative_memories
+    )
+
+    # format conversation history to be inserted in the prompt
+    # TODOV2: take away
+    conversation_history_formatted_content = stray.stringify_chat_history()
+
+    return BaseModelDict(**{
+        "episodic_memory": episodic_memory_formatted_content,
+        "declarative_memory": declarative_memory_formatted_content,
+        "tools_output": "",
+        "input": stray.working_memory.user_message_json.text,  # TODOV2: take away
+        "chat_history": conversation_history_formatted_content, # TODOV2: take away
+    })
+
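+
+ The keys of the returned `BaseModelDict` line up with placeholders in the main prompt; a sketch of how a prefix template could consume them (template text is illustrative, placeholder names come from the code above):
+
+prompt_prefix = """You are the Cheshire Cat AI.
+
+{episodic_memory}
+
+{declarative_memory}
+
+{tools_output}"""
+# agent_input["episodic_memory"], agent_input["declarative_memory"] and
+# agent_input["tools_output"] are substituted into these slots before the LLM call.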
+
+
\ No newline at end of file
diff --git a/API_Documentation/agents/memory_agent/index.html b/API_Documentation/agents/memory_agent/index.html
new file mode 100644
index 000000000..12e598e1b
--- /dev/null
+++ b/API_Documentation/agents/memory_agent/index.html
@@ -0,0 +1,3902 @@
+ memory_agent - Cheshire Cat AI docs

memory_agent

\ No newline at end of file
diff --git a/API_Documentation/agents/procedures_agent/index.html b/API_Documentation/agents/procedures_agent/index.html
new file mode 100644
index 000000000..a3aa808ff
--- /dev/null
+++ b/API_Documentation/agents/procedures_agent/index.html
@@ -0,0 +1,3902 @@
+ procedures_agent - Cheshire Cat AI docs

procedures_agent

\ No newline at end of file
diff --git a/API_Documentation/auth/auth_utils/index.html b/API_Documentation/auth/auth_utils/index.html
new file mode 100644
index 000000000..e2e33045a
--- /dev/null
+++ b/API_Documentation/auth/auth_utils/index.html
@@ -0,0 +1,3960 @@
+ auth_utils - Cheshire Cat AI docs

auth_utils


+ is_jwt(token)
+
+ Returns whether a given string is a JWT.
+
+ Source code in cat/auth/auth_utils.py
def is_jwt(token: str) -> bool:
+    """
+    Returns whether a given string is a JWT.
+    """
+    try:
+        # Decode the JWT without verification to check its structure
+        jwt.decode(token, options={"verify_signature": False})
+        return True
+    except InvalidTokenError:
+        return False
+
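+
+ A usage sketch; PyJWT's `jwt.encode` is used here only to build a structurally valid token:
+
+import jwt  # PyJWT, the same library imported by cat/auth/auth_utils.py
+from cat.auth.auth_utils import is_jwt
+
+token = jwt.encode({"sub": "user"}, "secret", algorithm="HS256")
+assert is_jwt(token) is True             # well-formed JWT structure
+assert is_jwt("plain-api-key") is False  # decode raises InvalidTokenError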
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/log/index.html b/API_Documentation/log/index.html new file mode 100644 index 000000000..273be4248 --- /dev/null +++ b/API_Documentation/log/index.html @@ -0,0 +1,5429 @@ + + + + + + + + + + + + + + + + + + + + + + + + + log - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

log


The log engine.


+ CatLogEngine
+
+ The log engine.
+
+ Engine to filter the logs in the terminal according to the level of severity.
+
+ Attributes:
+
+   LOG_LEVEL : str
+       Level of logging set in the `.env` file.
+
+ Notes
+
+ The logging level set in the `.env` file will print all the logs from that level and above.
+ Available levels are:
+
+ - `DEBUG`
+ - `INFO`
+ - `WARNING`
+ - `ERROR`
+ - `CRITICAL`
+
+ Defaults to `INFO`.
+
+ Source code in cat/log.py
class CatLogEngine:
+    """The log engine.
+
+    Engine to filter the logs in the terminal according to the level of severity.
+
+    Attributes
+    ----------
+    LOG_LEVEL : str
+        Level of logging set in the `.env` file.
+
+    Notes
+    -----
+    The logging level set in the `.env` file will print all the logs from that level and above.
+    Available levels are:
+
+        - `DEBUG`
+        - `INFO`
+        - `WARNING`
+        - `ERROR`
+        - `CRITICAL`
+
+    Defaults to `INFO`.
+
+    """
+
+    def __init__(self):
+        self.LOG_LEVEL = get_log_level()
+        self.default_log()
+
+        # workaround for pdfminer logging
+        # https://github.com/pdfminer/pdfminer.six/issues/347
+        logging.getLogger("pdfminer").setLevel(logging.WARNING)
+
+    def show_log_level(self, record):
+        """Allows to show stuff in the log based on the global setting.
+
+        Parameters
+        ----------
+        record : dict
+
+        Returns
+        -------
+        bool
+
+        """
+        return record["level"].no >= logger.level(self.LOG_LEVEL).no
+
+    def default_log(self):
+        """Set the same debug level to all the project dependencies.
+
+        Returns
+        -------
+        """
+
+        time = "<green>[{time:YYYY-MM-DD HH:mm:ss.SSS}]</green>"
+        level = "<level>{level: <6}</level>"
+        origin = "<level>{extra[original_name]}.{extra[original_class]}.{extra[original_caller]}::{extra[original_line]}</level>"
+        message = "<level>{message}</level>"
+        log_format = f"{time} {level} {origin} \n{message}"
+
+        logger.remove()
+        if self.LOG_LEVEL == "DEBUG":
+            return logger.add(
+                sys.stdout,
+                colorize=True,
+                format=log_format,
+                backtrace=True,
+                diagnose=True,
+                filter=self.show_log_level,
+            )
+        else:
+            return logger.add(
+                sys.stdout,
+                colorize=True,
+                format=log_format,
+                filter=self.show_log_level,
+                level=self.LOG_LEVEL,
+            )
+
+    def get_caller_info(self, skip=3):
+        """Get the name of a caller in the format module.class.method.
+
+        Copied from: https://gist.github.com/techtonik/2151727
+
+        Parameters
+        ----------
+        skip :  int
+            Specifies how many levels of stack to skip while getting caller name.
+
+        Returns
+        -------
+        package : str
+            Caller package.
+        module : str
+            Caller module.
+        klass : str
+            Caller class name if there is one, otherwise None.
+        caller : str
+            Caller function or method (if a class exists).
+        line : int
+            The line of the call.
+
+
+        Notes
+        -----
+        skip=1 means "who calls me",
+        skip=2 "who calls my caller" etc.
+
+        An empty string is returned if skipped levels exceed stack height.
+        """
+        stack = inspect.stack()
+        start = 0 + skip
+        if len(stack) < start + 1:
+            return ""
+        parentframe = stack[start][0]
+
+        # module and packagename.
+        module_info = inspect.getmodule(parentframe)
+        if module_info:
+            mod = module_info.__name__.split(".")
+            package = mod[0]
+            module = ".".join(mod[1:])
+
+        # class name.
+        klass = ""
+        if "self" in parentframe.f_locals:
+            klass = parentframe.f_locals["self"].__class__.__name__
+
+        # method or function name.
+        caller = None
+        if parentframe.f_code.co_name != "<module>":  # top level usually
+            caller = parentframe.f_code.co_name
+
+        # call line.
+        line = parentframe.f_lineno
+
+        # Remove reference to frame
+        # See: https://docs.python.org/3/library/inspect.html#the-interpreter-stack
+        del parentframe
+
+        return package, module, klass, caller, line
+
+    def __call__(self, msg, level="DEBUG"):
+        """Alias of self.log()"""
+        self.log(msg, level)
+
+    def debug(self, msg):
+        """Logs a DEBUG message"""
+        self.log(msg, level="DEBUG")
+
+    def info(self, msg):
+        """Logs an INFO message"""
+        self.log(msg, level="INFO")
+
+    def warning(self, msg):
+        """Logs a WARNING message"""
+        self.log(msg, level="WARNING")
+
+    def error(self, msg):
+        """Logs an ERROR message"""
+        self.log(msg, level="ERROR")
+
+    def critical(self, msg):
+        """Logs a CRITICAL message"""
+        self.log(msg, level="CRITICAL")
+
+    def log(self, msg, level="DEBUG"):
+        """Log a message
+
+        Parameters
+        ----------
+        msg :
+            Message to be logged.
+        level : str
+            Logging level."""
+
+        (package, module, klass, caller, line) = self.get_caller_info()
+
+        custom_logger = logger.bind(
+            original_name=f"{package}.{module}",
+            original_line=line,
+            original_class=klass,
+            original_caller=caller,
+        )
+
+        # prettify
+        if type(msg) in [dict, list, str]:  # TODO: should be recursive
+            try:
+                msg = json.dumps(msg, indent=4)
+            except Exception:
+                pass
+        else:
+            msg = pformat(msg)
+
+        # actual log
+        custom_logger.log(level, msg)
+
+    def welcome(self):
+        """Welcome message in the terminal."""
+        secure = "s" if get_env("CCAT_CORE_USE_SECURE_PROTOCOLS") in ("true", "1") else ""
+
+        cat_host = get_env("CCAT_CORE_HOST")
+        cat_port = get_env("CCAT_CORE_PORT")
+        cat_address = f"http{secure}://{cat_host}:{cat_port}"
+
+        with open("cat/welcome.txt", "r") as f:
+            print(f.read())
+
+        print("\n=============== ^._.^ ===============\n")
+        print(f"Cat REST API:   {cat_address}/docs")
+        print(f"Cat ADMIN:      {cat_address}/admin\n")
+        print("======================================")
+
+
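+
+ In practice the engine is used through a module-level instance; a minimal sketch, assuming `cat/log.py` exposes `log = CatLogEngine()` as Cheshire Cat plugins typically import it:
+
+from cat.log import log
+
+log.info("plugin loaded")             # shown when CCAT_LOG_LEVEL is INFO or lower
+log.warning({"event": "cache_miss"})  # dicts, lists and strings are pretty-printed as JSON
+log("raw message", level="ERROR")     # __call__ is an alias of log.log()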

+ __call__(msg, level='DEBUG')
+
+ Alias of self.log()
+
+ Source code in cat/log.py
def __call__(self, msg, level="DEBUG"):
+    """Alias of self.log()"""
+    self.log(msg, level)
+
+
+

+ critical(msg)
+
+ Logs a CRITICAL message
+
+ Source code in cat/log.py
def critical(self, msg):
+    """Logs a CRITICAL message"""
+    self.log(msg, level="CRITICAL")
+
+
+

+ debug(msg)
+
+ Logs a DEBUG message
+
+ Source code in cat/log.py
def debug(self, msg):
+    """Logs a DEBUG message"""
+    self.log(msg, level="DEBUG")
+
+
+

+ default_log()
+
+ Set the same log level for all the project dependencies.
+
+ Source code in cat/log.py
def default_log(self):
+    """Set the same debug level to all the project dependencies.
+
+    Returns
+    -------
+    """
+
+    time = "<green>[{time:YYYY-MM-DD HH:mm:ss.SSS}]</green>"
+    level = "<level>{level: <6}</level>"
+    origin = "<level>{extra[original_name]}.{extra[original_class]}.{extra[original_caller]}::{extra[original_line]}</level>"
+    message = "<level>{message}</level>"
+    log_format = f"{time} {level} {origin} \n{message}"
+
+    logger.remove()
+    if self.LOG_LEVEL == "DEBUG":
+        return logger.add(
+            sys.stdout,
+            colorize=True,
+            format=log_format,
+            backtrace=True,
+            diagnose=True,
+            filter=self.show_log_level,
+        )
+    else:
+        return logger.add(
+            sys.stdout,
+            colorize=True,
+            format=log_format,
+            filter=self.show_log_level,
+            level=self.LOG_LEVEL,
+        )
+
+
+

+ error(msg)
+
+ Logs an ERROR message
+
+ Source code in cat/log.py
def error(self, msg):
+    """Logs an ERROR message"""
+    self.log(msg, level="ERROR")
+
+
+

+ get_caller_info(skip=3)
+
+ Get the name of a caller in the format module.class.method.
+
+ Copied from: https://gist.github.com/techtonik/2151727
+
+ Parameters:
+
+   skip : int
+       Specifies how many levels of stack to skip while getting caller name. (default: 3)
+
+ Returns:
+
+   package : str
+       Caller package.
+   module : str
+       Caller module.
+   klass : str
+       Caller class name if there is one, otherwise None.
+   caller : str
+       Caller function or method (if a class exists).
+   line : int
+       The line of the call.
+
+ Notes
+
+ skip=1 means "who calls me", skip=2 "who calls my caller" etc.
+
+ An empty string is returned if skipped levels exceed stack height.
+
+ Source code in cat/log.py
def get_caller_info(self, skip=3):
+    """Get the name of a caller in the format module.class.method.
+
+    Copied from: https://gist.github.com/techtonik/2151727
+
+    Parameters
+    ----------
+    skip :  int
+        Specifies how many levels of stack to skip while getting caller name.
+
+    Returns
+    -------
+    package : str
+        Caller package.
+    module : str
+        Caller module.
+    klass : str
+        Caller class name if there is one, otherwise None.
+    caller : str
+        Caller function or method (if a class exists).
+    line : int
+        The line of the call.
+
+
+    Notes
+    -----
+    skip=1 means "who calls me",
+    skip=2 "who calls my caller" etc.
+
+    An empty string is returned if skipped levels exceed stack height.
+    """
+    stack = inspect.stack()
+    start = 0 + skip
+    if len(stack) < start + 1:
+        return ""
+    parentframe = stack[start][0]
+
+    # module and packagename.
+    module_info = inspect.getmodule(parentframe)
+    if module_info:
+        mod = module_info.__name__.split(".")
+        package = mod[0]
+        module = ".".join(mod[1:])
+
+    # class name.
+    klass = ""
+    if "self" in parentframe.f_locals:
+        klass = parentframe.f_locals["self"].__class__.__name__
+
+    # method or function name.
+    caller = None
+    if parentframe.f_code.co_name != "<module>":  # top level usually
+        caller = parentframe.f_code.co_name
+
+    # call line.
+    line = parentframe.f_lineno
+
+    # Remove reference to frame
+    # See: https://docs.python.org/3/library/inspect.html#the-interpreter-stack
+    del parentframe
+
+    return package, module, klass, caller, line
+
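+
+ A sketch of the returned tuple shape (values are illustrative and depend entirely on the call site):
+
+engine = CatLogEngine()
+package, module, klass, caller, line = engine.get_caller_info(skip=1)
+# e.g. ("cat", "looking_glass.stray_cat", "StrayCat", "run", 42)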
+
+

+ info(msg)
+
+ Logs an INFO message
+
+ Source code in cat/log.py
def info(self, msg):
+    """Logs an INFO message"""
+    self.log(msg, level="INFO")
+
+
+

+ log(msg, level='DEBUG')
+
+ Log a message
+
+ Parameters:
+
+   msg
+       Message to be logged. (required)
+   level : str
+       Logging level. (default: 'DEBUG')
+
+ Source code in cat/log.py
def log(self, msg, level="DEBUG"):
+    """Log a message
+
+    Parameters
+    ----------
+    msg :
+        Message to be logged.
+    level : str
+        Logging level."""
+
+    (package, module, klass, caller, line) = self.get_caller_info()
+
+    custom_logger = logger.bind(
+        original_name=f"{package}.{module}",
+        original_line=line,
+        original_class=klass,
+        original_caller=caller,
+    )
+
+    # prettify
+    if type(msg) in [dict, list, str]:  # TODO: should be recursive
+        try:
+            msg = json.dumps(msg, indent=4)
+        except Exception:
+            pass
+    else:
+        msg = pformat(msg)
+
+    # actual log
+    custom_logger.log(level, msg)
+
+
+

+ show_log_level(record)
+
+ Allows showing entries in the log based on the global setting.
+
+ Parameters:
+
+   record : dict (required)
+
+ Returns:
+
+   bool
+
+ Source code in cat/log.py
def show_log_level(self, record):
+    """Allows to show stuff in the log based on the global setting.
+
+    Parameters
+    ----------
+    record : dict
+
+    Returns
+    -------
+    bool
+
+    """
+    return record["level"].no >= logger.level(self.LOG_LEVEL).no
+
+
+

+ warning(msg)
+
+ Logs a WARNING message
+
+ Source code in cat/log.py
def warning(self, msg):
+    """Logs a WARNING message"""
+    self.log(msg, level="WARNING")
+
+
+

+ welcome()
+
+ Welcome message in the terminal.
+
+ Source code in cat/log.py
def welcome(self):
+    """Welcome message in the terminal."""
+    secure = "s" if get_env("CCAT_CORE_USE_SECURE_PROTOCOLS") in ("true", "1") else ""
+
+    cat_host = get_env("CCAT_CORE_HOST")
+    cat_port = get_env("CCAT_CORE_PORT")
+    cat_address = f"http{secure}://{cat_host}:{cat_port}"
+
+    with open("cat/welcome.txt", "r") as f:
+        print(f.read())
+
+    print("\n=============== ^._.^ ===============\n")
+    print(f"Cat REST API:   {cat_address}/docs")
+    print(f"Cat ADMIN:      {cat_address}/admin\n")
+    print("======================================")
+
+
+
+ +
+ + +

+ get_log_level()
+
+ Return the global LOG level.
+
+ Source code in cat/log.py
def get_log_level():
+    """Return the global LOG level."""
+    return get_env("CCAT_LOG_LEVEL")
+
+
+
\ No newline at end of file
diff --git a/API_Documentation/looking_glass/cheshire_cat/index.html b/API_Documentation/looking_glass/cheshire_cat/index.html
new file mode 100644
index 000000000..3e74ece96
--- /dev/null
+++ b/API_Documentation/looking_glass/cheshire_cat/index.html
@@ -0,0 +1,5612 @@
+ cheshire_cat - Cheshire Cat AI docs

cheshire_cat


+ CheshireCat
+
+ The Cheshire Cat.
+
+ This is the main class that manages everything.
+
+ Attributes:
+
+   todo : list
+       Yet to be written.
+
+ Source code in cat/looking_glass/cheshire_cat.py
@singleton
+class CheshireCat:
+    """The Cheshire Cat.
+
+    This is the main class that manages everything.
+
+    Attributes
+    ----------
+    todo : list
+        Yet to be written.
+
+    """
+
+    def __init__(self):
+        """Cat initialization.
+
+        At init time the Cat executes the bootstrap.
+        """
+
+        # bootstrap the Cat! ^._.^
+
+        # load AuthHandler
+        self.load_auth()
+
+        # Start scheduling system
+        self.white_rabbit = WhiteRabbit()
+
+        # instantiate MadHatter (loads all plugins' hooks and tools)
+        self.mad_hatter = MadHatter()
+
+        # allows plugins to do something before cat components are loaded
+        self.mad_hatter.execute_hook("before_cat_bootstrap", cat=self)
+
+        # load LLM and embedder
+        self.load_natural_language()
+
+        # Load memories (vector collections and working_memory)
+        self.load_memory()
+
+        # After memory is loaded, we can get/create tools embeddings
+        # every time the mad_hatter finishes syncing hooks, tools and forms, it will notify the Cat (so it can embed tools in vector memory)
+        self.mad_hatter.on_finish_plugins_sync_callback = self.embed_procedures
+        self.embed_procedures()  # first time launched manually
+
+        # Main agent instance (for reasoning)
+        self.main_agent = MainAgent()
+
+        # Rabbit Hole Instance
+        self.rabbit_hole = RabbitHole(self)  # :(
+
+        # allows plugins to do something after the cat bootstrap is complete
+        self.mad_hatter.execute_hook("after_cat_bootstrap", cat=self)
+
+    def load_natural_language(self):
+        """Load Natural Language related objects.
+
+        The method exposes in the Cat all the NLP related stuff. Specifically, it sets the language models
+        (LLM and Embedder).
+
+        Warnings
+        --------
+        When using small Language Models it is suggested to turn off the memories and make the main prompt smaller
+        to prevent them from failing.
+
+        See Also
+        --------
+        agent_prompt_prefix
+        """
+        # LLM and embedder
+        self._llm = self.load_language_model()
+        self.embedder = self.load_language_embedder()
+
+    def load_language_model(self) -> BaseLanguageModel:
+        """Large Language Model (LLM) selection at bootstrap time.
+
+        Returns
+        -------
+        llm : BaseLanguageModel
+            Langchain `BaseLanguageModel` instance of the selected model.
+
+        Notes
+        -----
+        Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,
+        the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.
+
+        """
+
+        selected_llm = crud.get_setting_by_name(name="llm_selected")
+
+        if selected_llm is None:
+            # return default LLM
+            llm = LLMDefaultConfig.get_llm_from_config({})
+
+        else:
+            # get LLM factory class
+            selected_llm_class = selected_llm["value"]["name"]
+            FactoryClass = get_llm_from_name(selected_llm_class)
+
+            # obtain configuration and instantiate LLM
+            selected_llm_config = crud.get_setting_by_name(name=selected_llm_class)
+            try:
+                llm = FactoryClass.get_llm_from_config(selected_llm_config["value"])
+            except Exception:
+                import traceback
+
+                traceback.print_exc()
+                llm = LLMDefaultConfig.get_llm_from_config({})
+
+        return llm
+
+    def load_language_embedder(self) -> embedders.EmbedderSettings:
+        """Hook into the  embedder selection.
+
+        Allows to modify how the Cat selects the embedder at bootstrap time.
+
+        Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,
+        the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.
+
+        Returns
+        -------
+        embedder : Embeddings
+            Selected embedder model.
+        """
+        # Embedding LLM
+
+        selected_embedder = crud.get_setting_by_name(name="embedder_selected")
+
+        if selected_embedder is not None:
+            # get Embedder factory class
+            selected_embedder_class = selected_embedder["value"]["name"]
+            FactoryClass = get_embedder_from_name(selected_embedder_class)
+
+            # obtain configuration and instantiate Embedder
+            selected_embedder_config = crud.get_setting_by_name(
+                name=selected_embedder_class
+            )
+            try:
+                embedder = FactoryClass.get_embedder_from_config(
+                    selected_embedder_config["value"]
+                )
+            except AttributeError:
+                import traceback
+
+                traceback.print_exc()
+                embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})
+            return embedder
+
+        # OpenAI embedder
+        if type(self._llm) in [OpenAI, ChatOpenAI]:
+            embedder = embedders.EmbedderOpenAIConfig.get_embedder_from_config(
+                {
+                    "openai_api_key": self._llm.openai_api_key,
+                }
+            )
+
+        # For Azure avoid automatic embedder selection
+
+        # Cohere
+        elif type(self._llm) in [Cohere]:
+            embedder = embedders.EmbedderCohereConfig.get_embedder_from_config(
+                {
+                    "cohere_api_key": self._llm.cohere_api_key,
+                    "model": "embed-multilingual-v2.0",
+                    # Now the best model for embeddings is embed-multilingual-v2.0
+                }
+            )
+
+        elif type(self._llm) in [ChatGoogleGenerativeAI]:
+            embedder = embedders.EmbedderGeminiChatConfig.get_embedder_from_config(
+                {
+                    "model": "models/embedding-001",
+                    "google_api_key": self._llm.google_api_key,
+                }
+            )
+
+        else:
+            # If no embedder matches vendor, and no external embedder is configured, we use the DumbEmbedder.
+            #   `This embedder is not a model properly trained
+            #    and this makes it not suitable to effectively embed text,
+            #    "but it does not know this and embeds anyway".` - cit. Nicola Corbellini
+            embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})
+
+        return embedder
+
+    def load_auth(self):
+
+        # Custom auth_handler # TODOAUTH: change the name to custom_auth
+        selected_auth_handler = crud.get_setting_by_name(name="auth_handler_selected")
+
+        # if no auth_handler is saved, use default one and save to db
+        if selected_auth_handler is None:
+            # create the auth settings
+            crud.upsert_setting_by_name(
+                models.Setting(
+                    name="CoreOnlyAuthConfig", category="auth_handler_factory", value={}
+                )
+            )
+            crud.upsert_setting_by_name(
+                models.Setting(
+                    name="auth_handler_selected",
+                    category="auth_handler_factory",
+                    value={"name": "CoreOnlyAuthConfig"},
+                )
+            )
+
+            # reload from db
+            selected_auth_handler = crud.get_setting_by_name(
+                name="auth_handler_selected"
+            )
+
+        # get AuthHandler factory class
+        selected_auth_handler_class = selected_auth_handler["value"]["name"]
+        FactoryClass = get_auth_handler_from_name(selected_auth_handler_class)
+
+        # obtain configuration and instantiate AuthHandler
+        selected_auth_handler_config = crud.get_setting_by_name(
+            name=selected_auth_handler_class
+        )
+        try:
+            auth_handler = FactoryClass.get_auth_handler_from_config(
+                selected_auth_handler_config["value"]
+            )
+        except Exception:
+            import traceback
+
+            traceback.print_exc()
+
+            auth_handler = (
+                auth_handlers.CoreOnlyAuthConfig.get_auth_handler_from_config({})
+            )
+
+        self.custom_auth_handler = auth_handler
+        self.core_auth_handler = CoreAuthHandler()
+
+    def load_memory(self):
+        """Load LongTerMemory and WorkingMemory."""
+        # Memory
+
+        # Get embedder size (langchain classes do not store it)
+        embedder_size = len(self.embedder.embed_query("hello world"))
+
+        # Get embedder name (useful for vectorstore aliases)
+        if hasattr(self.embedder, "model"):
+            embedder_name = self.embedder.model
+        elif hasattr(self.embedder, "repo_id"):
+            embedder_name = self.embedder.repo_id
+        else:
+            embedder_name = "default_embedder"
+
+        # instantiate long term memory
+        vector_memory_config = {
+            "embedder_name": embedder_name,
+            "embedder_size": embedder_size,
+        }
+        self.memory = LongTermMemory(vector_memory_config=vector_memory_config)
+
+    def build_embedded_procedures_hashes(self, embedded_procedures):
+        hashes = {}
+        for ep in embedded_procedures:
+            # log.warning(ep)
+            metadata = ep.payload["metadata"]
+            content = ep.payload["page_content"]
+            source = metadata["source"]
+            # there may be legacy points with no trigger_type
+            trigger_type = metadata.get("trigger_type", "unsupported")
+
+            p_hash = f"{source}.{trigger_type}.{content}"
+            hashes[p_hash] = ep.id
+
+        return hashes
+
+    def build_active_procedures_hashes(self, active_procedures):
+        hashes = {}
+        for ap in active_procedures:
+            for trigger_type, trigger_list in ap.triggers_map.items():
+                for trigger_content in trigger_list:
+                    p_hash = f"{ap.name}.{trigger_type}.{trigger_content}"
+                    hashes[p_hash] = {
+                        "obj": ap,
+                        "source": ap.name,
+                        "type": ap.procedure_type,
+                        "trigger_type": trigger_type,
+                        "content": trigger_content,
+                    }
+        return hashes
+
+    def embed_procedures(self):
+        # Retrieve from vectorDB all procedural embeddings
+        embedded_procedures = self.memory.vectors.procedural.get_all_points()
+        embedded_procedures_hashes = self.build_embedded_procedures_hashes(
+            embedded_procedures
+        )
+
+        # Easy access to active procedures in mad_hatter (source of truth!)
+        active_procedures_hashes = self.build_active_procedures_hashes(
+            self.mad_hatter.procedures
+        )
+
+        # points_to_be_kept     = set(active_procedures_hashes.keys()) and set(embedded_procedures_hashes.keys()) not necessary
+        points_to_be_deleted = set(embedded_procedures_hashes.keys()) - set(
+            active_procedures_hashes.keys()
+        )
+        points_to_be_embedded = set(active_procedures_hashes.keys()) - set(
+            embedded_procedures_hashes.keys()
+        )
+
+        points_to_be_deleted_ids = [
+            embedded_procedures_hashes[p] for p in points_to_be_deleted
+        ]
+        if points_to_be_deleted_ids:
+            log.warning(f"Deleting triggers: {points_to_be_deleted}")
+            self.memory.vectors.procedural.delete_points(points_to_be_deleted_ids)
+
+        active_triggers_to_be_embedded = [
+            active_procedures_hashes[p] for p in points_to_be_embedded
+        ]
+        for t in active_triggers_to_be_embedded:
+            metadata = {
+                "source": t["source"],
+                "type": t["type"],
+                "trigger_type": t["trigger_type"],
+                "when": time.time(),
+            }
+
+            trigger_embedding = self.embedder.embed_documents([t["content"]])
+            self.memory.vectors.procedural.add_point(
+                t["content"],
+                trigger_embedding[0],
+                metadata,
+            )
+
+            log.warning(
+                f"Newly embedded {t['type']} trigger: {t['source']}, {t['trigger_type']}, {t['content']}"
+            )
+
+    def send_ws_message(self, content: str, msg_type="notification"):
+        log.error("No websocket connection open")
+
+    # REFACTOR: cat.llm should be available here, without streaming clearly
+    # (one could be interested in calling the LLM anytime, not only when there is a session)
+    def llm(self, prompt, *args, **kwargs) -> str:
+        """Generate a response using the LLM model.
+
+        This method is useful for generating a response with both a chat and a completion model using the same syntax
+
+        Parameters
+        ----------
+        prompt : str
+            The prompt for generating the response.
+
+        Returns
+        -------
+        str
+            The generated response.
+
+        """
+
+        # Add a token counter to the callbacks
+        caller = utils.get_caller_info()
+
+        # here we deal with motherfucking langchain
+        prompt = ChatPromptTemplate(
+            messages=[
+                SystemMessage(content=prompt)
+            ]
+        )
+
+        chain = (
+            prompt
+            | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f"{caller} prompt"))
+            | self._llm
+            | RunnableLambda(lambda x: utils.langchain_log_output(x, f"{caller} prompt output"))
+            | StrOutputParser()
+        )
+
+        output = chain.invoke(
+            {}, # in case we need to pass info to the template
+        )
+
+        return output
+
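+
+ Because of the `@singleton` decorator, every instantiation after the first returns the same bootstrapped object; a sketch:
+
+ccat = CheshireCat()   # first call runs the full bootstrap
+same = CheshireCat()   # later calls return the same instance
+assert ccat is same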
+

+ __init__()
+
+ Cat initialization.
+
+ At init time the Cat executes the bootstrap.
+
+ Source code in cat/looking_glass/cheshire_cat.py
def __init__(self):
+    """Cat initialization.
+
+    At init time the Cat executes the bootstrap.
+    """
+
+    # bootstrap the Cat! ^._.^
+
+    # load AuthHandler
+    self.load_auth()
+
+    # Start scheduling system
+    self.white_rabbit = WhiteRabbit()
+
+    # instantiate MadHatter (loads all plugins' hooks and tools)
+    self.mad_hatter = MadHatter()
+
+    # allows plugins to do something before cat components are loaded
+    self.mad_hatter.execute_hook("before_cat_bootstrap", cat=self)
+
+    # load LLM and embedder
+    self.load_natural_language()
+
+    # Load memories (vector collections and working_memory)
+    self.load_memory()
+
+    # After memory is loaded, we can get/create tools embeddings
+    # every time the mad_hatter finishes syncing hooks, tools and forms, it will notify the Cat (so it can embed tools in vector memory)
+    self.mad_hatter.on_finish_plugins_sync_callback = self.embed_procedures
+    self.embed_procedures()  # first time launched manually
+
+    # Main agent instance (for reasoning)
+    self.main_agent = MainAgent()
+
+    # Rabbit Hole Instance
+    self.rabbit_hole = RabbitHole(self)  # :(
+
+    # allows plugins to do something after the cat bootstrap is complete
+    self.mad_hatter.execute_hook("after_cat_bootstrap", cat=self)
+
+
+
+ +
+ +
+ + +

+ llm(prompt, *args, **kwargs) + +

+ + +
+ +

Generate a response using the LLM model.

+

This method is useful for generating a response with both a chat and a completion model using the same syntax

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
prompt + str + +
+

The prompt for generating the response.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

The generated response.

+
+
+ +
+ Source code in cat/looking_glass/cheshire_cat.py +
def llm(self, prompt, *args, **kwargs) -> str:
+    """Generate a response using the LLM model.
+
+    This method is useful for generating a response with both a chat and a completion model using the same syntax
+
+    Parameters
+    ----------
+    prompt : str
+        The prompt for generating the response.
+
+    Returns
+    -------
+    str
+        The generated response.
+
+    """
+
+    # Add a token counter to the callbacks
+    caller = utils.get_caller_info()
+
+    # here we deal with langchain
+    prompt = ChatPromptTemplate(
+        messages=[
+            SystemMessage(content=prompt)
+        ]
+    )
+
+    chain = (
+        prompt
+        | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f"{caller} prompt"))
+        | self._llm
+        | RunnableLambda(lambda x: utils.langchain_log_output(x, f"{caller} prompt output"))
+        | StrOutputParser()
+    )
+
+    output = chain.invoke(
+        {}, # in case we need to pass info to the template
+    )
+
+    return output
+
+
+
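As a usage illustration, the method can be called from a plugin once the Cat is up. A minimal sketch, assuming the standard plugin import path `cat.mad_hatter.decorators` (the hook body and the prompt are illustrative):

```python
from cat.mad_hatter.decorators import hook

@hook
def after_cat_bootstrap(cat):
    # cat.llm wraps prompt templating, the LLM call and string parsing in one step
    reply = cat.llm("Reply with a single word: which animal grins and vanishes?")
    print(reply)
```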
+ +
+ +
+ + +

+ load_language_embedder() + +

+ + +
+ +

Hook into the embedder selection.

+

Allows you to modify how the Cat selects the embedder at bootstrap time.

+

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, +the Main Agent, the Rabbit Hole and the White Rabbit.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
embedder + Embeddings + +
+

Selected embedder model.

+
+
+ +
+ Source code in cat/looking_glass/cheshire_cat.py +
def load_language_embedder(self) -> embedders.EmbedderSettings:
+    """Hook into the embedder selection.
+
+    Allows you to modify how the Cat selects the embedder at bootstrap time.
+
+    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,
+    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.
+
+    Parameters
+    ----------
+    cat: CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    embedder : Embeddings
+        Selected embedder model.
+    """
+    # Embedding LLM
+
+    selected_embedder = crud.get_setting_by_name(name="embedder_selected")
+
+    if selected_embedder is not None:
+        # get Embedder factory class
+        selected_embedder_class = selected_embedder["value"]["name"]
+        FactoryClass = get_embedder_from_name(selected_embedder_class)
+
+        # obtain configuration and instantiate Embedder
+        selected_embedder_config = crud.get_setting_by_name(
+            name=selected_embedder_class
+        )
+        try:
+            embedder = FactoryClass.get_embedder_from_config(
+                selected_embedder_config["value"]
+            )
+        except AttributeError:
+            import traceback
+
+            traceback.print_exc()
+            embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})
+        return embedder
+
+    # OpenAI embedder
+    if type(self._llm) in [OpenAI, ChatOpenAI]:
+        embedder = embedders.EmbedderOpenAIConfig.get_embedder_from_config(
+            {
+                "openai_api_key": self._llm.openai_api_key,
+            }
+        )
+
+    # For Azure avoid automatic embedder selection
+
+    # Cohere
+    elif type(self._llm) in [Cohere]:
+        embedder = embedders.EmbedderCohereConfig.get_embedder_from_config(
+            {
+                "cohere_api_key": self._llm.cohere_api_key,
+                "model": "embed-multilingual-v2.0",
+                # Now the best model for embeddings is embed-multilingual-v2.0
+            }
+        )
+
+    elif type(self._llm) in [ChatGoogleGenerativeAI]:
+        embedder = embedders.EmbedderGeminiChatConfig.get_embedder_from_config(
+            {
+                "model": "models/embedding-001",
+                "google_api_key": self._llm.google_api_key,
+            }
+        )
+
+    else:
+        # If no embedder matches vendor, and no external embedder is configured, we use the DumbEmbedder.
+        #   `This embedder is not a model properly trained
+        #    and this makes it not suitable to effectively embed text,
+        #    "but it does not know this and embeds anyway".` - cit. Nicola Corbellini
+        embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})
+
+    return embedder
+
+
+
+ +
+ +
+ + +

+ load_language_model() + +

+ + +
+ +

Large Language Model (LLM) selection at bootstrap time.

+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
llm + BaseLanguageModel + +
+

Langchain BaseLanguageModel instance of the selected model.

+
+
+ + +
+ Notes +

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, +the Main Agent, the Rabbit Hole and the White Rabbit.

+
+
+ Source code in cat/looking_glass/cheshire_cat.py +
def load_language_model(self) -> BaseLanguageModel:
+    """Large Language Model (LLM) selection at bootstrap time.
+
+    Returns
+    -------
+    llm : BaseLanguageModel
+        Langchain `BaseLanguageModel` instance of the selected model.
+
+    Notes
+    -----
+    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,
+    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.
+
+    """
+
+    selected_llm = crud.get_setting_by_name(name="llm_selected")
+
+    if selected_llm is None:
+        # return default LLM
+        llm = LLMDefaultConfig.get_llm_from_config({})
+
+    else:
+        # get LLM factory class
+        selected_llm_class = selected_llm["value"]["name"]
+        FactoryClass = get_llm_from_name(selected_llm_class)
+
+        # obtain configuration and instantiate LLM
+        selected_llm_config = crud.get_setting_by_name(name=selected_llm_class)
+        try:
+            llm = FactoryClass.get_llm_from_config(selected_llm_config["value"])
+        except Exception:
+            import traceback
+
+            traceback.print_exc()
+            llm = LLMDefaultConfig.get_llm_from_config({})
+
+    return llm
+
+
+
+ +
+ +
+ + +

+ load_memory() + +

+ + +
+ +

Load LongTermMemory and WorkingMemory.

+ +
+ Source code in cat/looking_glass/cheshire_cat.py +
def load_memory(self):
+    """Load LongTermMemory and WorkingMemory."""
+    # Memory
+
+    # Get embedder size (langchain classes do not store it)
+    embedder_size = len(self.embedder.embed_query("hello world"))
+
+    # Get embedder name (useful for vectorstore aliases)
+    if hasattr(self.embedder, "model"):
+        embedder_name = self.embedder.model
+    elif hasattr(self.embedder, "repo_id"):
+        embedder_name = self.embedder.repo_id
+    else:
+        embedder_name = "default_embedder"
+
+    # instantiate long term memory
+    vector_memory_config = {
+        "embedder_name": embedder_name,
+        "embedder_size": embedder_size,
+    }
+    self.memory = LongTermMemory(vector_memory_config=vector_memory_config)
+
+
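The dimension probe above is worth calling out: langchain embedder classes do not expose their output size, so the Cat embeds a dummy string and measures the result. A standalone sketch of the same trick; `FakeEmbeddings` is only a stand-in here for whatever embedder is configured:

```python
from langchain_community.embeddings import FakeEmbeddings

embedder = FakeEmbeddings(size=1536)  # stand-in for the configured embedder
embedder_size = len(embedder.embed_query("hello world"))
print(embedder_size)  # 1536
```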
+
+ +
+ +
+ + +

+ load_natural_language() + +

+ + +
+ +

Load Natural Language related objects.

+

The method exposes all the NLP-related components in the Cat. Specifically, it sets the language models (LLM and Embedder).

+ + +
+ Warnings +

When using small Language Models, it is suggested to turn off the memories and make the main prompt smaller to prevent them from failing.

+
+ +
+ See Also +

agent_prompt_prefix

+
+
+ Source code in cat/looking_glass/cheshire_cat.py +
def load_natural_language(self):
+    """Load Natural Language related objects.
+
+    The method exposes all the NLP-related components in the Cat. Specifically, it sets the language models
+    (LLM and Embedder).
+
+    Warnings
+    --------
+    When using small Language Models, it is suggested to turn off the memories and make the main prompt smaller
+    to prevent them from failing.
+
+    See Also
+    --------
+    agent_prompt_prefix
+    """
+    # LLM and embedder
+    self._llm = self.load_language_model()
+    self.embedder = self.load_language_embedder()
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/looking_glass/stray_cat/index.html b/API_Documentation/looking_glass/stray_cat/index.html new file mode 100644 index 000000000..f106b8ae1 --- /dev/null +++ b/API_Documentation/looking_glass/stray_cat/index.html @@ -0,0 +1,6595 @@ + + + + + + + + + + + + + + + + + + + + + + + + + stray_cat - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

stray_cat

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ StrayCat + + +

+ + +
+ + +

User/session-based object containing working memory and a few utility pointers.

+ +
+ Source code in cat/looking_glass/stray_cat.py +
class StrayCat:
+    """User/session-based object containing working memory and a few utility pointers."""
+
+    def __init__(
+        self,
+        user_id: str,
+        main_loop,
+        user_data: dict = {},
+        ws: WebSocket = None,
+    ):
+        self.__user_id = user_id
+        self.working_memory = WorkingMemory()
+
+        # attribute to store ws connection
+        self.__ws = ws
+
+        self.__main_loop = main_loop
+
+        self.__loop = asyncio.new_event_loop()
+
+    def __repr__(self):
+        return f"StrayCat(user_id={self.user_id})"
+
+    def __send_ws_json(self, data: Any):
+        # Run the coroutine in the main event loop in the main thread
+        # and wait for the result
+        asyncio.run_coroutine_threadsafe(
+            self.__ws.send_json(data), loop=self.__main_loop
+        ).result()
+
+    def __build_why(self) -> MessageWhy:
+        # build data structure for output (response and why with memories)
+        # TODO: these 3 lines are a mess, simplify
+        episodic_report = [
+            dict(d[0]) | {"score": float(d[1]), "id": d[3]}
+            for d in self.working_memory.episodic_memories
+        ]
+        declarative_report = [
+            dict(d[0]) | {"score": float(d[1]), "id": d[3]}
+            for d in self.working_memory.declarative_memories
+        ]
+        procedural_report = [
+            dict(d[0]) | {"score": float(d[1]), "id": d[3]}
+            for d in self.working_memory.procedural_memories
+        ]
+
+        # why this response?
+        why = MessageWhy(
+            input=self.working_memory.user_message_json.text,
+            intermediate_steps=[],
+            memory={
+                "episodic": episodic_report,
+                "declarative": declarative_report,
+                "procedural": procedural_report,
+            },
+            model_interactions=self.working_memory.model_interactions,
+        )
+
+        return why
+
+    def send_ws_message(self, content: str, msg_type: MSG_TYPES = "notification"):
+        """Send a message via websocket.
+
+        This method is useful for sending a message via websocket directly without passing through the LLM
+
+        Parameters
+        ----------
+        content : str
+            The content of the message.
+        msg_type : str
+            The type of the message. Should be either `notification`, `chat`, `chat_token` or `error`
+        """
+
+        if self.__ws is None:
+            log.warning(f"No websocket connection is open for user {self.user_id}")
+            return
+
+        options = get_args(MSG_TYPES)
+
+        if msg_type not in options:
+            raise ValueError(
+                f"The message type `{msg_type}` is not valid. Valid types: {', '.join(options)}"
+            )
+
+        if msg_type == "error":
+            self.__send_ws_json(
+                {"type": msg_type, "name": "GenericError", "description": str(content)}
+            )
+        else:
+            self.__send_ws_json({"type": msg_type, "content": content})
+
+    def send_chat_message(self, message: Union[str, CatMessage], save=False):
+        if self.__ws is None:
+            log.warning(f"No websocket connection is open for user {self.user_id}")
+            return
+
+        if isinstance(message, str):
+            why = self.__build_why()
+            message = CatMessage(content=message, user_id=self.user_id, why=why)
+
+        if save:
+            self.working_memory.update_conversation_history(
+                who="AI", message=message["content"], why=message["why"]
+            )
+
+        self.__send_ws_json(message.model_dump())
+
+    def send_notification(self, content: str):
+        self.send_ws_message(content=content, msg_type="notification")
+
+    def send_error(self, error: Union[str, Exception]):
+        if self.__ws is None:
+            log.warning(f"No websocket connection is open for user {self.user_id}")
+            return
+
+        if isinstance(error, str):
+            error_message = {
+                "type": "error",
+                "name": "GenericError",
+                "description": str(error),
+            }
+        else:
+            error_message = {
+                "type": "error",
+                "name": error.__class__.__name__,
+                "description": str(error),
+            }
+
+        self.__send_ws_json(error_message)
+
+    def recall_relevant_memories_to_working_memory(self, query=None):
+        """Retrieve context from memory.
+
+        The method retrieves the relevant memories from the vector collections that are given as context to the LLM.
+        Recalled memories are stored in the working memory.
+
+        Parameters
+        ----------
+        query : str, optional
+            The query used to make a similarity search in the Cat's vector memories.
+            If not provided, the query will be derived from the user's message.
+
+        Notes
+        -----
+        The user's message is used as a query to make a similarity search in the Cat's vector memories.
+        Five hooks allow you to customize the recall pipeline before and after it runs.
+
+        See Also
+        --------
+        cat_recall_query
+        before_cat_recalls_memories
+        before_cat_recalls_episodic_memories
+        before_cat_recalls_declarative_memories
+        before_cat_recalls_procedural_memories
+        after_cat_recalls_memories
+        """
+        recall_query = query
+
+        if query is None:
+            # If query is not provided, use the user's message as the query
+            recall_query = self.working_memory.user_message_json.text
+
+        # We may want to search in memory
+        recall_query = self.mad_hatter.execute_hook(
+            "cat_recall_query", recall_query, cat=self
+        )
+        log.info(f"Recall query: '{recall_query}'")
+
+        # Embed recall query
+        recall_query_embedding = self.embedder.embed_query(recall_query)
+        self.working_memory.recall_query = recall_query
+
+        # keep track of embedder model usage
+        self.working_memory.model_interactions.append(
+            EmbedderModelInteraction(
+                prompt=recall_query,
+                reply=recall_query_embedding,
+                input_tokens=len(tiktoken.get_encoding("cl100k_base").encode(recall_query)),
+            )
+        )
+
+        # hook to do something before recall begins
+        self.mad_hatter.execute_hook("before_cat_recalls_memories", cat=self)
+
+        # Setting default recall configs for each memory
+        # TODO: can these data structures become instances of a RecallSettings class?
+        default_episodic_recall_config = {
+            "embedding": recall_query_embedding,
+            "k": 3,
+            "threshold": 0.7,
+            "metadata": {"source": self.user_id},
+        }
+
+        default_declarative_recall_config = {
+            "embedding": recall_query_embedding,
+            "k": 3,
+            "threshold": 0.7,
+            "metadata": None,
+        }
+
+        default_procedural_recall_config = {
+            "embedding": recall_query_embedding,
+            "k": 3,
+            "threshold": 0.7,
+            "metadata": None,
+        }
+
+        # hooks to change recall configs for each memory
+        recall_configs = [
+            self.mad_hatter.execute_hook(
+                "before_cat_recalls_episodic_memories",
+                default_episodic_recall_config,
+                cat=self,
+            ),
+            self.mad_hatter.execute_hook(
+                "before_cat_recalls_declarative_memories",
+                default_declarative_recall_config,
+                cat=self,
+            ),
+            self.mad_hatter.execute_hook(
+                "before_cat_recalls_procedural_memories",
+                default_procedural_recall_config,
+                cat=self,
+            ),
+        ]
+
+        memory_types = self.memory.vectors.collections.keys()
+
+        for config, memory_type in zip(recall_configs, memory_types):
+            memory_key = f"{memory_type}_memories"
+
+            # recall relevant memories for collection
+            vector_memory = getattr(self.memory.vectors, memory_type)
+            memories = vector_memory.recall_memories_from_embedding(**config)
+
+            setattr(
+                self.working_memory, memory_key, memories
+            )  # self.working_memory.procedural_memories = ...
+
+        # hook to modify/enrich retrieved memories
+        self.mad_hatter.execute_hook("after_cat_recalls_memories", cat=self)
+
+    def llm(self, prompt: str, stream: bool = False) -> str:
+        """Generate a response using the LLM model.
+
+        This method is useful for generating a response with both a chat and a completion model using the same syntax
+
+        Parameters
+        ----------
+        prompt : str
+            The prompt for generating the response.
+
+        Returns
+        -------
+        str
+            The generated response.
+
+        """
+
+        # should we stream the tokens?
+        callbacks = []
+        if stream:
+            callbacks.append(NewTokenHandler(self))
+
+        # Add a token counter to the callbacks
+        caller = utils.get_caller_info()
+        callbacks.append(ModelInteractionHandler(self, caller or "StrayCat"))
+
+
+
+        # here we deal with langchain
+        prompt = ChatPromptTemplate(
+            messages=[
+                SystemMessage(content=prompt)
+                # TODO: add here optional convo history passed to the method, 
+                #  or taken from working memory
+            ]
+        )
+
+        chain = (
+            prompt
+            | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f"{caller} prompt"))
+            | self._llm
+            | RunnableLambda(lambda x: utils.langchain_log_output(x, f"{caller} prompt output"))
+            | StrOutputParser()
+        )
+
+        output = chain.invoke(
+            {}, # in case we need to pass info to the template
+            config=RunnableConfig(callbacks=callbacks)
+        )
+
+        return output
+
+
+    async def __call__(self, message_dict):
+        """Call the Cat instance.
+
+        This method is called on the user's message received from the client.
+
+        Parameters
+        ----------
+        message_dict : dict
+            Dictionary received from the Websocket client.
+
+        Returns
+        -------
+        final_output : dict
+            Dictionary with the Cat's answer to be sent to the client.
+
+        Notes
+        -----
+        This is where the Cat's main pipeline runs. Namely, the Cat receives the user's input and recalls the memories.
+        The retrieved context is properly formatted and passed to the Agent, which uses the LLM to produce the
+        answer. The answer is packaged in a dictionary to be sent to the client as JSON via Websocket.
+
+        """
+
+        # Parse websocket message into UserMessage obj
+        user_message = UserMessage.model_validate(message_dict)
+        log.info(user_message)
+
+        # set a few easy access variables
+        self.working_memory.user_message_json = user_message
+
+        # keeping track of model interactions
+        self.working_memory.model_interactions = []
+
+        # hook to modify/enrich user input
+        self.working_memory.user_message_json = self.mad_hatter.execute_hook(
+            "before_cat_reads_message", self.working_memory.user_message_json, cat=self
+        )
+
+        # text of latest Human message
+        user_message_text = self.working_memory.user_message_json.text
+
+        # update conversation history (Human turn)
+        self.working_memory.update_conversation_history(
+            who="Human", message=user_message_text
+        )
+
+        # recall episodic and declarative memories from vector collections
+        #   and store them in working_memory
+        try:
+            self.recall_relevant_memories_to_working_memory()
+        except Exception as e:
+            log.error(e)
+            traceback.print_exc()
+
+            err_message = (
+                "You probably changed Embedder and old vector memory is not compatible. "
+                "Please delete `core/long_term_memory` folder."
+            )
+
+            return {
+                "type": "error",
+                "name": "VectorMemoryError",
+                "description": err_message,
+            }
+
+        # reply with agent
+        try:
+            agent_output: AgentOutput = await self.main_agent.execute(self)
+        except Exception as e:
+            # This error happens when the LLM
+            #   does not respect prompt instructions.
+            # We grab the LLM output here anyway, so small and
+            #   non instruction-fine-tuned models can still be used.
+            error_description = str(e)
+
+            log.error(error_description)
+            if "Could not parse LLM output: `" not in error_description:
+                raise e
+
+            unparsable_llm_output = error_description.replace(
+                "Could not parse LLM output: `", ""
+            ).replace("`", "")
+            agent_output = AgentOutput(
+                output=unparsable_llm_output,
+            )
+
+        log.info("Agent output returned to stray:")
+        log.info(agent_output)
+
+        doc = Document(
+            page_content=user_message_text,
+            metadata={"source": self.user_id, "when": time.time()},
+        )
+        doc = self.mad_hatter.execute_hook(
+            "before_cat_stores_episodic_memory", doc, cat=self
+        )
+        # store user message in episodic memory
+        # TODO: vectorize and store also conversation chunks
+        #   (not raw dialog, but summarization)
+        user_message_embedding = self.embedder.embed_documents([user_message_text])
+        _ = self.memory.vectors.episodic.add_point(
+            doc.page_content,
+            user_message_embedding[0],
+            doc.metadata,
+        )
+
+        # why this response?
+        why = self.__build_why()
+        # TODO: should these assignations be included in self.__build_why ?
+        why.intermediate_steps = agent_output.intermediate_steps
+        why.agent_output = agent_output.model_dump()
+
+        # prepare final cat message
+        final_output = CatMessage(
+            user_id=self.user_id, content=str(agent_output.output), why=why
+        )
+
+        # run message through plugins
+        final_output = self.mad_hatter.execute_hook(
+            "before_cat_sends_message", final_output, cat=self
+        )
+
+        # update conversation history (AI turn)
+        self.working_memory.update_conversation_history(
+            who="AI", message=final_output.content, why=final_output.why
+        )
+
+        return final_output
+
+    def run(self, user_message_json):
+        try:
+            cat_message = self.loop.run_until_complete(self.__call__(user_message_json))
+            # send message back to client
+            self.send_chat_message(cat_message)
+        except Exception as e:
+            # Log any unexpected errors
+            log.error(e)
+            traceback.print_exc()
+            # Send error as websocket message
+            self.send_error(e)
+
+    def classify(
+        self, sentence: str, labels: List[str] | Dict[str, List[str]]
+    ) -> str | None:
+        """Classify a sentence.
+
+        Parameters
+        ----------
+        sentence : str
+            Sentence to be classified.
+        labels : List[str] or Dict[str, List[str]]
+            Possible output categories and optional examples.
+
+        Returns
+        -------
+        label : str
+            Sentence category.
+
+        Examples
+        -------
+        >>> cat.classify("I feel good", labels=["positive", "negative"])
+        "positive"
+
+        Or giving examples for each category:
+
+        >>> example_labels = {
+        ...     "positive": ["I feel nice", "happy today"],
+        ...     "negative": ["I feel bad", "not my best day"],
+        ... }
+        ... cat.classify("it is a bad day", labels=example_labels)
+        "negative"
+
+        """
+
+        if isinstance(labels, dict):
+            labels_names = labels.keys()
+            examples_list = "\n\nExamples:"
+            for label, examples in labels.items():
+                for ex in examples:
+                    examples_list += f'\n"{ex}" -> "{label}"'
+        else:
+            labels_names = labels
+            examples_list = ""
+
+        labels_list = '"' + '", "'.join(labels_names) + '"'
+
+        prompt = f"""Classify this sentence:
+"{sentence}"
+
+Allowed classes are:
+{labels_list}{examples_list}
+
+"{sentence}" -> """
+
+        response = self.llm(prompt)
+        log.info(response)
+
+        # find the closest match and its score with levenshtein distance
+        best_label, score = min(
+            ((label, utils.levenshtein_distance(response, label)) for label in labels_names),
+            key=lambda x: x[1],
+        )
+
+        # set 0.5 as threshold - let's see if it works properly
+        return best_label if score < 0.5 else None
+
+    def stringify_chat_history(self, latest_n: int = 5) -> str:
+        """Serialize chat history.
+        Converts the recent conversation turns to text.
+
+        Parameters
+        ----------
+        latest_n : int
+            How many recent turns to stringify.
+
+        Returns
+        -------
+        history : str
+            String with recent conversation turns.
+
+        Notes
+        -----
+        Such context is placed in the `agent_prompt_suffix` in the place held by {chat_history}.
+
+        The chat history is a list of dictionaries, each with keys::
+            'who': the name of who said the utterance;
+            'message': the utterance.
+
+        """
+
+        history = self.working_memory.history[-latest_n:]
+
+        history_string = ""
+        for turn in history:
+            history_string += f"\n - {turn['who']}: {turn['message']}"
+
+        return history_string
+
+    def langchainfy_chat_history(self, latest_n: int = 5) -> List[BaseMessage]:
+        chat_history = self.working_memory.history[-latest_n:]
+
+        langchain_chat_history = []
+        for message in chat_history:
+            if message["role"] == Role.Human:
+                langchain_chat_history.append(
+                    HumanMessage(name=message["who"], content=message["message"])
+                )
+            else:
+                langchain_chat_history.append(
+                    AIMessage(name=message["who"], content=message["message"])
+                )
+
+        return langchain_chat_history
+
+    @property
+    def user_id(self):
+        return self.__user_id
+
+    @property
+    def _llm(self):
+        return CheshireCat()._llm
+
+    @property
+    def embedder(self):
+        return CheshireCat().embedder
+
+    @property
+    def memory(self):
+        return CheshireCat().memory
+
+    @property
+    def rabbit_hole(self):
+        return CheshireCat().rabbit_hole
+
+    @property
+    def mad_hatter(self):
+        return CheshireCat().mad_hatter
+
+    @property
+    def main_agent(self):
+        return CheshireCat().main_agent
+
+    @property
+    def white_rabbit(self):
+        return CheshireCat().white_rabbit
+
+    @property
+    def loop(self):
+        return self.__loop
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ __call__(message_dict) + + + async + + +

+ + +
+ +

Call the Cat instance.

+

This method is called on the user's message received from the client.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
message_dict + dict + +
+

Dictionary received from the Websocket client.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
final_output + dict + +
+

Dictionary with the Cat's answer to be sent to the client.

+
+
+ + +
+ Notes +

This is where the Cat's main pipeline runs. Namely, the Cat receives the user's input and recalls the memories. The retrieved context is properly formatted and passed to the Agent, which uses the LLM to produce the answer. The answer is packaged in a dictionary to be sent to the client as JSON via Websocket.

+
+
+ Source code in cat/looking_glass/stray_cat.py +
async def __call__(self, message_dict):
+    """Call the Cat instance.
+
+    This method is called on the user's message received from the client.
+
+    Parameters
+    ----------
+    message_dict : dict
+        Dictionary received from the Websocket client.
+
+    Returns
+    -------
+    final_output : dict
+        Dictionary with the Cat's answer to be sent to the client.
+
+    Notes
+    -----
+    This is where the Cat's main pipeline runs. Namely, the Cat receives the user's input and recalls the memories.
+    The retrieved context is properly formatted and passed to the Agent, which uses the LLM to produce the
+    answer. The answer is packaged in a dictionary to be sent to the client as JSON via Websocket.
+
+    """
+
+    # Parse websocket message into UserMessage obj
+    user_message = UserMessage.model_validate(message_dict)
+    log.info(user_message)
+
+    # set a few easy access variables
+    self.working_memory.user_message_json = user_message
+
+    # keeping track of model interactions
+    self.working_memory.model_interactions = []
+
+    # hook to modify/enrich user input
+    self.working_memory.user_message_json = self.mad_hatter.execute_hook(
+        "before_cat_reads_message", self.working_memory.user_message_json, cat=self
+    )
+
+    # text of latest Human message
+    user_message_text = self.working_memory.user_message_json.text
+
+    # update conversation history (Human turn)
+    self.working_memory.update_conversation_history(
+        who="Human", message=user_message_text
+    )
+
+    # recall episodic and declarative memories from vector collections
+    #   and store them in working_memory
+    try:
+        self.recall_relevant_memories_to_working_memory()
+    except Exception as e:
+        log.error(e)
+        traceback.print_exc()
+
+        err_message = (
+            "You probably changed Embedder and old vector memory is not compatible. "
+            "Please delete `core/long_term_memory` folder."
+        )
+
+        return {
+            "type": "error",
+            "name": "VectorMemoryError",
+            "description": err_message,
+        }
+
+    # reply with agent
+    try:
+        agent_output: AgentOutput = await self.main_agent.execute(self)
+    except Exception as e:
+        # This error happens when the LLM
+        #   does not respect prompt instructions.
+        # We grab the LLM output here anyway, so small and
+        #   non instruction-fine-tuned models can still be used.
+        error_description = str(e)
+
+        log.error(error_description)
+        if "Could not parse LLM output: `" not in error_description:
+            raise e
+
+        unparsable_llm_output = error_description.replace(
+            "Could not parse LLM output: `", ""
+        ).replace("`", "")
+        agent_output = AgentOutput(
+            output=unparsable_llm_output,
+        )
+
+    log.info("Agent output returned to stray:")
+    log.info(agent_output)
+
+    doc = Document(
+        page_content=user_message_text,
+        metadata={"source": self.user_id, "when": time.time()},
+    )
+    doc = self.mad_hatter.execute_hook(
+        "before_cat_stores_episodic_memory", doc, cat=self
+    )
+    # store user message in episodic memory
+    # TODO: vectorize and store also conversation chunks
+    #   (not raw dialog, but summarization)
+    user_message_embedding = self.embedder.embed_documents([user_message_text])
+    _ = self.memory.vectors.episodic.add_point(
+        doc.page_content,
+        user_message_embedding[0],
+        doc.metadata,
+    )
+
+    # why this response?
+    why = self.__build_why()
+    # TODO: should these assignations be included in self.__build_why ?
+    why.intermediate_steps = agent_output.intermediate_steps
+    why.agent_output = agent_output.model_dump()
+
+    # prepare final cat message
+    final_output = CatMessage(
+        user_id=self.user_id, content=str(agent_output.output), why=why
+    )
+
+    # run message through plugins
+    final_output = self.mad_hatter.execute_hook(
+        "before_cat_sends_message", final_output, cat=self
+    )
+
+    # update conversation history (AI turn)
+    self.working_memory.update_conversation_history(
+        who="AI", message=final_output.content, why=final_output.why
+    )
+
+    return final_output
+
+
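The two hooks bracketing this pipeline are handy extension points; for example, a plugin can post-process every reply just before it leaves the Cat. A minimal sketch, assuming the standard plugin import path (`final_output.content` is the attribute used in the source above; the appended text is made up):

```python
from cat.mad_hatter.decorators import hook

@hook
def before_cat_sends_message(final_output, cat):
    # append a signature to every reply leaving the Cat
    final_output.content += "\n\n~ sent by the Cheshire Cat"
    return final_output
```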
+
+ +
+ +
+ + +

+ classify(sentence, labels) + +

+ + +
+ +

Classify a sentence.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
sentence + str + +
+

Sentence to be classified.

+
+
+ required +
labels + List[str] or Dict[str, List[str]] + +
+

Possible output categories and optional examples.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
label + str + +
+

Sentence category.

+
+
+ + +

Examples:

+
>>> cat.classify("I feel good", labels=["positive", "negative"])
+"positive"
+
+

Or giving examples for each category:

+
>>> example_labels = {
+...     "positive": ["I feel nice", "happy today"],
+...     "negative": ["I feel bad", "not my best day"],
+... }
+... cat.classify("it is a bad day", labels=example_labels)
+"negative"
+
+ +
+ Source code in cat/looking_glass/stray_cat.py +
    def classify(
+        self, sentence: str, labels: List[str] | Dict[str, List[str]]
+    ) -> str | None:
+        """Classify a sentence.
+
+        Parameters
+        ----------
+        sentence : str
+            Sentence to be classified.
+        labels : List[str] or Dict[str, List[str]]
+            Possible output categories and optional examples.
+
+        Returns
+        -------
+        label : str
+            Sentence category.
+
+        Examples
+        -------
+        >>> cat.classify("I feel good", labels=["positive", "negative"])
+        "positive"
+
+        Or giving examples for each category:
+
+        >>> example_labels = {
+        ...     "positive": ["I feel nice", "happy today"],
+        ...     "negative": ["I feel bad", "not my best day"],
+        ... }
+        ... cat.classify("it is a bad day", labels=example_labels)
+        "negative"
+
+        """
+
+        if isinstance(labels, dict):
+            labels_names = labels.keys()
+            examples_list = "\n\nExamples:"
+            for label, examples in labels.items():
+                for ex in examples:
+                    examples_list += f'\n"{ex}" -> "{label}"'
+        else:
+            labels_names = labels
+            examples_list = ""
+
+        labels_list = '"' + '", "'.join(labels_names) + '"'
+
+        prompt = f"""Classify this sentence:
+"{sentence}"
+
+Allowed classes are:
+{labels_list}{examples_list}
+
+"{sentence}" -> """
+
+        response = self.llm(prompt)
+        log.info(response)
+
+        # find the closest match and its score with levenshtein distance
+        best_label, score = min(
+            ((label, utils.levenshtein_distance(response, label)) for label in labels_names),
+            key=lambda x: x[1],
+        )
+
+        # set 0.5 as threshold - let's see if it works properly
+        return best_label if score < 0.5 else None
+
+
+
+ +
+ +
+ + +

+ llm(prompt, stream=False) + +

+ + +
+ +

Generate a response using the LLM model.

+

This method is useful for generating a response with both a chat and a completion model using the same syntax

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
prompt + str + +
+

The prompt for generating the response.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

The generated response.

+
+
+ +
+ Source code in cat/looking_glass/stray_cat.py +
def llm(self, prompt: str, stream: bool = False) -> str:
+    """Generate a response using the LLM model.
+
+    This method is useful for generating a response with both a chat and a completion model using the same syntax
+
+    Parameters
+    ----------
+    prompt : str
+        The prompt for generating the response.
+
+    Returns
+    -------
+    str
+        The generated response.
+
+    """
+
+    # should we stream the tokens?
+    callbacks = []
+    if stream:
+        callbacks.append(NewTokenHandler(self))
+
+    # Add a token counter to the callbacks
+    caller = utils.get_caller_info()
+    callbacks.append(ModelInteractionHandler(self, caller or "StrayCat"))
+
+
+
+    # here we deal with langchain
+    prompt = ChatPromptTemplate(
+        messages=[
+            SystemMessage(content=prompt)
+            # TODO: add here optional convo history passed to the method, 
+            #  or taken from working memory
+        ]
+    )
+
+    chain = (
+        prompt
+        | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f"{caller} prompt"))
+        | self._llm
+        | RunnableLambda(lambda x: utils.langchain_log_output(x, f"{caller} prompt output"))
+        | StrOutputParser()
+    )
+
+    output = chain.invoke(
+        {}, # in case we need to pass info to the template
+        config=RunnableConfig(callbacks=callbacks)
+    )
+
+    return output
+
+
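Inside a tool, the same call can stream tokens to the client while still returning the full string. A sketch assuming the standard `@tool` decorator contract of Cheshire Cat plugins (the tool name and prompt are made up):

```python
from cat.mad_hatter.decorators import tool

@tool
def summarize_topic(topic, cat):
    """Summarize a topic. Input is the topic to summarize."""
    # stream=True pushes chat tokens over the websocket as they arrive
    return cat.llm(f"Summarize '{topic}' in two sentences.", stream=True)
```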
+
+ +
+ +
+ + +

+ recall_relevant_memories_to_working_memory(query=None) + +

+ + +
+ +

Retrieve context from memory.

+

The method retrieves the relevant memories from the vector collections that are given as context to the LLM. +Recalled memories are stored in the working memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
query + str + +
+

The query used to make a similarity search in the Cat's vector memories. If not provided, the query will be derived from the user's message.

+
+
+ None +
+ + +
+ Notes +

The user's message is used as a query to make a similarity search in the Cat's vector memories. Five hooks allow you to customize the recall pipeline before and after it runs.

+
+ +
+ See Also +

cat_recall_query +before_cat_recalls_memories +before_cat_recalls_episodic_memories +before_cat_recalls_declarative_memories +before_cat_recalls_procedural_memories +after_cat_recalls_memories

+
+
+ Source code in cat/looking_glass/stray_cat.py +
def recall_relevant_memories_to_working_memory(self, query=None):
+    """Retrieve context from memory.
+
+    The method retrieves the relevant memories from the vector collections that are given as context to the LLM.
+    Recalled memories are stored in the working memory.
+
+    Parameters
+    ----------
+    query : str, optional
+        The query used to make a similarity search in the Cat's vector memories.
+        If not provided, the query will be derived from the user's message.
+
+    Notes
+    -----
+    The user's message is used as a query to make a similarity search in the Cat's vector memories.
+    Five hooks allow you to customize the recall pipeline before and after it runs.
+
+    See Also
+    --------
+    cat_recall_query
+    before_cat_recalls_memories
+    before_cat_recalls_episodic_memories
+    before_cat_recalls_declarative_memories
+    before_cat_recalls_procedural_memories
+    after_cat_recalls_memories
+    """
+    recall_query = query
+
+    if query is None:
+        # If query is not provided, use the user's message as the query
+        recall_query = self.working_memory.user_message_json.text
+
+    # We may want to search in memory
+    recall_query = self.mad_hatter.execute_hook(
+        "cat_recall_query", recall_query, cat=self
+    )
+    log.info(f"Recall query: '{recall_query}'")
+
+    # Embed recall query
+    recall_query_embedding = self.embedder.embed_query(recall_query)
+    self.working_memory.recall_query = recall_query
+
+    # keep track of embedder model usage
+    self.working_memory.model_interactions.append(
+        EmbedderModelInteraction(
+            prompt=recall_query,
+            reply=recall_query_embedding,
+            input_tokens=len(tiktoken.get_encoding("cl100k_base").encode(recall_query)),
+        )
+    )
+
+    # hook to do something before recall begins
+    self.mad_hatter.execute_hook("before_cat_recalls_memories", cat=self)
+
+    # Setting default recall configs for each memory
+    # TODO: can these data structures become instances of a RecallSettings class?
+    default_episodic_recall_config = {
+        "embedding": recall_query_embedding,
+        "k": 3,
+        "threshold": 0.7,
+        "metadata": {"source": self.user_id},
+    }
+
+    default_declarative_recall_config = {
+        "embedding": recall_query_embedding,
+        "k": 3,
+        "threshold": 0.7,
+        "metadata": None,
+    }
+
+    default_procedural_recall_config = {
+        "embedding": recall_query_embedding,
+        "k": 3,
+        "threshold": 0.7,
+        "metadata": None,
+    }
+
+    # hooks to change recall configs for each memory
+    recall_configs = [
+        self.mad_hatter.execute_hook(
+            "before_cat_recalls_episodic_memories",
+            default_episodic_recall_config,
+            cat=self,
+        ),
+        self.mad_hatter.execute_hook(
+            "before_cat_recalls_declarative_memories",
+            default_declarative_recall_config,
+            cat=self,
+        ),
+        self.mad_hatter.execute_hook(
+            "before_cat_recalls_procedural_memories",
+            default_procedural_recall_config,
+            cat=self,
+        ),
+    ]
+
+    memory_types = self.memory.vectors.collections.keys()
+
+    for config, memory_type in zip(recall_configs, memory_types):
+        memory_key = f"{memory_type}_memories"
+
+        # recall relevant memories for collection
+        vector_memory = getattr(self.memory.vectors, memory_type)
+        memories = vector_memory.recall_memories_from_embedding(**config)
+
+        setattr(
+            self.working_memory, memory_key, memories
+        )  # self.working_memory.procedural_memories = ...
+
+    # hook to modify/enrich retrieved memories
+    self.mad_hatter.execute_hook("after_cat_recalls_memories", cat=self)
+
+
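Because each default config dict passes through its own hook before being used, a plugin can retune recall per collection. A minimal sketch, assuming the standard plugin import path (the `k` and `threshold` keys come from the defaults in the source above):

```python
from cat.mad_hatter.decorators import hook

@hook
def before_cat_recalls_declarative_memories(declarative_recall_config, cat):
    # retrieve more documents and accept weaker matches
    declarative_recall_config["k"] = 5
    declarative_recall_config["threshold"] = 0.5
    return declarative_recall_config
```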
+
+ +
+ +
+ + +

+ send_ws_message(content, msg_type='notification') + +

+ + +
+ +

Send a message via websocket.

+

This method is useful for sending a message via websocket directly without passing through the LLM

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
content + str + +
+

The content of the message.

+
+
+ required +
msg_type + str + +
+

The type of the message. Should be either notification, chat, chat_token or error

+
+
+ 'notification' +
+ +
+ Source code in cat/looking_glass/stray_cat.py +
def send_ws_message(self, content: str, msg_type: MSG_TYPES = "notification"):
+    """Send a message via websocket.
+
+    This method is useful for sending a message via websocket directly without passing through the LLM
+
+    Parameters
+    ----------
+    content : str
+        The content of the message.
+    msg_type : str
+        The type of the message. Should be either `notification`, `chat`, `chat_token` or `error`
+    """
+
+    if self.__ws is None:
+        log.warning(f"No websocket connection is open for user {self.user_id}")
+        return
+
+    options = get_args(MSG_TYPES)
+
+    if msg_type not in options:
+        raise ValueError(
+            f"The message type `{msg_type}` is not valid. Valid types: {', '.join(options)}"
+        )
+
+    if msg_type == "error":
+        self.__send_ws_json(
+            {"type": msg_type, "name": "GenericError", "description": str(content)}
+        )
+    else:
+        self.__send_ws_json({"type": msg_type, "content": content})
+
+
+
+ +
+ +
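A typical use is progress feedback from a slow tool, sent straight to the client without involving the LLM. A sketch assuming the standard `@tool` decorator contract (the tool itself is made up):

```python
import time

from cat.mad_hatter.decorators import tool

@tool
def slow_job(job_name, cat):
    """Run a slow job. Input is the job name."""
    cat.send_ws_message(f"Starting {job_name}...", msg_type="notification")
    time.sleep(2)  # stand-in for real work
    return f"{job_name} finished"
```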
+ + +

+ stringify_chat_history(latest_n=5) + +

+ + +
+ +

Serialize chat history. Converts the recent conversation turns to text.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
latest_n + int + +
+

How many recent turns to stringify.

+
+
+ 5 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
history + str + +
+

String with recent conversation turns.

+
+
+ + +
+ Notes +

Such context is placed in the agent_prompt_suffix in the place held by {chat_history}.

+

The chat history is a list of dictionaries, each with keys:: 'who': the name of who said the utterance; 'message': the utterance.

+
+
+ Source code in cat/looking_glass/stray_cat.py +
def stringify_chat_history(self, latest_n: int = 5) -> str:
+    """Serialize chat history.
+    Converts the recent conversation turns to text.
+
+    Parameters
+    ----------
+    latest_n : int
+        How many recent turns to stringify.
+
+    Returns
+    -------
+    history : str
+        String with recent conversation turns.
+
+    Notes
+    -----
+    Such context is placed in the `agent_prompt_suffix` in the place held by {chat_history}.
+
+    The chat history is a list of dictionaries, each with keys::
+        'who': the name of who said the utterance;
+        'message': the utterance.
+
+    """
+
+    history = self.working_memory.history[-latest_n:]
+
+    history_string = ""
+    for turn in history:
+        history_string += f"\n - {turn['who']}: {turn['message']}"
+
+    return history_string
+
+
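To make the output format concrete, here is the same loop run on a toy history (turn dicts shaped like the ones in working memory):

```python
history = [
    {"who": "Human", "message": "what time is it?"},
    {"who": "AI", "message": "Time stopped at six o'clock, alas."},
]

history_string = ""
for turn in history[-5:]:  # latest_n = 5
    history_string += f"\n - {turn['who']}: {turn['message']}"

print(history_string)
#  - Human: what time is it?
#  - AI: Time stopped at six o'clock, alas.
```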
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/mad_hatter/core_plugin/hooks/agent/index.html b/API_Documentation/mad_hatter/core_plugin/hooks/agent/index.html new file mode 100644 index 000000000..4f767ba28 --- /dev/null +++ b/API_Documentation/mad_hatter/core_plugin/hooks/agent/index.html @@ -0,0 +1,4388 @@ + + + + + + + + + + + + + + + + + + + + + + + + + agent - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

agent

+ +
+ + + + +
+ +

Hooks to modify the Cat's Agent.

+

Here is a collection of methods to hook into the Agent execution pipeline.

+ + + +
+ + + + + + + + + +
+ + +

+ agent_allowed_tools(allowed_tools, cat) + +

+ + +
+ +

Hook the allowed tools.

+

Lets you decide which tools end up in the Agent prompt.

+

To decide, you can filter the list of tools' names, but you can also check the context in cat.working_memory +and launch custom chains with cat._llm.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
tools + List[str] + +
+

List of allowed Langchain tools.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/agent.py +
@hook(priority=0)
+def agent_allowed_tools(allowed_tools: List[str], cat) -> List[str]:
+    """Hook the allowed tools.
+
+    Lets you decide which tools end up in the *Agent* prompt.
+
+    To decide, you can filter the list of tools' names, but you can also check the context in `cat.working_memory`
+    and launch custom chains with `cat._llm`.
+
+    Parameters
+    ---------
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    tools : List[str]
+        List of allowed Langchain tools.
+    """
+
+    return allowed_tools
+
+
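For instance, a plugin can hide a tool unless the conversation calls for it. A sketch assuming the standard plugin import path, where `get_weather` is a hypothetical tool name:

```python
from cat.mad_hatter.decorators import hook

@hook
def agent_allowed_tools(allowed_tools, cat):
    # drop the (hypothetical) weather tool unless the user mentioned weather
    if "weather" not in cat.working_memory.user_message_json.text.lower():
        allowed_tools = [t for t in allowed_tools if t != "get_weather"]
    return allowed_tools
```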
+
+ +
+ +
+ + +

+ agent_fast_reply(fast_reply, cat) + +

+ + +
+ +

This hook is useful for short-circuiting the Cat's response. If you do not want the agent to run, return the final response from here and it will reach the chat without the agent being executed.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fast_reply + +
+

Input is a dict (initially empty), which can be enriched with an "output" key containing the shortcut response.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
response + Union[None, Dict] + +
+

Cat response if you want to avoid using the agent, or None / {} if you want the agent to be executed. See below for examples of Cat responses.

+
+
+ + +

Examples:

+

Example 1: can't talk about this topic +

# here you could use cat._llm to do topic evaluation
+if "dog" in agent_input["input"]:
+    return {
+        "output": "You went out of topic. Can't talk about dog."
+    }
+
+

Example 2: don't remember (no uploaded documents about topic) +

num_declarative_memories = len( cat.working_memory.declarative_memories )
+if num_declarative_memories == 0:
+    return {
+       "output": "Sorry, I have no memories about that."
+    }
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/agent.py +
@hook(priority=0)
+def agent_fast_reply(fast_reply, cat) -> Union[None, Dict]:
+    """This hook is useful for short-circuiting the Cat response.
+    If you do not want the agent to run, return the final response from here and it will reach the chat without the agent being executed.
+
+    Parameters
+    --------
+    fast_reply: dict
+        Input is a dict (initially empty), which can be enriched with an "output" key containing the shortcut response.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    --------
+    response : Union[None, Dict]
+        Cat response if you want to avoid using the agent, or None / {} if you want the agent to be executed.
+        See below for examples of Cat responses.
+
+    Examples
+    --------
+
+    Example 1: can't talk about this topic
+    ```python
+    # here you could use cat._llm to do topic evaluation
+    if "dog" in agent_input["input"]:
+        return {
+            "output": "You went out of topic. Can't talk about dog."
+        }
+    ```
+
+    Example 2: don't remember (no uploaded documents about topic)
+    ```python
+    num_declarative_memories = len( cat.working_memory.declarative_memories )
+    if num_declarative_memories == 0:
+        return {
+           "output": "Sorry, I have no memories about that."
+        }
+    ```
+    """
+
+    return fast_reply
+
+
+
+ +
+ +
+ + +

+ before_agent_starts(agent_input, cat) + +

+ + +
+ +

Hook to read and edit the agent input

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
agent_input + Dict + +
+

Input that is about to be passed to the agent.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
response + Dict + +
+

Agent Input

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/agent.py +
@hook(priority=0)
+def before_agent_starts(agent_input: Dict, cat) -> Dict:
+    """Hook to read and edit the agent input
+
+    Parameters
+    --------
+    agent_input: dict
+        Input that is about to be passed to the agent.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    --------
+    response : Dict
+        Agent Input
+    """
+
+    return agent_input
+
+
+
+ +
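+A minimal plugin sketch of this hook (assuming the standard plugin import; `last_agent_input` is a made-up key, exploiting the fact that the working memory accepts custom entries):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_agent_starts(agent_input, cat):
+    # keep a copy of the raw agent input for later inspection
+    cat.working_memory.last_agent_input = agent_input
+    return agent_input
+```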
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/mad_hatter/core_plugin/hooks/flow/index.html b/API_Documentation/mad_hatter/core_plugin/hooks/flow/index.html new file mode 100644 index 000000000..923d33796 --- /dev/null +++ b/API_Documentation/mad_hatter/core_plugin/hooks/flow/index.html @@ -0,0 +1,5608 @@ + + + + + + + + + + + + + + + + + + + + + + + + + flow - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

flow

+ +
+ + + + +
+ +

Hooks to modify the Cat's flow of execution.

+

Here is a collection of methods to hook into the Cat execution pipeline.

+ + + +
+ + + + + + + + + +
+ + +

+ after_cat_bootstrap(cat) + +

+ + +
+ +

Hook into the end of the Cat start up.

+

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, +the Main Agent, the Rabbit Hole and the White Rabbit.

+

This hook allows intercepting the end of this process and is executed right after the Cat has finished loading +its components.

+

This can be used to set or store variables to be shared further in the pipeline.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def after_cat_bootstrap(cat) -> None:
+    """Hook into the end of the Cat start up.
+
+    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,
+    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.
+
+    This hook allows intercepting the end of this process and is executed right after the Cat has finished loading
+    its components.
+
+    This can be used to set or store variables to be shared further in the pipeline.
+
+    Parameters
+    ----------
+    cat : CheshireCat
+        Cheshire Cat instance.
+    """
+    pass  # do nothing
+
+
+
+ +
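+A minimal sketch of how a plugin could use this hook (assuming the standard plugin import; `boot_finished_at` is a made-up attribute, shown only to illustrate sharing state downstream):
+```python
+import time
+
+from cat.mad_hatter.decorators import hook
+
+@hook
+def after_cat_bootstrap(cat):
+    # record when bootstrap finished, for later use in the pipeline
+    cat.boot_finished_at = time.time()
+```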
+ +
+ + +

+ after_cat_recalls_memories(cat) + +

+ + +
+ +

Hook after semantic search in memories.

+

The hook is executed just after the Cat searches for the meaningful context in memories +and stores it in the Working Memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def after_cat_recalls_memories(cat) -> None:
+    """Hook after semantic search in memories.
+
+    The hook is executed just after the Cat searches for the meaningful context in memories
+    and stores it in the *Working Memory*.
+
+    Parameters
+    ----------
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    """
+    pass  # do nothing
+
+
+
+ +
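+A minimal sketch (assuming the standard plugin import and the `log` helper from `cat.log`):
+```python
+from cat.log import log
+from cat.mad_hatter.decorators import hook
+
+@hook
+def after_cat_recalls_memories(cat):
+    # inspect what was just recalled into the working memory
+    n_docs = len(cat.working_memory.declarative_memories)
+    log.info(f"Recalled {n_docs} declarative memories")
+```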
+ +
+ + +

+ before_cat_bootstrap(cat) + +

+ + +
+ +

Hook into the Cat start up.

+

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, +the Main Agent, the Rabbit Hole and the White Rabbit.

+

This hook allows intercepting this process and is executed midway through the loading of plugins and +natural language objects.

+

This hook can be used to set or store variables to be propagated to subsequent loaded objects.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_bootstrap(cat) -> None:
+    """Hook into the Cat start up.
+
+    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,
+    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.
+
+    This hook allows intercepting this process and is executed midway through the loading of
+    plugins and natural language objects.
+
+    This hook can be used to set or store variables to be propagated to subsequent loaded objects.
+
+    Parameters
+    ----------
+    cat : CheshireCat
+        Cheshire Cat instance.
+    """
+    pass  # do nothing
+
+
+
+ +
+ +
+ + +

+ before_cat_reads_message(user_message_json, cat) + +

+ + +
+ +

Hook the incoming user's JSON dictionary.

+

Allows editing and enriching the incoming message received from the WebSocket connection.

+

For instance, this hook can be used to translate the user's message before feeding it to the Cat. +Another use case is to add custom keys to the JSON dictionary.

+

The incoming message is a JSON dictionary with keys: + { + "text": message content + }

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
user_message_json + dict + +
+

JSON dictionary with the message received from the chat.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
user_message_json + dict + +
+

Edited JSON dictionary that will be fed to the Cat.

+
+
+ + +
+ Notes +

For example:

+
{
+    "text": "Hello Cheshire Cat!",
+    "custom_key": True
+}
+
+

where "custom_key" is a newly added key to the dictionary to store any data.

+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_reads_message(user_message_json: dict, cat) -> dict:
+    """Hook the incoming user's JSON dictionary.
+
+    Allows editing and enriching the incoming message received from the WebSocket connection.
+
+    For instance, this hook can be used to translate the user's message before feeding it to the Cat.
+    Another use case is to add custom keys to the JSON dictionary.
+
+    The incoming message is a JSON dictionary with keys:
+        {
+            "text": message content
+        }
+
+    Parameters
+    ----------
+    user_message_json : dict
+        JSON dictionary with the message received from the chat.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+
+    Returns
+    -------
+    user_message_json : dict
+        Edited JSON dictionary that will be fed to the Cat.
+
+    Notes
+    -----
+    For example:
+
+        {
+            "text": "Hello Cheshire Cat!",
+            "custom_key": True
+        }
+
+    where "custom_key" is a newly added key to the dictionary to store any data.
+
+    """
+    return user_message_json
+
+
+
+ +
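+A minimal plugin sketch, mirroring the Notes above (assuming the standard plugin import; `custom_key` is an arbitrary example name):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_cat_reads_message(user_message_json, cat):
+    # enrich the incoming message with a custom key
+    user_message_json["custom_key"] = True
+    return user_message_json
+```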
+ +
+ + +

+ before_cat_recalls_declarative_memories(declarative_recall_config, cat) + +

+ + +
+ +

Hook into semantic search in memories.

+

Allows intercepting when the Cat queries the memories using the embedded user's input.

+

The hook is executed just before the Cat searches for the meaningful context in both memories +and stores it in the Working Memory.

+

The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied +to the query in the vector memory (items with a score under the threshold are not retrieved). +It also returns the embedded query (embedding) and the conditions on recall (metadata).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
declarative_recall_config + dict + +
+

Dictionary with data needed to recall declarative memories

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
declarative_recall_config + dict + +
+

Edited dictionary that will be fed to the embedder.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_recalls_declarative_memories(
+    declarative_recall_config: dict, cat
+) -> dict:
+    """Hook into semantic search in memories.
+
+    Allows intercepting when the Cat queries the memories using the embedded user's input.
+
+    The hook is executed just before the Cat searches for the meaningful context in both memories
+    and stores it in the *Working Memory*.
+
+    The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold
+    applied to the query in the vector memory (items with a score under the threshold are not retrieved).
+    It also returns the embedded query (embedding) and the conditions on recall (metadata).
+
+    Parameters
+    ----------
+    declarative_recall_config: dict
+        Dictionary with data needed to recall declarative memories
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    declarative_recall_config: dict
+        Edited dictionary that will be fed to the embedder.
+
+    """
+    return declarative_recall_config
+
+
+
+ +
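+A minimal sketch (assuming the standard plugin import; the `k` and `threshold` key names follow the docstring's description of the config dict):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_cat_recalls_declarative_memories(declarative_recall_config, cat):
+    # retrieve more documents, but keep only confident matches
+    declarative_recall_config["k"] = 8
+    declarative_recall_config["threshold"] = 0.8
+    return declarative_recall_config
+```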
+ +
+ + +

+ before_cat_recalls_episodic_memories(episodic_recall_config, cat) + +

+ + +
+ +

Hook into semantic search in memories.

+

Allows intercepting when the Cat queries the memories using the embedded user's input.

+

The hook is executed just before the Cat searches for the meaningful context in both memories +and stores it in the Working Memory.

+

The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied +to the query in the vector memory (items with a score under the threshold are not retrieved). +It also returns the embedded query (embedding) and the conditions on recall (metadata).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
episodic_recall_config + dict + +
+

Dictionary with data needed to recall episodic memories

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
episodic_recall_config + dict + +
+

Edited dictionary that will be fed to the embedder.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_recalls_episodic_memories(episodic_recall_config: dict, cat) -> dict:
+    """Hook into semantic search in memories.
+
+    Allows intercepting when the Cat queries the memories using the embedded user's input.
+
+    The hook is executed just before the Cat searches for the meaningful context in both memories
+    and stores it in the *Working Memory*.
+
+    The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold
+    applied to the query in the vector memory (items with a score under the threshold are not retrieved).
+    It also returns the embedded query (embedding) and the conditions on recall (metadata).
+
+    Parameters
+    ----------
+    episodic_recall_config : dict
+        Dictionary with data needed to recall episodic memories
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    episodic_recall_config: dict
+        Edited dictionary that will be fed to the embedder.
+
+    """
+    return episodic_recall_config
+
+
+
+ +
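+A minimal sketch (assuming the standard plugin import; the `metadata` key name and `cat.user_id` are assumptions here):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_cat_recalls_episodic_memories(episodic_recall_config, cat):
+    # restrict recall to the current user's past messages
+    episodic_recall_config["metadata"] = {"source": cat.user_id}
+    return episodic_recall_config
+```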
+ +
+ + +

+ before_cat_recalls_memories(cat) + +

+ + +
+ +

Hook into semantic search in memories.

+

Allows intercepting when the Cat queries the memories using the embedded user's input.

+

The hook is executed just before the Cat searches for the meaningful context in both memories +and stores it in the Working Memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_recalls_memories(cat) -> None:
+    """Hook into semantic search in memories.
+
+    Allows intercepting when the Cat queries the memories using the embedded user's input.
+
+    The hook is executed just before the Cat searches for the meaningful context in both memories
+    and stores it in the *Working Memory*.
+
+    Parameters
+    ----------
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    """
+    pass  # do nothing
+
+
+
+ +
+ +
+ + +

+ before_cat_recalls_procedural_memories(procedural_recall_config, cat) + +

+ + +
+ +

Hook into semantic search in memories.

+

Allows intercepting when the Cat queries the memories using the embedded user's input.

+

The hook is executed just before the Cat searches for the meaningful context in both memories +and stores it in the Working Memory.

+

The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied +to the query in the vector memory (items with a score under the threshold are not retrieved). +It also returns the embedded query (embedding) and the conditions on recall (metadata).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
procedural_recall_config + dict + +
+

Dictionary with data needed to recall tools from procedural memory

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
procedural_recall_config + dict + +
+

Edited dictionary that will be fed to the embedder.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_recalls_procedural_memories(procedural_recall_config: dict, cat) -> dict:
+    """Hook into semantic search in memories.
+
+    Allows intercepting when the Cat queries the memories using the embedded user's input.
+
+    The hook is executed just before the Cat searches for the meaningful context in both memories
+    and stores it in the *Working Memory*.
+
+    The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold
+    applied to the query in the vector memory (items with a score under the threshold are not retrieved).
+    It also returns the embedded query (embedding) and the conditions on recall (metadata).
+
+    Parameters
+    ----------
+    procedural_recall_config: dict
+        Dictionary with data needed to recall tools from procedural memory
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    procedural_recall_config: dict
+        Edited dictionary that will be fed to the embedder.
+
+    """
+    return procedural_recall_config
+
+
+
+ +
+ +
+ + +

+ before_cat_sends_message(message, cat) + +

+ + +
+ +

Hook the outgoing Cat's message.

+

Allows editing the JSON dictionary that will be sent to the client via the WebSocket connection.

+

This hook can be used to edit the message sent to the user or to add keys to the dictionary.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
message + dict + +
+

JSON dictionary to be sent to the WebSocket client.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
message + dict + +
+

Edited JSON dictionary with the Cat's answer.

+
+
+ + +
+ Notes +

Default message is::

+
    {
+        "type": "chat",
+        "content": cat_message["output"],
+        "why": {
+            "input": cat_message["input"],
+            "output": cat_message["output"],
+            "intermediate_steps": cat_message["intermediate_steps"],
+            "memory": {
+                "vectors": {
+                    "episodic": episodic_report,
+                    "declarative": declarative_report
+                }
+            },
+        },
+    }
+
+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_sends_message(message: dict, cat) -> dict:
+    """Hook the outgoing Cat's message.
+
+    Allows editing the JSON dictionary that will be sent to the client via the WebSocket connection.
+
+    This hook can be used to edit the message sent to the user or to add keys to the dictionary.
+
+    Parameters
+    ----------
+    message : dict
+        JSON dictionary to be sent to the WebSocket client.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    message : dict
+        Edited JSON dictionary with the Cat's answer.
+
+    Notes
+    -----
+    Default `message` is::
+
+            {
+                "type": "chat",
+                "content": cat_message["output"],
+                "why": {
+                    "input": cat_message["input"],
+                    "output": cat_message["output"],
+                    "intermediate_steps": cat_message["intermediate_steps"],
+                    "memory": {
+                        "vectors": {
+                            "episodic": episodic_report,
+                            "declarative": declarative_report
+                        }
+                    },
+                },
+            }
+
+    """
+
+    return message
+
+
+
+ +
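+A minimal plugin sketch (assuming the standard plugin import; the "content" key is taken from the default message shown in the Notes):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_cat_sends_message(message, cat):
+    # append a signature to every outgoing chat message
+    message["content"] += "\n\n(sent via my plugin)"
+    return message
+```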
+ +
+ + +

+ before_cat_stores_episodic_memory(doc, cat) + +

+ + +
+ +

Hook the user message Document before it is inserted in the vector memory.

+

Allows editing and enhancing a single Document before the Cat adds it to the episodic vector memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
doc + Document + +
+

Langchain Document to be inserted in memory.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
doc + Document + +
+

Langchain Document that is added in the episodic vector memory.

+
+
+ + +
+ Notes +

The Document has two properties::

+
`page_content`: the string with the text to save in memory;
+`metadata`: a dictionary with at least two keys:
+    `source`: where the text comes from;
+    `when`: timestamp to track when it's been uploaded.
+
+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def before_cat_stores_episodic_memory(doc: Document, cat) -> Document:
+    """Hook the user message `Document` before is inserted in the vector memory.
+
+    Allows editing and enhancing a single `Document` before the Cat adds it to the episodic vector memory.
+
+    Parameters
+    ----------
+    doc : Document
+        Langchain `Document` to be inserted in memory.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    doc : Document
+        Langchain `Document` that is added in the episodic vector memory.
+
+    Notes
+    -----
+    The `Document` has two properties::
+
+        `page_content`: the string with the text to save in memory;
+        `metadata`: a dictionary with at least two keys:
+            `source`: where the text comes from;
+            `when`: timestamp to track when it's been uploaded.
+
+    """
+    return doc
+
+
+
+ +
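+A minimal sketch (assuming the standard plugin import; "mood" is an arbitrary metadata key):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_cat_stores_episodic_memory(doc, cat):
+    # enrich the memory with a custom metadata key
+    doc.metadata["mood"] = "curious"
+    return doc
+```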
+ +
+ + +

+ cat_recall_query(user_message, cat) + +

+ + +
+ +

Hook the semantic search query.

+

This hook allows editing the user's message used as a query for context retrieval from memories. +As a result, the retrieved context can be conditioned by editing the user's message.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
user_message + str + +
+

String with the text received from the user.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance to exploit the Cat's methods.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
TypeDescription
+ Edited string to be used for context retrieval in memory. The returned string is further stored in the + +
+ +
+
+ Working Memory at `cat.working_memory.recall_query`. + +
+ +
+
+ + +
+ Notes +

For example, this hook is suitable to perform Hypothetical Document Embedding (HyDE). +The HyDE [1]_ strategy exploits the user's message to generate a hypothetical answer. This is then used to recall +the relevant context from the memory. +An official plugin is available to test this technique.

+
+ +
+ References +

[1] Gao, L., Ma, X., Lin, J., & Callan, J. (2022). Precise Zero-Shot Dense Retrieval without Relevance Labels. + arXiv preprint arXiv:2212.10496.

+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/flow.py +
@hook(priority=0)
+def cat_recall_query(user_message: str, cat) -> str:
+    """Hook the semantic search query.
+
+    This hook allows editing the user's message used as a query for context retrieval from memories.
+    As a result, the retrieved context can be conditioned by editing the user's message.
+
+    Parameters
+    ----------
+    user_message : str
+        String with the text received from the user.
+    cat : CheshireCat
+        Cheshire Cat instance to exploit the Cat's methods.
+
+    Returns
+    -------
+    Edited string to be used for context retrieval in memory. The returned string is further stored in the
+    Working Memory at `cat.working_memory.recall_query`.
+
+    Notes
+    -----
+    For example, this hook is suitable to perform Hypothetical Document Embedding (HyDE).
+    The HyDE [1]_ strategy exploits the user's message to generate a hypothetical answer. This is then used to recall
+    the relevant context from the memory.
+    An official plugin is available to test this technique.
+
+    References
+    ----------
+    [1] Gao, L., Ma, X., Lin, J., & Callan, J. (2022). Precise Zero-Shot Dense Retrieval without Relevance Labels.
+       arXiv preprint arXiv:2212.10496.
+
+    """
+
+    # here we just return the latest user message as is
+    return user_message
+
+
+
+ +
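+A naive HyDE-style sketch (assuming the standard plugin import and a `cat.llm(prompt)` helper on the instance):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def cat_recall_query(user_message, cat):
+    # recall with a hypothetical answer instead of the raw question
+    return cat.llm(f"Write a short, plausible answer to: {user_message}")
+```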
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/mad_hatter/core_plugin/hooks/prompt/index.html b/API_Documentation/mad_hatter/core_plugin/hooks/prompt/index.html new file mode 100644 index 000000000..282a51c39 --- /dev/null +++ b/API_Documentation/mad_hatter/core_plugin/hooks/prompt/index.html @@ -0,0 +1,4397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + prompt - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

prompt

+ +
+ + + + +
+ +

Hooks to modify the prompts.

+

Here is a collection of methods to hook the prompt components that instruct the Agent.

+ + + +
+ + + + + + + + + +
+ + +

+ agent_prompt_instructions(instructions, cat) + +

+ + +
+ +

Hook the instruction prompt.

+

Allows editing the instructions that the Cat feeds to the Agent to select tools and forms.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
instructions + str + +
+

Instructions prompt to select tool or form.

+
+
+ required +
cat + StrayCat + +
+

StrayCat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
instructions + str + +
+

Instructions prompt to select tool or form

+
+
+ + +
+ Notes +

This prompt explains to the Agent how to select a tool or form.

+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/prompt.py +
@hook(priority=0)
+def agent_prompt_instructions(instructions: str, cat) -> str:
+    """Hook the instruction prompt.
+
+    Allows editing the instructions that the Cat feeds to the *Agent* to select tools and forms.
+
+    Parameters
+    ----------
+    instructions : str
+        Instructions prompt to select tool or form.
+    cat : StrayCat
+        StrayCat instance.
+
+    Returns
+    -------
+    instructions : str
+        Instructions prompt to select tool or form
+
+    Notes
+    -----
+    This prompt explains to the *Agent* how to select a tool or form.
+
+    """
+
+    return instructions
+
+
+
+ +
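+A minimal sketch that keeps the default instructions and appends a rule (assuming the standard plugin import; the appended wording is arbitrary):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def agent_prompt_instructions(instructions, cat):
+    # extend rather than replace, to preserve the expected output format
+    return instructions + "\nIf no tool or form clearly applies, select none."
+```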
+ +
+ + +

+ agent_prompt_prefix(prefix, cat) + +

+ + +
+ +

Hook the main prompt prefix.

+

Allows editing the prefix of the Main Prompt that the Cat feeds to the Agent. +It describes the personality of your assistant and its general task.

+

The prefix is then completed with the agent_prompt_suffix.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
prefix + str + +
+

Main / System prompt with personality and general task to be accomplished.

+
+
+ required +
cat + StrayCat + +
+

StrayCat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
prefix + str + +
+

Main / System prompt.

+
+
+ + +
+ Notes +

The default prefix describes who the AI is and how it is expected to answer the Human.

+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/prompt.py +
@hook(priority=0)
+def agent_prompt_prefix(prefix, cat) -> str:
+    """Hook the main prompt prefix.
+
+    Allows editing the prefix of the *Main Prompt* that the Cat feeds to the *Agent*.
+    It describes the personality of your assistant and its general task.
+
+    The prefix is then completed with the `agent_prompt_suffix`.
+
+    Parameters
+    ----------
+    prefix : str
+        Main / System prompt with personality and general task to be accomplished.
+    cat : StrayCat
+        StrayCat instance.
+
+    Returns
+    -------
+    prefix : str
+        Main / System prompt.
+
+    Notes
+    -----
+    The default prefix describes who the AI is and how it is expected to answer the Human.
+    """
+
+    return prefix
+
+
+
+ +
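+A minimal sketch giving the assistant a custom personality (assuming the standard plugin import):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def agent_prompt_prefix(prefix, cat):
+    # replace the default personality
+    return (
+        "You are Marvin, a gloomy but brilliant android. "
+        "You answer the Human with reluctant accuracy."
+    )
+```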
+ +
+ + +

+ agent_prompt_suffix(prompt_suffix, cat) + +

+ + +
+ +

Hook the main prompt suffix.

+

Allows editing the suffix of the Main Prompt that the Cat feeds to the Agent.

+

The suffix is concatenated to agent_prompt_prefix when RAG context is used.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
cat + StrayCat + +
+

StrayCat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
prompt_suffix + str + +
+

The suffix string to be concatenated to the Main Prompt (prefix).

+
+
+ + +
+ Notes +

The default suffix has a few placeholders: +- {episodic_memory} provides memories retrieved from episodic memory (past conversations) +- {declarative_memory} provides memories retrieved from declarative memory (uploaded documents) +- {chat_history} provides the Agent the recent conversation history +- {input} provides the last user's input +- {agent_scratchpad} is where the Agent can concatenate tools use and multiple calls to the LLM.

+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/prompt.py +
@hook(priority=0)
+def agent_prompt_suffix(prompt_suffix: str, cat) -> str:
+    """Hook the main prompt suffix.
+
+    Allows editing the suffix of the *Main Prompt* that the Cat feeds to the *Agent*.
+
+    The suffix is concatenated to `agent_prompt_prefix` when RAG context is used.
+
+    Parameters
+    ----------
+    prompt_suffix : str
+        The current suffix of the *Main Prompt*.
+    cat : StrayCat
+        StrayCat instance.
+
+    Returns
+    -------
+    prompt_suffix : str
+        The suffix string to be concatenated to the *Main Prompt* (prefix).
+
+    Notes
+    -----
+    The default suffix has a few placeholders:
+    - {episodic_memory} provides memories retrieved from *episodic* memory (past conversations)
+    - {declarative_memory} provides memories retrieved from *declarative* memory (uploaded documents)
+    - {chat_history} provides the *Agent* the recent conversation history
+    - {input} provides the last user's input
+    - {agent_scratchpad} is where the *Agent* can concatenate tools use and multiple calls to the LLM.
+
+    """
+
+    return prompt_suffix
+
+
+
+ +
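+A minimal sketch using only some of the placeholders listed in the Notes (assuming the standard plugin import):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def agent_prompt_suffix(prompt_suffix, cat):
+    # keep declarative memories and the conversation, drop episodic ones
+    return """
+{declarative_memory}
+
+{chat_history}
+Human: {input}
+AI: {agent_scratchpad}"""
+```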
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/index.html b/API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/index.html new file mode 100644 index 000000000..7438392d4 --- /dev/null +++ b/API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/index.html @@ -0,0 +1,5000 @@ + + + + + + + + + + + + + + + + + + + + + + + + + rabbithole - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

rabbithole

+ +
+ + + + +
+ +

Hooks to modify the RabbitHole's document ingestion.

+

Here is a collection of methods to hook into the RabbitHole execution pipeline.

+

These hooks allow intercepting the uploaded documents at different points before they are saved into memory.

+ + + +
+ + + + + + + + + +
+ + +

+ after_rabbithole_splitted_text(chunks, cat) + +

+ + +
+ +

Hook the Document after it is split.

+

Allows editing the list of Document right after the RabbitHole chunked them into smaller ones.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
chunks + List[Document] + +
+

List of Langchain Document.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
chunks + List[Document] + +
+

List of modified chunked langchain documents to be stored in the declarative memory.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def after_rabbithole_splitted_text(chunks: List[Document], cat) -> List[Document]:
+    """Hook the `Document` after is split.
+
+    Allows editing the list of `Document` right after the *RabbitHole* chunked them into smaller ones.
+
+    Parameters
+    ----------
+    chunks : List[Document]
+        List of Langchain `Document`.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    chunks : List[Document]
+        List of modified chunked langchain documents to be stored in the declarative memory.
+
+    """
+
+    return chunks
+
+
+
+ +
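+A minimal sketch (assuming the standard plugin import; the 20-character cutoff is arbitrary):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def after_rabbithole_splitted_text(chunks, cat):
+    # drop chunks too short to carry meaning
+    return [chunk for chunk in chunks if len(chunk.page_content) > 20]
+```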
+ +
+ + +

+ after_rabbithole_stored_documents(source, stored_points, cat) + +

+ + +
+ +

Hook the Document after it is inserted in the vector memory.

+

Allows editing and enhancing the list of Document after it is inserted in the vector memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
source + +
+

Name of ingested file/url

+
+
+ required +
stored_points + List[PointStruct] +
+

List of Qdrant PointStruct just inserted into the db.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ None + +
+ +
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def after_rabbithole_stored_documents(
+    source, stored_points: List[PointStruct], cat
+) -> None:
+    """Hook the Document after is inserted in the vector memory.
+
+    Allows editing and enhancing the list of Document after it is inserted in the vector memory.
+
+    Parameters
+    ----------
+    source: str
+        Name of ingested file/url
+    stored_points : List[PointStruct]
+        List of Qdrant PointStruct just inserted into the db.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    None
+    """
+    pass
+
+
+
+ +
+ +
+ + +

+ before_rabbithole_insert_memory(doc, cat) + +

+ + +
+ +

Hook the Document before it is inserted in the vector memory.

+

Allows editing and enhancing a single Document before the RabbitHole adds it to the declarative vector memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
doc + Document + +
+

Langchain Document to be inserted in memory.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
doc + Document + +
+

Langchain Document that is added in the declarative vector memory.

+
+
+ + +
+ Notes +

The Document has two properties::

+
`page_content`: the string with the text to save in memory;
+`metadata`: a dictionary with at least two keys:
+    `source`: where the text comes from;
+    `when`: timestamp to track when it's been uploaded.
+
+
+
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def before_rabbithole_insert_memory(doc: Document, cat) -> Document:
+    """Hook the `Document` before is inserted in the vector memory.
+
+    Allows editing and enhancing a single `Document` before the *RabbitHole* adds it to the declarative vector memory.
+
+    Parameters
+    ----------
+    doc : Document
+        Langchain `Document` to be inserted in memory.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    doc : Document
+        Langchain `Document` that is added in the declarative vector memory.
+
+    Notes
+    -----
+    The `Document` has two properties::
+
+        `page_content`: the string with the text to save in memory;
+        `metadata`: a dictionary with at least two keys:
+            `source`: where the text comes from;
+            `when`: timestamp to track when it's been uploaded.
+
+    """
+    return doc
+
+
+
+ +
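+A minimal sketch (assuming the standard plugin import and that `cat.user_id` is available on the instance; `uploaded_by` is a made-up metadata key):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_rabbithole_insert_memory(doc, cat):
+    # tag every ingested chunk with the uploading user
+    doc.metadata["uploaded_by"] = cat.user_id
+    return doc
+```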
+ +
+ + +

+ before_rabbithole_splits_text(docs, cat) + +

+ + +
+ +

Hook the Documents before they are split into chunks.

+

Allows editing the main Document(s) of the uploaded file before the RabbitHole recursively splits them into shorter ones. +Please note that this is a list because parsers can output one or more Documents, which are afterwards split.

+

For instance, the hook allows changing the text or editing/adding metadata.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
docs + List[Document] + +
+

Langchain Documents resulted after parsing the file uploaded in the RabbitHole.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
docs + List[Document] + +
+

Edited Langchain Documents.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def before_rabbithole_splits_text(docs: List[Document], cat) -> List[Document]:
+    """Hook the `Documents` before they are split into chunks.
+
+    Allows editing the main Document(s) of the uploaded file before the *RabbitHole* recursively splits them into shorter ones.
+    Please note that this is a list because parsers can output one or more Documents, which are afterwards split.
+
+    For instance, the hook allows changing the text or editing/adding metadata.
+
+    Parameters
+    ----------
+    docs : List[Document]
+        Langchain `Document`s resulted after parsing the file uploaded in the *RabbitHole*.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    docs : List[Document]
+        Edited Langchain `Document`s.
+
+    """
+
+    return docs
+
+
+
+ +
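+A minimal sketch (assuming the standard plugin import):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_rabbithole_splits_text(docs, cat):
+    # normalize whitespace before chunking
+    for doc in docs:
+        doc.page_content = " ".join(doc.page_content.split())
+    return docs
+```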
+ +
+ + +

+ before_rabbithole_stores_documents(docs, cat) + +

+ + +
+ +

Hook into the memory insertion pipeline.

+

Allows modifying how the list of Document is inserted in the vector memory.

+

For example, this hook is a good point to summarize the incoming documents and save both original and +summarized contents. +An official plugin is available to test this procedure.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
docs + List[Document] + +
+

List of Langchain Document to be edited.

+
+
+ required +
cat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
docs + List[Document] + +
+

List of edited Langchain documents.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def before_rabbithole_stores_documents(docs: List[Document], cat) -> List[Document]:
+    """Hook into the memory insertion pipeline.
+
+    Allows modifying how the list of `Document` is inserted in the vector memory.
+
+    For example, this hook is a good point to summarize the incoming documents and save both original and
+    summarized contents.
+    An official plugin is available to test this procedure.
+
+    Parameters
+    ----------
+    docs : List[Document]
+        List of Langchain `Document` to be edited.
+    cat: CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    docs : List[Document]
+        List of edited Langchain documents.
+
+    """
+
+    return docs
+
+
+
+ +
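+A summarization sketch in the spirit of the official plugin mentioned above (assuming the standard plugin import and a `cat.llm(prompt)` helper):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def before_rabbithole_stores_documents(docs, cat):
+    # prepend an LLM summary to each chunk, keeping the original text
+    for doc in docs:
+        summary = cat.llm(f"Summarize in one sentence:\n{doc.page_content}")
+        doc.page_content = f"{summary}\n\n{doc.page_content}"
+    return docs
+```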
+ +
+ + +

+ rabbithole_instantiates_parsers(file_handlers, cat) + +

+ + +
+ +

Hook the available parsers for ingesting files in the declarative memory.

+

Allows replacing or extending existing supported mime types and related parsers to customize the file ingestion.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file_handlers + dict + +
+

Keys are the supported mime types and values are the related parsers.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
file_handlers + dict + +
+

Edited dictionary of supported mime types and related parsers.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def rabbithole_instantiates_parsers(file_handlers: dict, cat) -> dict:
+    """Hook the available parsers for ingesting files in the declarative memory.
+
+    Allows replacing or extending existing supported mime types and related parsers to customize the file ingestion.
+
+    Parameters
+    ----------
+    file_handlers : dict
+        Keys are the supported mime types and values are the related parsers.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    file_handlers : dict
+        Edited dictionary of supported mime types and related parsers.
+    """
+    return file_handlers
+
+
+
+ +
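+A minimal sketch adding a mime type (assuming the standard plugin import; the `TextParser` import path may differ across langchain versions):
+```python
+from langchain.document_loaders.parsers.txt import TextParser
+
+from cat.mad_hatter.decorators import hook
+
+@hook
+def rabbithole_instantiates_parsers(file_handlers, cat):
+    # ingest YAML uploads as plain text
+    file_handlers["application/yaml"] = TextParser()
+    return file_handlers
+```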
+ +
+ + +

+ rabbithole_instantiates_splitter(text_splitter, cat) + +

+ + +
+ +

Hook the splitter used to split text in chunks.

+

Allows replacing the default text splitter to customize the splitting process.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
text_splitter + TextSplitter + +
+

The text splitter used by default.

+
+
+ required +
cat + CheshireCat + +
+

Cheshire Cat instance.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
text_splitter + TextSplitter + +
+

An instance of a TextSplitter subclass.

+
+
+ +
+ Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py +
@hook(priority=0)
+def rabbithole_instantiates_splitter(text_splitter: TextSplitter, cat) -> TextSplitter:
+    """Hook the splitter used to split text in chunks.
+
+    Allows replacing the default text splitter to customize the splitting process.
+
+    Parameters
+    ----------
+    text_splitter : TextSplitter
+        The text splitter used by default.
+    cat : CheshireCat
+        Cheshire Cat instance.
+
+    Returns
+    -------
+    text_splitter : TextSplitter
+        An instance of a TextSplitter subclass.
+    """
+
+    # example on how to change chunking
+    # text_splitter._chunk_size = 64
+    # text_splitter._chunk_overlap = 8
+
+    return text_splitter
+
+
+
+ +
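+A minimal sketch following the commented example in the source above (assuming the standard plugin import; note these are private attributes):
+```python
+from cat.mad_hatter.decorators import hook
+
+@hook
+def rabbithole_instantiates_splitter(text_splitter, cat):
+    # smaller chunks with a little overlap
+    text_splitter._chunk_size = 128
+    text_splitter._chunk_overlap = 16
+    return text_splitter
+```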
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/mad_hatter/mad_hatter/index.html b/API_Documentation/mad_hatter/mad_hatter/index.html new file mode 100644 index 000000000..b5eb8297d --- /dev/null +++ b/API_Documentation/mad_hatter/mad_hatter/index.html @@ -0,0 +1,3902 @@ + + + + + + + + + + + + + + + + + + + + + + + + + mad_hatter - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

mad_hatter

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/mad_hatter/plugin/index.html b/API_Documentation/mad_hatter/plugin/index.html new file mode 100644 index 000000000..1519e5e27 --- /dev/null +++ b/API_Documentation/mad_hatter/plugin/index.html @@ -0,0 +1,3902 @@ + + + + + + + + + + + + + + + + + + + + + + + + + plugin - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

plugin

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/memory/vector_memory/index.html b/API_Documentation/memory/vector_memory/index.html new file mode 100644 index 000000000..505c1ce88 --- /dev/null +++ b/API_Documentation/memory/vector_memory/index.html @@ -0,0 +1,3902 @@ + + + + + + + + + + + + + + + + + + + + + + + + + vector_memory - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

vector_memory

+ +
+ + + + +
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/memory/vector_memory_collection/index.html b/API_Documentation/memory/vector_memory_collection/index.html new file mode 100644 index 000000000..f3cc6b4ea --- /dev/null +++ b/API_Documentation/memory/vector_memory_collection/index.html @@ -0,0 +1,4676 @@ + + + + + + + + + + + + + + + + + + + + + + + + + vector_memory_collection - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

vector_memory_collection

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ VectorMemoryCollection + + +

+ + +
+ + +
+ Source code in cat/memory/vector_memory_collection.py +
class VectorMemoryCollection:
+    def __init__(
+        self,
+        client: Any,
+        collection_name: str,
+        embedder_name: str,
+        embedder_size: int,
+    ):
+        # Set attributes (metadata on the embedder are useful because it may change at runtime)
+        self.client = client
+        self.collection_name = collection_name
+        self.embedder_name = embedder_name
+        self.embedder_size = embedder_size
+
+        # Check if memory collection exists also in vectorDB, otherwise create it
+        self.create_db_collection_if_not_exists()
+
+        # Check db collection vector size is same as embedder size
+        self.check_embedding_size()
+
+        # log collection info
+        log.debug(f"Collection {self.collection_name}:")
+        log.debug(self.client.get_collection(self.collection_name))
+
+    def check_embedding_size(self):
+        # having the same size does not necessarily imply being the same embedder
+        # having vectors with the same size but from different embedders in the same vector space is wrong
+        same_size = (
+            self.client.get_collection(self.collection_name).config.params.vectors.size
+            == self.embedder_size
+        )
+        alias = self.embedder_name + "_" + self.collection_name
+        if (
+            alias
+            == self.client.get_collection_aliases(self.collection_name)
+            .aliases[0]
+            .alias_name
+            and same_size
+        ):
+            log.debug(f'Collection "{self.collection_name}" has the same embedder')
+        else:
+            log.warning(f'Collection "{self.collection_name}" has different embedder')
+            # Memory snapshot saving can be turned off in the .env file with:
+            # SAVE_MEMORY_SNAPSHOTS=false
+            if get_env("CCAT_SAVE_MEMORY_SNAPSHOTS") == "true":
+                # dump collection on disk before deleting
+                self.save_dump()
+                log.info(f"Dump '{self.collection_name}' completed")
+
+            self.client.delete_collection(self.collection_name)
+            log.warning(f"Collection '{self.collection_name}' deleted")
+            self.create_collection()
+
+    def create_db_collection_if_not_exists(self):
+        # is collection present in DB?
+        collections_response = self.client.get_collections()
+        for c in collections_response.collections:
+            if c.name == self.collection_name:
+                # collection exists. Do nothing
+                log.info(
+                    f"Collection '{self.collection_name}' already present in vector store"
+                )
+                return
+
+        self.create_collection()
+
+    # create collection
+    def create_collection(self):
+        log.warning(f"Creating collection '{self.collection_name}' ...")
+        self.client.recreate_collection(
+            collection_name=self.collection_name,
+            vectors_config=VectorParams(
+                size=self.embedder_size, distance=Distance.COSINE
+            ),
+            # hybrid mode: original vector on Disk, quantized vector in RAM
+            optimizers_config=OptimizersConfigDiff(memmap_threshold=20000),
+            quantization_config=ScalarQuantization(
+                scalar=ScalarQuantizationConfig(
+                    type=ScalarType.INT8, quantile=0.95, always_ram=True
+                )
+            ),
+            # shard_number=3,
+        )
+
+        self.client.update_collection_aliases(
+            change_aliases_operations=[
+                CreateAliasOperation(
+                    create_alias=CreateAlias(
+                        collection_name=self.collection_name,
+                        alias_name=self.embedder_name + "_" + self.collection_name,
+                    )
+                )
+            ]
+        )
+
+    # adapted from https://github.com/langchain-ai/langchain/blob/bfc12a4a7644cfc4d832cc4023086a7a5374f46a/libs/langchain/langchain/vectorstores/qdrant.py#L1965
+    def _qdrant_filter_from_dict(self, filter: dict) -> Filter:
+        if not filter:
+            return None
+
+        return Filter(
+            must=[
+                condition
+                for key, value in filter.items()
+                for condition in self._build_condition(key, value)
+            ]
+        )
+
+    # adapted from https://github.com/langchain-ai/langchain/blob/bfc12a4a7644cfc4d832cc4023086a7a5374f46a/libs/langchain/langchain/vectorstores/qdrant.py#L1941
+    def _build_condition(self, key: str, value: Any) -> List[FieldCondition]:
+        out = []
+
+        if isinstance(value, dict):
+            for _key, value in value.items():
+                out.extend(self._build_condition(f"{key}.{_key}", value))
+        elif isinstance(value, list):
+            for _value in value:
+                if isinstance(_value, dict):
+                    out.extend(self._build_condition(f"{key}[]", _value))
+                else:
+                    out.extend(self._build_condition(f"{key}", _value))
+        else:
+            out.append(
+                FieldCondition(
+                    key=f"metadata.{key}",
+                    match=MatchValue(value=value),
+                )
+            )
+
+        return out
+
+    def add_point(
+        self,
+        content: str,
+        vector: Iterable,
+        metadata: dict = None,
+        id: Optional[str] = None,
+        **kwargs: Any,
+    ) -> List[str]:
+        """Add a point (and its metadata) to the vectorstore.
+
+        Args:
+            content: original text.
+            vector: Embedding vector.
+            metadata: Optional metadata dict associated with the text.
+            id:
+                Optional id to associate with the point. Id has to be a uuid-like string.
+
+        Returns:
+            The stored point (PointStruct) on success, None otherwise.
+        """
+
+        # TODO: may be adapted to upload batches of points as langchain does.
+        # Not necessary now as the bottleneck is the embedder
+        point = PointStruct(
+            id=id or uuid.uuid4().hex,
+            payload={
+                "page_content": content,
+                "metadata": metadata,
+            },
+            vector=vector,
+        )
+
+        update_status = self.client.upsert(
+            collection_name=self.collection_name, points=[point], **kwargs
+        )
+
+        if update_status.status == "completed":
+            # returning stored point
+            return point # TODOV2 return internal MemoryPoint
+        else:
+            return None
+
+    def delete_points_by_metadata_filter(self, metadata=None):
+        res = self.client.delete(
+            collection_name=self.collection_name,
+            points_selector=self._qdrant_filter_from_dict(metadata),
+        )
+        return res
+
+    # delete point in collection
+    def delete_points(self, points_ids):
+        res = self.client.delete(
+            collection_name=self.collection_name,
+            points_selector=points_ids,
+        )
+        return res
+
+    # retrieve similar memories from embedding
+    def recall_memories_from_embedding(
+        self, embedding, metadata=None, k=5, threshold=None
+    ):
+        # retrieve memories
+        memories = self.client.search(
+            collection_name=self.collection_name,
+            query_vector=embedding,
+            query_filter=self._qdrant_filter_from_dict(metadata),
+            with_payload=True,
+            with_vectors=True,
+            limit=k,
+            score_threshold=threshold,
+            search_params=SearchParams(
+                quantization=QuantizationSearchParams(
+                    ignore=False,
+                    rescore=True,
+                    oversampling=2.0,  # Available as of v1.3.0
+                )
+            ),
+        )
+
+        # convert Qdrant points to langchain.Document
+        langchain_documents_from_points = []
+        for m in memories:
+            langchain_documents_from_points.append(
+                (
+                    Document(
+                        page_content=m.payload.get("page_content"),
+                        metadata=m.payload.get("metadata") or {},
+                    ),
+                    m.score,
+                    m.vector,
+                    m.id,
+                )
+            )
+
+        # we'll move out of langchain conventions soon and have our own cat Document
+        # for doc, score, vector in langchain_documents_from_points:
+        #    doc.lc_kwargs = None
+
+        return langchain_documents_from_points
+
+    # retrieve all the points in the collection
+    def get_all_points(self):
+        # retrieving the points
+        all_points, _ = self.client.scroll(
+            collection_name=self.collection_name,
+            with_vectors=True,
+            limit=10000,  # yeah, good for now dear :*
+        )
+
+        return all_points
+
+    def db_is_remote(self):
+        return isinstance(self.client._client, QdrantRemote)
+
+    # dump collection on disk before deleting
+    def save_dump(self, folder="dormouse/"):
+        # only do snapshotting if using remote Qdrant
+        if not self.db_is_remote():
+            return
+
+        host = self.client._client._host
+        port = self.client._client._port
+
+        if os.path.isdir(folder):
+            log.info("Directory dormouse exists")
+        else:
+            log.warning("Directory dormouse does NOT exists, creating it.")
+            os.mkdir(folder)
+
+        self.snapshot_info = self.client.create_snapshot(
+            collection_name=self.collection_name
+        )
+        snapshot_url_in = (
+            "http://"
+            + str(host)
+            + ":"
+            + str(port)
+            + "/collections/"
+            + self.collection_name
+            + "/snapshots/"
+            + self.snapshot_info.name
+        )
+        snapshot_url_out = folder + self.snapshot_info.name
+        # rename snapshots for an easier restore in the future
+        alias = (
+            self.client.get_collection_aliases(self.collection_name)
+            .aliases[0]
+            .alias_name
+        )
+        response = requests.get(snapshot_url_in)
+        with open(snapshot_url_out, "wb") as snapshot_file:
+            snapshot_file.write(response.content)
+        new_name = folder + alias.replace("/", "-") + ".snapshot"
+        os.rename(snapshot_url_out, new_name)
+        for s in self.client.list_snapshots(self.collection_name):
+            self.client.delete_snapshot(
+                collection_name=self.collection_name, snapshot_name=s.name
+            )
+        log.warning(f'Dump "{new_name}" completed')
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ add_point(content, vector, metadata=None, id=None, **kwargs) + +

+ + +
+ +

Add a point (and its metadata) to the vectorstore.

+

Args: + content: original text. + vector: Embedding vector. + metadata: Optional metadata dict associated with the text. + id: + Optional id to associate with the point. Id has to be a uuid-like string.

+

Returns: + The stored point (PointStruct) on success, None otherwise.

+ +
+ Source code in cat/memory/vector_memory_collection.py +
def add_point(
+    self,
+    content: str,
+    vector: Iterable,
+    metadata: dict = None,
+    id: Optional[str] = None,
+    **kwargs: Any,
+) -> List[str]:
+    """Add a point (and its metadata) to the vectorstore.
+
+    Args:
+        content: original text.
+        vector: Embedding vector.
+        metadata: Optional metadata dict associated with the text.
+        id:
+            Optional id to associate with the point. Id has to be a uuid-like string.
+
+    Returns:
+        The stored point (PointStruct) on success, None otherwise.
+    """
+
+    # TODO: may be adapted to upload batches of points as langchain does.
+    # Not necessary now as the bottleneck is the embedder
+    point = PointStruct(
+        id=id or uuid.uuid4().hex,
+        payload={
+            "page_content": content,
+            "metadata": metadata,
+        },
+        vector=vector,
+    )
+
+    update_status = self.client.upsert(
+        collection_name=self.collection_name, points=[point], **kwargs
+    )
+
+    if update_status.status == "completed":
+        # returning stored point
+        return point # TODOV2 return internal MemoryPoint
+    else:
+        return None
+
+
+
+ +
+ + + +
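+A hypothetical usage sketch (`collection` is a VectorMemoryCollection and `embedder` any embedder producing vectors of `embedder_size`; both names are placeholders):
+```python
+# embed the text with the same embedder configured for the collection
+vector = embedder.embed_query("The Cheshire Cat grins.")
+
+stored = collection.add_point(
+    content="The Cheshire Cat grins.",
+    vector=vector,
+    metadata={"source": "example", "when": 1700000000.0},
+)
+if stored is None:
+    print("upsert failed")
+```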
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/memory/working_memory/index.html b/API_Documentation/memory/working_memory/index.html new file mode 100644 index 000000000..1b8d9e5be --- /dev/null +++ b/API_Documentation/memory/working_memory/index.html @@ -0,0 +1,4246 @@ + + + + + + + + + + + + + + + + + + + + + + + + + working_memory - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

working_memory

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ WorkingMemory + + +

+ + +
+

+ Bases: BaseModelDict

+ + +

Cat's volatile memory.

+

Handy class that behaves like a dict to store temporary custom data.

+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ dict[str, list] + +
+

Default instance is a dictionary with history key set to an empty list.

+
+
+ + +
+ Notes +

The constructor instantiates a dictionary with a history key set to an empty list that is further used to store +the conversation turns between the Human and the AI.

+
+
+ Source code in cat/memory/working_memory.py +
 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
class WorkingMemory(BaseModelDict):
+    """Cat's volatile memory.
+
+    Handy class that behaves like a `dict` to store temporary custom data.
+
+    Returns
+    -------
+    dict[str, list]
+        Default instance is a dictionary with `history` key set to an empty list.
+
+    Notes
+    -----
+    The constructor instantiates a dictionary with a `history` key set to an empty list that is further used to store
+    the conversation turns between the Human and the AI.
+    """
+
+    # stores conversation history
+    history: List = []
+    user_message_json: None | UserMessage = None
+    active_form: None | CatForm = None
+
+    # recalled memories attributes
+    recall_query: str = ""
+    episodic_memories: List = []
+    declarative_memories: List = []
+    procedural_memories: List = []
+
+    # track models usage
+    model_interactions: List[ModelInteraction] = []
+
+    def update_conversation_history(self, who, message, why={}):
+        """Update the conversation history.
+
+        The method appends the latest conversation turn to the history key.
+
+        Parameters
+        ----------
+        who : str
+            Who said the message. Can either be `Human` or `AI`.
+        message : str
+            The message said.
+
+        """
+        # append latest message in conversation
+        # TODO: Message should be of type CatMessage or UserMessage. For retrocompatibility we put a new key
+        # we are sure that who does not change in the current call
+        self.history.append(
+            {
+                "who": who,
+                "message": message,
+                "why": why,
+                "when": time.time(),
+                "role": Role.AI if who == "AI" else Role.Human,
+            }
+        )
+
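A short sketch of the dict-like behavior described above. It assumes the working memory is reached through the `cat` object available inside plugins; the custom key is illustrative.

# store temporary custom data, as in a plain dict
cat.working_memory["user_mood"] = "curious"

# append a turn to the conversation history
cat.working_memory.update_conversation_history(who="AI", message="Hello!")
print(cat.working_memory.history[-1]["message"])  # -> "Hello!"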
+
+ + + +
+ + + + + + + + + +
+ + +

+ update_conversation_history(who, message, why={}) + +

+ + +
+ +

Update the conversation history.

+

The method appends the latest conversation turn to the history key.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
who + str + +
+

Who said the message. Can either be Human or AI.

+
+
+ required +
message + str + +
+

The message said.

+
+
+ required +
+ +
+ Source code in cat/memory/working_memory.py +
38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
def update_conversation_history(self, who, message, why={}):
+    """Update the conversation history.
+
+    The method appends the latest conversation turn to the history key.
+
+    Parameters
+    ----------
+    who : str
+        Who said the message. Can either be `Human` or `AI`.
+    message : str
+        The message said.
+
+    """
+    # append latest message in conversation
+    # TODO: Message should be of type CatMessage or UserMessage. For retrocompatibility we put a new key
+    # we are sure that who does not change in the current call
+    self.history.append(
+        {
+            "who": who,
+            "message": message,
+            "why": why,
+            "when": time.time(),
+            "role": Role.AI if who == "AI" else Role.Human,
+        }
+    )
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/rabbit_hole/index.html b/API_Documentation/rabbit_hole/index.html new file mode 100644 index 000000000..ef0dc5f6b --- /dev/null +++ b/API_Documentation/rabbit_hole/index.html @@ -0,0 +1,6374 @@ + + + + + + + + + + + + + + + + + + + + + + + + + rabbit_hole - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

rabbit_hole

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ RabbitHole + + +

+ + +
+ + +

Manages content ingestion. I'm late... I'm late!

+ +
+ Source code in cat/rabbit_hole.py +
 25
+ 26
+ 27
+ 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
@singleton
+class RabbitHole:
+    """Manages content ingestion. I'm late... I'm late!"""
+
+    def __init__(self, cat) -> None:
+        self.__cat = cat
+
+    # each time we access the file handlers, plugins can intervene
+    def __reload_file_handlers(self):
+        # default file handlers
+        self.__file_handlers = {
+            "application/pdf": PDFMinerParser(),
+            "text/plain": TextParser(),
+            "text/markdown": TextParser(),
+            "text/html": BS4HTMLParser(),
+        }
+
+        # no access to stray
+        self.__file_handlers = self.__cat.mad_hatter.execute_hook(
+            "rabbithole_instantiates_parsers", self.__file_handlers, cat=self.__cat
+        )
+
+    def __reload_text_splitter(self):
+        # default text splitter
+        self.__text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
+            chunk_size=256,
+            chunk_overlap=64,
+            separators=["\\n\\n", "\n\n", ".\\n", ".\n", "\\n", "\n", " ", ""],
+            encoding_name="cl100k_base",
+            keep_separator=True,
+            strip_whitespace=True,
+        )
+
+        # no access to stray
+        self.__text_splitter = self.__cat.mad_hatter.execute_hook(
+            "rabbithole_instantiates_splitter", self.__text_splitter, cat=self.__cat
+        )
+
+    def ingest_memory(
+            self,
+            stray,
+            file: UploadFile
+        ):
+        """Upload memories to the declarative memory from a JSON file.
+
+        Parameters
+        ----------
+        file : UploadFile
+            File object sent via `rabbithole/memory` hook.
+
+        Notes
+        -----
+        This method allows uploading a JSON file containing vector and text memories directly to the declarative memory.
+        When doing this, please make sure the embedder used to export the memories is the same as the one used
+        when uploading.
+        The method also performs a check on the dimensionality of the embeddings (i.e. length of each vector).
+
+        """
+
+        # Get file bytes
+        file_bytes = file.file.read()
+
+        # Load file bytes into a dict
+        memories = json.loads(file_bytes.decode("utf-8"))
+
+        # Check the embedder used for the uploaded memories is the same the Cat is using now
+        upload_embedder = memories["embedder"]
+        cat_embedder = str(stray.embedder.__class__.__name__)
+
+        if upload_embedder != cat_embedder:
+            message = f"Embedder mismatch: file embedder {upload_embedder} is different from {cat_embedder}"
+            raise Exception(message)
+
+        # Get Declarative memories in file
+        declarative_memories = memories["collections"]["declarative"]
+
+        # Store data to upload the memories in batch
+        ids = [i["id"] for i in declarative_memories]
+        payloads = [
+            {"page_content": p["page_content"], "metadata": p["metadata"]}
+            for p in declarative_memories
+        ]
+        vectors = [v["vector"] for v in declarative_memories]
+
+        log.info(f"Preparing to load {len(vectors)} vector memories")
+
+        # Check embedding size is correct
+        embedder_size = stray.memory.vectors.declarative.embedder_size
+        len_ok = [len(v) == embedder_size for v in vectors]
+
+        if not all(len_ok):
+            message = (
+                f"Embedding size mismatch: vectors length should be {embedder_size}"
+            )
+            raise Exception(message)
+
+        # Upsert memories in batch mode # TODO REFACTOR: use VectorMemoryCollection.add_point
+        stray.memory.vectors.vector_db.upsert(
+            collection_name="declarative",
+            points=models.Batch(ids=ids, payloads=payloads, vectors=vectors),
+        )
+
+    def ingest_file(
+        self,
+        stray,
+        file: Union[str, UploadFile],
+        chunk_size: int | None = None,
+        chunk_overlap: int | None = None,
+        metadata: dict = {}
+    ):
+        """Load a file in the Cat's declarative memory.
+
+        The method splits and converts the file into Langchain `Document`. Then, it stores the `Document` in the Cat's
+        memory.
+
+        Parameters
+        ----------
+        file : str, UploadFile
+            The file can be a path passed as a string or an `UploadFile` object if the document is ingested using the
+            `rabbithole` endpoint.
+        chunk_size : int
+            Number of tokens in each document chunk.
+        chunk_overlap : int
+            Number of overlapping tokens between consecutive chunks.
+        metadata : dict
+            Metadata to be stored with each chunk.
+
+        Notes
+        ----------
+        Currently supported formats are `.txt`, `.pdf` and `.md`.
+        You can add custom ones or substitute the above via RabbitHole hooks.
+
+        See Also
+        ----------
+        before_rabbithole_stores_documents
+        """
+
+        # split file into a list of docs
+        docs = self.file_to_docs(
+            stray=stray,
+            file=file,
+            chunk_size=chunk_size,
+            chunk_overlap=chunk_overlap
+        )
+
+        # store in memory
+        if isinstance(file, str):
+            filename = file
+        else:
+            filename = file.filename
+
+        self.store_documents(stray=stray, docs=docs, source=filename, metadata=metadata)
+
+    def file_to_docs(
+        self,
+        stray,
+        file: Union[str, UploadFile],
+        chunk_size: int | None = None,
+        chunk_overlap: int | None = None
+    ) -> List[Document]:
+        """Load and convert files to Langchain `Document`.
+
+        This method takes a file from a Python script or from the `/rabbithole/` and `/rabbithole/web` endpoints.
+        It then loads the file in memory and splits it into overlapping chunks of text.
+
+        Parameters
+        ----------
+        file : str, UploadFile
+            The file can be either a string path if loaded programmatically, a FastAPI `UploadFile`
+            if coming from the `/rabbithole/` endpoint or a URL if coming from the `/rabbithole/web` endpoint.
+        chunk_size : int
+            Number of tokens in each document chunk.
+        chunk_overlap : int
+            Number of overlapping tokens between consecutive chunks.
+
+        Returns
+        -------
+        docs : List[Document]
+            List of Langchain `Document` of chunked text.
+
+        Notes
+        -----
+        This method is used by both `/rabbithole/` and `/rabbithole/web` endpoints.
+        Currently supported files are `.txt`, `.pdf`, `.md` and web pages.
+
+        """
+
+        # Check type of incoming file.
+        if isinstance(file, UploadFile):
+            # Get mime type and source of UploadFile
+            content_type = mimetypes.guess_type(file.filename)[0]
+            source = file.filename
+
+            # Get file bytes
+            file_bytes = file.file.read()
+        elif isinstance(file, str):
+            # Check if string file is a string or url
+            parsed_file = urlparse(file)
+            is_url = all([parsed_file.scheme, parsed_file.netloc])
+
+            if is_url:
+                # Make a request with a fake browser name
+                request = httpx.get(file, headers={"User-Agent": "Magic Browser"})
+
+                # Define mime type and source of url
+                content_type = request.headers["Content-Type"].split(";")[0]
+                source = file
+
+                try:
+                    # Get binary content of url
+                    file_bytes = request.content
+                except HTTPError as e:
+                    log.error(e)
+            else:
+                # Get mime type from file extension and source
+                content_type = mimetypes.guess_type(file)[0]
+                source = os.path.basename(file)
+
+                # Get file bytes
+                with open(file, "rb") as f:
+                    file_bytes = f.read()
+        else:
+            raise ValueError(f"{type(file)} is not a valid type.")
+        return self.string_to_docs(
+            stray=stray,
+            file_bytes=file_bytes,
+            source=source,
+            content_type=content_type,
+            chunk_size=chunk_size,
+            chunk_overlap=chunk_overlap
+        )
+
+    def string_to_docs(
+        self,
+        stray,
+        file_bytes: str,
+        source: str = None,
+        content_type: str = "text/plain",
+        chunk_size: int | None = None,
+        chunk_overlap: int | None = None
+    ) -> List[Document]:
+        """Convert string to Langchain `Document`.
+
+    Takes a string and converts it to a langchain `Document`.
+    It then loads the string in memory and splits it into overlapping chunks of text.
+
+        Parameters
+        ----------
+        file_bytes : str
+            The string to be converted.
+        source: str
+            Source filename.
+        content_type:
+            Mimetype of content.
+        chunk_size : int
+            Number of tokens in each document chunk.
+        chunk_overlap : int
+            Number of overlapping tokens between consecutive chunks.
+
+        Returns
+        -------
+        docs : List[Document]
+            List of Langchain `Document` of chunked text.
+        """
+
+        # Load the bytes in the Blob schema
+        blob = Blob(data=file_bytes, mimetype=content_type, source=source).from_data(
+            data=file_bytes, mime_type=content_type, path=source
+        )
+        # Parser based on the mime type
+        parser = MimeTypeBasedParser(handlers=self.file_handlers)
+
+        # Parse the text
+        stray.send_ws_message(
+            "I'm parsing the content. Large content may take a few minutes..."
+        )
+        super_docs = parser.parse(blob)
+
+        # Split
+        stray.send_ws_message("Parsing completed. Now starting the reading process...")
+        docs = self.__split_text(
+            stray=stray,
+            text=super_docs,
+            chunk_size=chunk_size,
+            chunk_overlap=chunk_overlap,
+        )
+        return docs
+
+    def store_documents(
+            self,
+            stray,
+            docs: List[Document],
+            source: str, # TODOV2: is this necessary?
+            metadata: dict = {}
+        ) -> None:
+        """Add documents to the Cat's declarative memory.
+
+        This method loops over a list of Langchain `Document` and adds some metadata. Namely, the source filename and the
+        timestamp of insertion. Once done, the method notifies the client via Websocket connection.
+
+        Parameters
+        ----------
+        docs : List[Document]
+            List of Langchain `Document` to be inserted in the Cat's declarative memory.
+        source : str
+            Source name to be added as metadata. It can be a file name or a URL.
+        metadata : dict
+            Metadata to be stored with each chunk.
+
+        Notes
+        -------
+        At this point, it is possible to customize the Cat's behavior using the `before_rabbithole_insert_memory` hook
+        to edit the memories before they are inserted in the vector database.
+
+        See Also
+        --------
+        before_rabbithole_insert_memory
+        """
+
+        log.info(f"Preparing to memorize {len(docs)} vectors")
+
+        # hook the docs before they are stored in the vector memory
+        docs = stray.mad_hatter.execute_hook(
+            "before_rabbithole_stores_documents", docs, cat=stray
+        )
+
+        # classic embed
+        time_last_notification = time.time()
+        time_interval = 10  # a notification every 10 secs
+        stored_points = []
+        for d, doc in enumerate(docs):
+            if time.time() - time_last_notification > time_interval:
+                time_last_notification = time.time()
+                perc_read = int(d / len(docs) * 100)
+                read_message = f"Read {perc_read}% of {source}"
+                stray.send_ws_message(read_message)
+                log.warning(read_message)
+
+            # add default metadata
+            doc.metadata["source"] = source
+            doc.metadata["when"] = time.time()
+            # add custom metadata (sent via endpoint)
+            for k,v in metadata.items():
+                doc.metadata[k] = v
+
+            doc = stray.mad_hatter.execute_hook(
+                "before_rabbithole_insert_memory", doc, cat=stray
+            )
+            inserting_info = f"{d + 1}/{len(docs)}:    {doc.page_content}"
+            if doc.page_content != "":
+                doc_embedding = stray.embedder.embed_documents([doc.page_content])
+                stored_point = stray.memory.vectors.declarative.add_point(
+                    doc.page_content,
+                    doc_embedding[0],
+                    doc.metadata,
+                )
+                stored_points.append(stored_point)
+
+                log.info(f"Inserted into memory ({inserting_info})")
+            else:
+                log.info(f"Skipped memory insertion of empty doc ({inserting_info})")
+
+            # wait a little to avoid APIs rate limit errors
+            time.sleep(0.05)
+
+        # hook the points after they are stored in the vector memory
+        stray.mad_hatter.execute_hook(
+            "after_rabbithole_stored_documents", source, stored_points, cat=stray
+        )
+
+        # notify client
+        finished_reading_message = (
+            f"Finished reading {source}, I made {len(docs)} thoughts on it."
+        )
+
+        stray.send_ws_message(finished_reading_message)
+
+        log.warning(f"Done uploading {source}")
+
+    def __split_text(self, stray, text, chunk_size, chunk_overlap):
+        """Split text into overlapping chunks.
+
+        This method executes the `rabbithole_splits_text` hook to split the incoming text into overlapping
+        chunks of text. Two other hooks are available to edit the text before and after the split step.
+
+        Parameters
+        ----------
+        text : str
+            Content of the loaded file.
+        chunk_size : int
+            Number of tokens in each document chunk.
+        chunk_overlap : int
+            Number of overlapping tokens between consecutive chunks.
+
+        Returns
+        -------
+        docs : List[Document]
+            List of split Langchain `Document`.
+
+        Notes
+        -----
+        The default behavior only executes the `rabbithole_splits_text` hook. `before_rabbithole_splits_text` and
+        `after_rabbithole_splitted_text` hooks return the original input without any modification.
+
+        See Also
+        --------
+        before_rabbithole_splits_text
+        rabbithole_splits_text
+        after_rabbithole_splitted_text
+
+        """
+        # do something on the text before it is split
+        text = stray.mad_hatter.execute_hook(
+            "before_rabbithole_splits_text", text, cat=stray
+        )
+
+        # hooks decide the text splitter (see @property .text_splitter)
+        text_splitter = self.text_splitter
+
+        # override chunk_size and chunk_overlap only if the request has those info
+        if chunk_size:
+            text_splitter._chunk_size = chunk_size
+        if chunk_overlap:
+            text_splitter._chunk_overlap = chunk_overlap
+
+        log.info(f"Chunk size: {chunk_size}, chunk overlap: {chunk_overlap}")
+        # split text
+        docs = text_splitter.split_documents(text)
+        # remove short texts (page numbers, isolated words, etc.)
+        # TODO: join each short chunk with previous one, instead of deleting them
+        docs = list(filter(lambda d: len(d.page_content) > 10, docs))
+
+        # do something on the text after it is split
+        docs = stray.mad_hatter.execute_hook(
+            "after_rabbithole_splitted_text", docs, cat=stray
+        )
+
+        return docs
+
+    # each time we access the file handlers, plugins can intervene
+    @property
+    def file_handlers(self):
+        self.__reload_file_handlers()
+        return self.__file_handlers
+
+    # each time we access the text splitter, plugins can intervene
+    @property
+    def text_splitter(self):
+        self.__reload_text_splitter()
+        return self.__text_splitter
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ __split_text(stray, text, chunk_size, chunk_overlap) + +

+ + +
+ +

Split text into overlapping chunks.

+

This method executes the rabbithole_splits_text hook to split the incoming text into overlapping +chunks of text. Two other hooks are available to edit the text before and after the split step.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
text + str + +
+

Content of the loaded file.

+
+
+ required +
chunk_size + int + +
+

Number of tokens in each document chunk.

+
+
+ required +
chunk_overlap + int + +
+

Number of overlapping tokens between consecutive chunks.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
docs + List[Document] + +
+

List of split Langchain Document.

+
+
+ + +
+ Notes +

The default behavior only executes the rabbithole_splits_text hook. before_rabbithole_splits_text and +after_rabbithole_splitted_text hooks return the original input without any modification.

+
+ +
+ See Also +

before_rabbithole_splits_text +rabbithole_splits_text +after_rabbithole_splitted_text

+
+
+ Source code in cat/rabbit_hole.py +
def __split_text(self, stray, text, chunk_size, chunk_overlap):
+    """Split text into overlapping chunks.
+
+    This method executes the `rabbithole_splits_text` hook to split the incoming text into overlapping
+    chunks of text. Two other hooks are available to edit the text before and after the split step.
+
+    Parameters
+    ----------
+    text : str
+        Content of the loaded file.
+    chunk_size : int
+        Number of tokens in each document chunk.
+    chunk_overlap : int
+        Number of overlapping tokens between consecutive chunks.
+
+    Returns
+    -------
+    docs : List[Document]
+        List of split Langchain `Document`.
+
+    Notes
+    -----
+    The default behavior only executes the `rabbithole_splits_text` hook. `before_rabbithole_splits_text` and
+    `after_rabbithole_splitted_text` hooks return the original input without any modification.
+
+    See Also
+    --------
+    before_rabbithole_splits_text
+    rabbithole_splits_text
+    after_rabbithole_splitted_text
+
+    """
+    # do something on the text before it is split
+    text = stray.mad_hatter.execute_hook(
+        "before_rabbithole_splits_text", text, cat=stray
+    )
+
+    # hooks decide the text splitter (see @property .text_splitter)
+    text_splitter = self.text_splitter
+
+    # override chunk_size and chunk_overlap only if the request has those info
+    if chunk_size:
+        text_splitter._chunk_size = chunk_size
+    if chunk_overlap:
+        text_splitter._chunk_overlap = chunk_overlap
+
+    log.info(f"Chunk size: {chunk_size}, chunk overlap: {chunk_overlap}")
+    # split text
+    docs = text_splitter.split_documents(text)
+    # remove short texts (page numbers, isolated words, etc.)
+    # TODO: join each short chunk with previous one, instead of deleting them
+    docs = list(filter(lambda d: len(d.page_content) > 10, docs))
+
+    # do something on the text after it is split
+    docs = stray.mad_hatter.execute_hook(
+        "after_rabbithole_splitted_text", docs, cat=stray
+    )
+
+    return docs
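As a sketch of the hook points listed above, a plugin could normalize text before the split step. This assumes the usual @hook decorator from cat.mad_hatter.decorators and that `text` arrives as a list of Langchain Document, as string_to_docs passes it.

from cat.mad_hatter.decorators import hook

@hook
def before_rabbithole_splits_text(text, cat):
    # illustrative: collapse runs of whitespace before chunking
    for doc in text:
        doc.page_content = " ".join(doc.page_content.split())
    return text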
+
+
+
+ +
+ +
+ + +

+ file_to_docs(stray, file, chunk_size=None, chunk_overlap=None) + +

+ + +
+ +

Load and convert files to Langchain Document.

+

This method takes a file from a Python script or from the /rabbithole/ and /rabbithole/web endpoints. +It then loads the file in memory and splits it into overlapping chunks of text.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file + (str, UploadFile) + +
+

The file can be either a string path if loaded programmatically, a FastAPI UploadFile +if coming from the /rabbithole/ endpoint or a URL if coming from the /rabbithole/web endpoint.

+
+
+ required +
chunk_size + int + +
+

Number of tokens in each document chunk.

+
+
+ None +
chunk_overlap + int + +
+

Number of overlapping tokens between consecutive chunks.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
docs + List[Document] + +
+

List of Langchain Document of chunked text.

+
+
+ + +
+ Notes +

This method is used by both /rabbithole/ and /rabbithole/web endpoints. +Currently supported files are .txt, .pdf, .md and web pages.

+
+
+ Source code in cat/rabbit_hole.py +
def file_to_docs(
+    self,
+    stray,
+    file: Union[str, UploadFile],
+    chunk_size: int | None = None,
+    chunk_overlap: int | None = None
+) -> List[Document]:
+    """Load and convert files to Langchain `Document`.
+
+    This method takes a file from a Python script or from the `/rabbithole/` and `/rabbithole/web` endpoints.
+    It then loads the file in memory and splits it into overlapping chunks of text.
+
+    Parameters
+    ----------
+    file : str, UploadFile
+        The file can be either a string path if loaded programmatically, a FastAPI `UploadFile`
+        if coming from the `/rabbithole/` endpoint or a URL if coming from the `/rabbithole/web` endpoint.
+    chunk_size : int
+        Number of tokens in each document chunk.
+    chunk_overlap : int
+        Number of overlapping tokens between consecutive chunks.
+
+    Returns
+    -------
+    docs : List[Document]
+        List of Langchain `Document` of chunked text.
+
+    Notes
+    -----
+    This method is used by both `/rabbithole/` and `/rabbithole/web` endpoints.
+    Currently supported files are `.txt`, `.pdf`, `.md` and web pages.
+
+    """
+
+    # Check type of incoming file.
+    if isinstance(file, UploadFile):
+        # Get mime type and source of UploadFile
+        content_type = mimetypes.guess_type(file.filename)[0]
+        source = file.filename
+
+        # Get file bytes
+        file_bytes = file.file.read()
+    elif isinstance(file, str):
+        # Check if string file is a string or url
+        parsed_file = urlparse(file)
+        is_url = all([parsed_file.scheme, parsed_file.netloc])
+
+        if is_url:
+            # Make a request with a fake browser name
+            request = httpx.get(file, headers={"User-Agent": "Magic Browser"})
+
+            # Define mime type and source of url
+            content_type = request.headers["Content-Type"].split(";")[0]
+            source = file
+
+            try:
+                # Get binary content of url
+                file_bytes = request.content
+            except HTTPError as e:
+                log.error(e)
+        else:
+            # Get mime type from file extension and source
+            content_type = mimetypes.guess_type(file)[0]
+            source = os.path.basename(file)
+
+            # Get file bytes
+            with open(file, "rb") as f:
+                file_bytes = f.read()
+    else:
+        raise ValueError(f"{type(file)} is not a valid type.")
+    return self.string_to_docs(
+        stray=stray,
+        file_bytes=file_bytes,
+        source=source,
+        content_type=content_type,
+        chunk_size=chunk_size,
+        chunk_overlap=chunk_overlap
+    )
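A hedged usage sketch: assuming the RabbitHole instance is reachable through the `cat` object (as it typically is inside plugins), a URL can be turned into chunked documents like this; the URL and chunking values are illustrative.

docs = cat.rabbit_hole.file_to_docs(
    stray=cat,
    file="https://example.com/article.html",  # may also be a local path
    chunk_size=256,
    chunk_overlap=64,
)
print(f"Got {len(docs)} chunks")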
+
+
+
+ +
+ +
+ + +

+ ingest_file(stray, file, chunk_size=None, chunk_overlap=None, metadata={}) + +

+ + +
+ +

Load a file in the Cat's declarative memory.

+

The method splits and converts the file into Langchain Document. Then, it stores the Document in the Cat's +memory.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file + (str, UploadFile) + +
+

The file can be a path passed as a string or an UploadFile object if the document is ingested using the +rabbithole endpoint.

+
+
+ required +
chunk_size + int + +
+

Number of tokens in each document chunk.

+
+
+ None +
chunk_overlap + int + +
+

Number of overlapping tokens between consecutive chunks.

+
+
+ None +
metadata + dict + +
+

Metadata to be stored with each chunk.

+
+
+ {} +
+ + +
+ Notes +

Currently supported formats are .txt, .pdf and .md. +You can add custom ones or substitute the above via RabbitHole hooks.

+
+ +
+ See Also +

before_rabbithole_stores_documents

+
+
+ Source code in cat/rabbit_hole.py +
def ingest_file(
+    self,
+    stray,
+    file: Union[str, UploadFile],
+    chunk_size: int | None = None,
+    chunk_overlap: int | None = None,
+    metadata: dict = {}
+):
+    """Load a file in the Cat's declarative memory.
+
+    The method splits and converts the file into Langchain `Document`. Then, it stores the `Document` in the Cat's
+    memory.
+
+    Parameters
+    ----------
+    file : str, UploadFile
+        The file can be a path passed as a string or an `UploadFile` object if the document is ingested using the
+        `rabbithole` endpoint.
+    chunk_size : int
+        Number of tokens in each document chunk.
+    chunk_overlap : int
+        Number of overlapping tokens between consecutive chunks.
+    metadata : dict
+        Metadata to be stored with each chunk.
+
+    Notes
+    ----------
+    Currently supported formats are `.txt`, `.pdf` and `.md`.
+    You can add custom ones or substitute the above via RabbitHole hooks.
+
+    See Also
+    ----------
+    before_rabbithole_stores_documents
+    """
+
+    # split file into a list of docs
+    docs = self.file_to_docs(
+        stray=stray,
+        file=file,
+        chunk_size=chunk_size,
+        chunk_overlap=chunk_overlap
+    )
+
+    # store in memory
+    if isinstance(file, str):
+        filename = file
+    else:
+        filename = file.filename
+
+    self.store_documents(stray=stray, docs=docs, source=filename, metadata=metadata)
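For example, a sketch under the same assumption that the RabbitHole is reached through the `cat` object; the path and metadata are illustrative.

# ingest a local markdown file into declarative memory
cat.rabbit_hole.ingest_file(
    stray=cat,
    file="./data/notes.md",
    metadata={"project": "demo"},
)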
+
+
+
+ +
+ +
+ + +

+ ingest_memory(stray, file) + +

+ + +
+ +

Upload memories to the declarative memory from a JSON file.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file + UploadFile + +
+

File object sent via rabbithole/memory hook.

+
+
+ required +
+ + +
+ Notes +

This method allows uploading a JSON file containing vector and text memories directly to the declarative memory. +When doing this, please make sure the embedder used to export the memories is the same as the one used +when uploading. +The method also performs a check on the dimensionality of the embeddings (i.e. length of each vector).

+
+
+ Source code in cat/rabbit_hole.py +
def ingest_memory(
+        self,
+        stray,
+        file: UploadFile
+    ):
+    """Upload memories to the declarative memory from a JSON file.
+
+    Parameters
+    ----------
+    file : UploadFile
+        File object sent via `rabbithole/memory` hook.
+
+    Notes
+    -----
+    This method allows uploading a JSON file containing vector and text memories directly to the declarative memory.
+    When doing this, please make sure the embedder used to export the memories is the same as the one used
+    when uploading.
+    The method also performs a check on the dimensionality of the embeddings (i.e. length of each vector).
+
+    """
+
+    # Get file bytes
+    file_bytes = file.file.read()
+
+    # Load file bytes into a dict
+    memories = json.loads(file_bytes.decode("utf-8"))
+
+    # Check the embedder used for the uploaded memories is the same the Cat is using now
+    upload_embedder = memories["embedder"]
+    cat_embedder = str(stray.embedder.__class__.__name__)
+
+    if upload_embedder != cat_embedder:
+        message = f"Embedder mismatch: file embedder {upload_embedder} is different from {cat_embedder}"
+        raise Exception(message)
+
+    # Get Declarative memories in file
+    declarative_memories = memories["collections"]["declarative"]
+
+    # Store data to upload the memories in batch
+    ids = [i["id"] for i in declarative_memories]
+    payloads = [
+        {"page_content": p["page_content"], "metadata": p["metadata"]}
+        for p in declarative_memories
+    ]
+    vectors = [v["vector"] for v in declarative_memories]
+
+    log.info(f"Preparing to load {len(vectors)} vector memories")
+
+    # Check embedding size is correct
+    embedder_size = stray.memory.vectors.declarative.embedder_size
+    len_ok = [len(v) == embedder_size for v in vectors]
+
+    if not all(len_ok):
+        message = (
+            f"Embedding size mismatch: vectors length should be {embedder_size}"
+        )
+        raise Exception(message)
+
+    # Upsert memories in batch mode # TODO REFACTOR: use VectorMemoryCollection.add_point
+    stray.memory.vectors.vector_db.upsert(
+        collection_name="declarative",
+        points=models.Batch(ids=ids, payloads=payloads, vectors=vectors),
+    )
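From the keys the method reads, the uploaded JSON is expected to look roughly like the sketch below; this is an inferred shape, not a formal schema, and the values are illustrative.

memories = {
    "embedder": "OpenAIEmbeddings",  # must match the active embedder class name
    "collections": {
        "declarative": [
            {
                "id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
                "page_content": "Some remembered text.",
                "metadata": {"source": "export.json"},
                "vector": [0.1, 0.2, 0.3],  # length must equal embedder_size
            }
        ]
    },
}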
+
+
+
+ +
+ +
+ + +

+ store_documents(stray, docs, source, metadata={}) + +

+ + +
+ +

Add documents to the Cat's declarative memory.

+

This method loops over a list of Langchain Document and adds some metadata. Namely, the source filename and the +timestamp of insertion. Once done, the method notifies the client via Websocket connection.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
docs + List[Document] + +
+

List of Langchain Document to be inserted in the Cat's declarative memory.

+
+
+ required +
source + str + +
+

Source name to be added as metadata. It can be a file name or a URL.

+
+
+ required +
metadata + dict + +
+

Metadata to be stored with each chunk.

+
+
+ {} +
+ + +
+ Notes +

At this point, it is possible to customize the Cat's behavior using the before_rabbithole_insert_memory hook +to edit the memories before they are inserted in the vector database.

+
+ +
+ See Also +

before_rabbithole_insert_memory

+
+
+ Source code in cat/rabbit_hole.py +
def store_documents(
+        self,
+        stray,
+        docs: List[Document],
+        source: str, # TODOV2: is this necessary?
+        metadata: dict = {}
+    ) -> None:
+    """Add documents to the Cat's declarative memory.
+
+    This method loops over a list of Langchain `Document` and adds some metadata. Namely, the source filename and the
+    timestamp of insertion. Once done, the method notifies the client via Websocket connection.
+
+    Parameters
+    ----------
+    docs : List[Document]
+        List of Langchain `Document` to be inserted in the Cat's declarative memory.
+    source : str
+        Source name to be added as metadata. It can be a file name or a URL.
+    metadata : dict
+        Metadata to be stored with each chunk.
+
+    Notes
+    -------
+    At this point, it is possible to customize the Cat's behavior using the `before_rabbithole_insert_memory` hook
+    to edit the memories before they are inserted in the vector database.
+
+    See Also
+    --------
+    before_rabbithole_insert_memory
+    """
+
+    log.info(f"Preparing to memorize {len(docs)} vectors")
+
+    # hook the docs before they are stored in the vector memory
+    docs = stray.mad_hatter.execute_hook(
+        "before_rabbithole_stores_documents", docs, cat=stray
+    )
+
+    # classic embed
+    time_last_notification = time.time()
+    time_interval = 10  # a notification every 10 secs
+    stored_points = []
+    for d, doc in enumerate(docs):
+        if time.time() - time_last_notification > time_interval:
+            time_last_notification = time.time()
+            perc_read = int(d / len(docs) * 100)
+            read_message = f"Read {perc_read}% of {source}"
+            stray.send_ws_message(read_message)
+            log.warning(read_message)
+
+        # add default metadata
+        doc.metadata["source"] = source
+        doc.metadata["when"] = time.time()
+        # add custom metadata (sent via endpoint)
+        for k,v in metadata.items():
+            doc.metadata[k] = v
+
+        doc = stray.mad_hatter.execute_hook(
+            "before_rabbithole_insert_memory", doc, cat=stray
+        )
+        inserting_info = f"{d + 1}/{len(docs)}:    {doc.page_content}"
+        if doc.page_content != "":
+            doc_embedding = stray.embedder.embed_documents([doc.page_content])
+            stored_point = stray.memory.vectors.declarative.add_point(
+                doc.page_content,
+                doc_embedding[0],
+                doc.metadata,
+            )
+            stored_points.append(stored_point)
+
+            log.info(f"Inserted into memory ({inserting_info})")
+        else:
+            log.info(f"Skipped memory insertion of empty doc ({inserting_info})")
+
+        # wait a little to avoid APIs rate limit errors
+        time.sleep(0.05)
+
+    # hook the points after they are stored in the vector memory
+    stray.mad_hatter.execute_hook(
+        "after_rabbithole_stored_documents", source, stored_points, cat=stray
+    )
+
+    # notify client
+    finished_reading_message = (
+        f"Finished reading {source}, I made {len(docs)} thoughts on it."
+    )
+
+    stray.send_ws_message(finished_reading_message)
+
+    log.warning(f"Done uploading {source}")
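A sketch of the before_rabbithole_insert_memory hook mentioned above, assuming the usual @hook decorator; the metadata key is illustrative.

from cat.mad_hatter.decorators import hook

@hook
def before_rabbithole_insert_memory(doc, cat):
    # tag every chunk before it is embedded and stored
    doc.metadata["ingested_by"] = "my_plugin"
    return doc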
+
+
+
+ +
+ +
+ + +

+ string_to_docs(stray, file_bytes, source=None, content_type='text/plain', chunk_size=None, chunk_overlap=None) + +

+ + +
+ +

Convert string to Langchain Document.

+

Takes a string and converts it to a langchain Document. +It then loads the string in memory and splits it into overlapping chunks of text.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
file_bytes + str + +
+

The string to be converted.

+
+
+ required +
source + str + +
+

Source filename.

+
+
+ None +
content_type + str + +
+

Mimetype of content.

+
+
+ 'text/plain' +
chunk_size + int + +
+

Number of tokens in each document chunk.

+
+
+ None +
chunk_overlap + int + +
+

Number of overlapping tokens between consecutive chunks.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
docs + List[Document] + +
+

List of Langchain Document of chunked text.

+
+
+ +
+ Source code in cat/rabbit_hole.py +
def string_to_docs(
+    self,
+    stray,
+    file_bytes: str,
+    source: str = None,
+    content_type: str = "text/plain",
+    chunk_size: int | None = None,
+    chunk_overlap: int | None = None
+) -> List[Document]:
+    """Convert string to Langchain `Document`.
+
+    Takes a string and converts it to a langchain `Document`.
+    It then loads the string in memory and splits it into overlapping chunks of text.
+
+    Parameters
+    ----------
+    file_bytes : str
+        The string to be converted.
+    source: str
+        Source filename.
+    content_type:
+        Mimetype of content.
+    chunk_size : int
+        Number of tokens in each document chunk.
+    chunk_overlap : int
+        Number of overlapping tokens between consecutive chunks.
+
+    Returns
+    -------
+    docs : List[Document]
+        List of Langchain `Document` of chunked text.
+    """
+
+    # Load the bytes in the Blob schema
+    blob = Blob(data=file_bytes, mimetype=content_type, source=source).from_data(
+        data=file_bytes, mime_type=content_type, path=source
+    )
+    # Parser based on the mime type
+    parser = MimeTypeBasedParser(handlers=self.file_handlers)
+
+    # Parse the text
+    stray.send_ws_message(
+        "I'm parsing the content. Large content may take a few minutes..."
+    )
+    super_docs = parser.parse(blob)
+
+    # Split
+    stray.send_ws_message("Parsing completed. Now starting the reading process...")
+    docs = self.__split_text(
+        stray=stray,
+        text=super_docs,
+        chunk_size=chunk_size,
+        chunk_overlap=chunk_overlap,
+    )
+    return docs
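For instance, a sketch (again assuming the RabbitHole is reached through the `cat` object; note that despite the `file_bytes: str` annotation the method is normally fed raw bytes, as file_to_docs does):

docs = cat.rabbit_hole.string_to_docs(
    stray=cat,
    file_bytes=b"# Title\n\nSome markdown text.",
    source="inline.md",
    content_type="text/markdown",
)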
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/routes/settings/index.html b/API_Documentation/routes/settings/index.html new file mode 100644 index 000000000..0fc93181d --- /dev/null +++ b/API_Documentation/routes/settings/index.html @@ -0,0 +1,4264 @@ + + + + + + + + + + + + + + + + + + + + + + + + + settings - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

settings

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ create_setting(payload, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.WRITE))) + +

+ + +
+ +

Create a new setting in the database

+ +
+ Source code in cat/routes/settings.py +
23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
@router.post("/")
+def create_setting(
+    payload: models.SettingBody,
+    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.WRITE)),
+):
+    """Create a new setting in the database"""
+
+    # complete the payload with setting_id and updated_at
+    payload = models.Setting(**payload.model_dump())
+
+    # save to DB
+    new_setting = crud.create_setting(payload)
+
+    return {"setting": new_setting}
+
+
+
+ +
+ +
+ + +

+ delete_setting(settingId, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.DELETE))) + +

+ + +
+ +

Delete a specific setting in the database

+ +
+ Source code in cat/routes/settings.py +
@router.delete("/{settingId}")
+def delete_setting(
+    settingId: str,
+    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.DELETE)),
+):
+    """Delete a specific setting in the database"""
+
+    # does the setting exist?
+    setting = crud.get_setting_by_id(settingId)
+    if not setting:
+        raise HTTPException(
+            status_code=404,
+            detail={
+                "error": f"No setting with this id: {settingId}",
+            },
+        )
+
+    # delete
+    crud.delete_setting_by_id(settingId)
+
+    return {"deleted": settingId}
+
+
+
+ +
+ +
+ + +

+ get_setting(settingId, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.READ))) + +

+ + +
+ +

Get a specific setting from the database

+ +
+ Source code in cat/routes/settings.py +
39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
@router.get("/{settingId}")
+def get_setting(
+    settingId: str, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.READ))
+):
+    """Get a specific setting from the database"""
+
+    setting = crud.get_setting_by_id(settingId)
+    if not setting:
+        raise HTTPException(
+            status_code=404,
+            detail={
+                "error": f"No setting with this id: {settingId}",
+            },
+        )
+    return {"setting": setting}
+
+
+
+ +
+ +
+ + +

+ get_settings(search='', stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.LIST))) + +

+ + +
+ +

Get the entire list of settings available in the database

+ +
+ Source code in cat/routes/settings.py +
11
+12
+13
+14
+15
+16
+17
+18
+19
+20
@router.get("/")
+def get_settings(
+    search: str = "",
+    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.LIST)),
+):
+    """Get the entire list of settings available in the database"""
+
+    settings = crud.get_settings(search=search)
+
+    return {"settings": settings}
+
+
+
+ +
+ +
+ + +

+ update_setting(settingId, payload, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.EDIT))) + +

+ + +
+ +

Update a specific setting in the database if it exists

+ +
+ Source code in cat/routes/settings.py +
56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
@router.put("/{settingId}")
+def update_setting(
+    settingId: str,
+    payload: models.SettingBody,
+    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.EDIT)),
+):
+    """Update a specific setting in the database if it exists"""
+
+    # does the setting exist?
+    setting = crud.get_setting_by_id(settingId)
+    if not setting:
+        raise HTTPException(
+            status_code=404,
+            detail={
+                "error": f"No setting with this id: {settingId}",
+            },
+        )
+
+    # complete the payload with setting_id and updated_at
+    payload = models.Setting(**payload.model_dump())
+    payload.setting_id = settingId  # force this to be the setting_id
+
+    # save to DB
+    updated_setting = crud.update_setting_by_id(payload)
+
+    return {"setting": updated_setting}
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API_Documentation/utils/index.html b/API_Documentation/utils/index.html new file mode 100644 index 000000000..fd41e1e25 --- /dev/null +++ b/API_Documentation/utils/index.html @@ -0,0 +1,4532 @@ + + + + + + + + + + + + + + + + + + + + + + + + + utils - Cheshire Cat AI docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

utils

+ +
+ + + + +
+ +

Various utilities used across the project.

+ + + +
+ + + + + + + + + +
+ + +

+ get_base_path() + +

+ + +
+ +

Allows exposing the base path.

+ +
+ Source code in cat/utils.py +
94
+95
+96
def get_base_path():
+    """Allows exposing the base path."""
+    return "cat/"
+
+
+
+ +
+ +
+ + +

+ get_base_url() + +

+ + +
+ +

Allows exposing the base url.

+ +
+ Source code in cat/utils.py +
86
+87
+88
+89
+90
+91
def get_base_url():
+    """Allows exposing the base url."""
+    secure = "s" if get_env("CCAT_CORE_USE_SECURE_PROTOCOLS") in ("true", "1") else ""
+    cat_host = get_env("CCAT_CORE_HOST")
+    cat_port = get_env("CCAT_CORE_PORT")
+    return f"http{secure}://{cat_host}:{cat_port}/"
+
+
+
+ +
+ +
+ + +

+ get_plugins_path() + +

+ + +
+ +

Allows exposing the plugins' path.

+ +
+ Source code in cat/utils.py +
def get_plugins_path():
+    """Allows exposing the plugins' path."""
+    return os.path.join(get_base_path(), "plugins/")
+
+
+
+ +
+ +
+ + +

+ get_static_path() + +

+ + +
+ +

Allows exposing the static files' path.

+ +
+ Source code in cat/utils.py +
def get_static_path():
+    """Allows exposing the static files' path."""
+    return os.path.join(get_base_path(), "static/")
+
+
+
+ +
+ +
+ + +

+ get_static_url() + +

+ + +
+ +

Allows exposing the static server url.

+ +
+ Source code in cat/utils.py +
def get_static_url():
+    """Allows exposing the static server url."""
+    return get_base_url() + "static/"
+
+
+
+ +
+ +
+ + +

+ match_prompt_variables(prompt_variables, prompt_template) + +

+ + +
+ +

Ensure prompt variables and prompt template placeholders match, so there are no issues from mismatches

+ +
+ Source code in cat/utils.py +
def match_prompt_variables(
+        prompt_variables: Dict,
+        prompt_template: str
+    ) -> Tuple[Dict, str]:
+    """Ensure prompt variables and prompt template placeholders match, so there are no issues from mismatches"""
+
+    tmp_prompt = PromptTemplate.from_template(
+        template=prompt_template
+    )
+
+    # outer set difference
+    prompt_mismatches = set(prompt_variables.keys()) ^ set(tmp_prompt.input_variables)
+
+    # clean up
+    for m in prompt_mismatches:
+        if m in prompt_variables.keys():
+            log.warning(f"Prompt variable '{m}' not found in prompt template, removed")
+            del prompt_variables[m]
+        if m in tmp_prompt.input_variables:
+            prompt_template = \
+                prompt_template.replace("{" + m + "}", "")
+            log.warning(f"Placeholder '{m}' not found in prompt variables, removed")
+
+    return prompt_variables, prompt_template
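To make the clean-up concrete, a small worked example; the variable names are illustrative.

variables = {"tools": "search", "unused": "x"}
template = "Tools:\n{tools}\n\nChat:\n{chat_history}"

variables, template = match_prompt_variables(variables, template)
# 'unused' is removed from the variables (no matching placeholder),
# '{chat_history}' is stripped from the template (no matching variable)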
+
+
+
+ +
+ +
+ + +

+ to_camel_case(text) + +

+ + +
+ +

Format string to camel case.

+

Takes a string of words separated by either hyphens or underscores and returns a string of words in camel case.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
text + str + +
+

String of hyphens or underscores separated words.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

Camel case formatted string.

+
+
+ +
+ Source code in cat/utils.py +
20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
def to_camel_case(text: str) -> str:
+    """Format string to camel case.
+
+    Takes a string of words separated by either hyphens or underscores and returns a string of words in camel case.
+
+    Parameters
+    ----------
+    text : str
+        String of hyphens or underscores separated words.
+
+    Returns
+    -------
+    str
+        Camel case formatted string.
+    """
+    s = text.replace("-", " ").replace("_", " ").capitalize()
+    s = s.split()
+    if len(text) == 0:
+        return text
+    return s[0] + "".join(i.capitalize() for i in s[1:])
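For example:

to_camel_case("cheshire-cat_ai")  # -> "CheshireCatAi"
to_camel_case("hello_world")      # -> "HelloWorld"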
+
+
+
+ +
+ +
+ + +

+ verbal_timedelta(td) + +

+ + +
+ +

Convert a timedelta in human form.

+

The function takes a timedelta and converts it to a human-readable string format.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
td + timedelta + +
+

Difference between two dates.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
TypeDescription
+ str + +
+

Human-readable string of time difference.

+
+
+ + +
+ Notes +

This method is used to give the Language Model time information about the memories retrieved from +the vector database.

+
+ +

Examples:

+
>>> verbal_timedelta(timedelta(days=2, weeks=1))
+'1 weeks ago'
+
+ +
+ Source code in cat/utils.py +
42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
def verbal_timedelta(td: timedelta) -> str:
+    """Convert a timedelta in human form.
+
+    The function takes a timedelta and converts it to a human-readable string format.
+
+    Parameters
+    ----------
+    td : timedelta
+        Difference between two dates.
+
+    Returns
+    -------
+    str
+        Human-readable string of time difference.
+
+    Notes
+    -----
+    This method is used to give the Language Model time information about the memories retrieved from
+    the vector database.
+
+    Examples
+    --------
+    >>> verbal_timedelta(timedelta(days=2, weeks=1))
+    '1 weeks ago'
+    """
+
+    if td.days != 0:
+        abs_days = abs(td.days)
+        if abs_days > 7:
+            abs_delta = "{} weeks".format(abs_days // 7)
+        else:
+            abs_delta = "{} days".format(abs_days)
+    else:
+        abs_minutes = abs(td.seconds) // 60
+        if abs_minutes > 60:
+            abs_delta = "{} hours".format(abs_minutes // 60)
+        else:
+            abs_delta = "{} minutes".format(abs_minutes)
+    return "{} ago".format(abs_delta)
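A couple of concrete values, following the branches above:

from datetime import timedelta

verbal_timedelta(timedelta(days=2))     # -> "2 days ago"
verbal_timedelta(timedelta(minutes=5))  # -> "5 minutes ago"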
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 000000000..85449ec79 --- /dev/null +++ b/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. */ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: "mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/assets/diagrams/cat-bootstrap.drawio b/assets/diagrams/cat-bootstrap.drawio new file mode 100644 index 000000000..dc5d32bed --- /dev/null +++ b/assets/diagrams/cat-bootstrap.drawio @@ -0,0 +1,461 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/assets/diagrams/chatting-with-the-cat.drawio b/assets/diagrams/chatting-with-the-cat.drawio
new file mode 100644
index 000000000..5fecff027
--- /dev/null
+++ b/assets/diagrams/chatting-with-the-cat.drawio
diff --git a/assets/images/favicon.png b/assets/images/favicon.png
new file mode 100644
index 000000000..1cf13b9f9
Binary files /dev/null and b/assets/images/favicon.png differ
diff --git a/assets/img/HA_Assist.png b/assets/img/HA_Assist.png
new file mode 100644
index 000000000..9acd3ecc3
Binary files /dev/null and b/assets/img/HA_Assist.png differ
diff --git a/assets/img/Kubernetes.png b/assets/img/Kubernetes.png
new file mode 100644
index 000000000..21ced8568
Binary files /dev/null and b/assets/img/Kubernetes.png differ
diff --git a/assets/img/admin_screenshots/home.png b/assets/img/admin_screenshots/home.png
new file mode 100644
index 000000000..fc426d271
Binary files /dev/null and b/assets/img/admin_screenshots/home.png differ
diff --git a/assets/img/admin_screenshots/install-plugin-from-registry.gif b/assets/img/admin_screenshots/install-plugin-from-registry.gif
new file mode 100644
index 000000000..3992aff10
Binary files /dev/null and b/assets/img/admin_screenshots/install-plugin-from-registry.gif differ
diff --git a/assets/img/admin_screenshots/memory.png b/assets/img/admin_screenshots/memory.png
new file mode 100644
index 000000000..ec03ead46
Binary files /dev/null and b/assets/img/admin_screenshots/memory.png differ
diff --git a/assets/img/admin_screenshots/memory_content.png b/assets/img/admin_screenshots/memory_content.png
new file mode 100644
index 000000000..23ab59560
Binary files /dev/null and b/assets/img/admin_screenshots/memory_content.png differ
diff --git a/assets/img/admin_screenshots/memory_details.png b/assets/img/admin_screenshots/memory_details.png
new file mode 100644
index 000000000..2519a0569
Binary files /dev/null and b/assets/img/admin_screenshots/memory_details.png differ
diff --git a/assets/img/admin_screenshots/modal_home.png b/assets/img/admin_screenshots/modal_home.png
new file mode 100644
index 000000000..a06432320
Binary files /dev/null and b/assets/img/admin_screenshots/modal_home.png differ
diff --git a/assets/img/admin_screenshots/plugin_settings/form1.png b/assets/img/admin_screenshots/plugin_settings/form1.png
new file mode 100644
index 000000000..2ce6ee4fe
Binary files /dev/null and b/assets/img/admin_screenshots/plugin_settings/form1.png differ
diff --git a/assets/img/admin_screenshots/plugin_settings/form2.png b/assets/img/admin_screenshots/plugin_settings/form2.png
new file mode 100644
index 000000000..92a621cfb
Binary files /dev/null and b/assets/img/admin_screenshots/plugin_settings/form2.png differ
diff --git a/assets/img/admin_screenshots/plugin_settings/settings.png b/assets/img/admin_screenshots/plugin_settings/settings.png
new file mode 100644
index 000000000..15dfda00d
Binary files /dev/null and b/assets/img/admin_screenshots/plugin_settings/settings.png differ
diff --git a/assets/img/admin_screenshots/plugins.png b/assets/img/admin_screenshots/plugins.png
new file mode 100644
index 000000000..e29c36c42
Binary files /dev/null and b/assets/img/admin_screenshots/plugins.png differ
diff --git a/assets/img/admin_screenshots/settings.png b/assets/img/admin_screenshots/settings.png
new file mode 100644
index 000000000..b722397a4
Binary files /dev/null and b/assets/img/admin_screenshots/settings.png differ
diff --git a/assets/img/admin_screenshots/why.png b/assets/img/admin_screenshots/why.png
new file mode 100644
index 000000000..661f4c4e2
Binary files /dev/null and b/assets/img/admin_screenshots/why.png differ
diff --git a/assets/img/cheshire-cat-logo.svg b/assets/img/cheshire-cat-logo.svg
new file mode 100644
index 000000000..ccdfbf446
--- /dev/null
+++ b/assets/img/cheshire-cat-logo.svg
diff --git a/assets/img/cheshire-cat-tree-shade.jpg b/assets/img/cheshire-cat-tree-shade.jpg
new file mode 100644
index 000000000..a105dbaeb
Binary files /dev/null and b/assets/img/cheshire-cat-tree-shade.jpg differ
diff --git a/assets/img/clientlib/1200px-Logo_C_sharp.png b/assets/img/clientlib/1200px-Logo_C_sharp.png
new file mode 100644
index 000000000..ce7a278e2
Binary files /dev/null and b/assets/img/clientlib/1200px-Logo_C_sharp.png differ
diff --git a/assets/img/clientlib/1869px-Python-logo-notext.png b/assets/img/clientlib/1869px-Python-logo-notext.png
new file mode 100644
index 000000000..ef43d0837
Binary files /dev/null and b/assets/img/clientlib/1869px-Python-logo-notext.png differ
diff --git a/assets/img/clientlib/198px-Ruby_logo.png b/assets/img/clientlib/198px-Ruby_logo.png
new file mode 100644
index 000000000..f2ee1ef9d
Binary files /dev/null and b/assets/img/clientlib/198px-Ruby_logo.png differ
diff --git a/assets/img/clientlib/2048px-Telegram_logo.png b/assets/img/clientlib/2048px-Telegram_logo.png
new file mode 100644
index 000000000..c03f3f4f1
Binary files /dev/null and b/assets/img/clientlib/2048px-Telegram_logo.png differ
diff --git a/assets/img/clientlib/512px-Typescript_logo_2020.png b/assets/img/clientlib/512px-Typescript_logo_2020.png
new file mode 100644
index 000000000..d0a36f812
Binary files /dev/null and b/assets/img/clientlib/512px-Typescript_logo_2020.png differ
diff --git a/assets/img/clientlib/711px-PHP-logo.png b/assets/img/clientlib/711px-PHP-logo.png
new file mode 100644
index 000000000..4ee67dba1
Binary files /dev/null and b/assets/img/clientlib/711px-PHP-logo.png differ
diff --git a/assets/img/clientlib/Alpine.js.png b/assets/img/clientlib/Alpine.js.png
new file mode 100644
index 000000000..28aa1e1d3
Binary files /dev/null and b/assets/img/clientlib/Alpine.js.png differ
diff --git a/assets/img/clientlib/Java_Logo.png b/assets/img/clientlib/Java_Logo.png
new file mode 100644
index 000000000..7609ab0e3
Binary files /dev/null and b/assets/img/clientlib/Java_Logo.png differ
diff --git a/assets/img/clientlib/Vue.js.png b/assets/img/clientlib/Vue.js.png
new file mode 100644
index 000000000..2c1917850
Binary files /dev/null and b/assets/img/clientlib/Vue.js.png differ
diff --git a/assets/img/clientlib/discord.png b/assets/img/clientlib/discord.png
new file mode 100644
index 000000000..804e1b8a6
Binary files /dev/null and b/assets/img/clientlib/discord.png differ
diff --git a/assets/img/clientlib/golang.jpg b/assets/img/clientlib/golang.jpg
new file mode 100644
index 000000000..ce78f5f49
Binary files /dev/null and b/assets/img/clientlib/golang.jpg differ
diff --git a/assets/img/diagrams/agent-manager.jpg b/assets/img/diagrams/agent-manager.jpg
new file mode 100644
index 000000000..b4ab62bd1
Binary files /dev/null and b/assets/img/diagrams/agent-manager.jpg differ
diff --git a/assets/img/diagrams/cat-ollama.jpg b/assets/img/diagrams/cat-ollama.jpg
new file mode 100644
index 000000000..947ec671e
Binary files /dev/null and b/assets/img/diagrams/cat-ollama.jpg differ
diff --git a/assets/img/diagrams/components.png b/assets/img/diagrams/components.png
new file mode 100644
index 000000000..0d56986e5
Binary files /dev/null and b/assets/img/diagrams/components.png differ
diff --git a/assets/img/diagrams/declarative-memory.jpg b/assets/img/diagrams/declarative-memory.jpg
new file mode 100644
index 000000000..d741fbef8
Binary files /dev/null and b/assets/img/diagrams/declarative-memory.jpg differ
diff --git a/assets/img/diagrams/episodic-memory.jpg b/assets/img/diagrams/episodic-memory.jpg
new file mode 100644
index 000000000..ae0245366
Binary files /dev/null and b/assets/img/diagrams/episodic-memory.jpg differ
diff --git a/assets/img/diagrams/flow.jpg b/assets/img/diagrams/flow.jpg
new file mode 100644
index 000000000..58dc02813
Binary files /dev/null and b/assets/img/diagrams/flow.jpg differ
diff --git a/assets/img/diagrams/ltm.jpg b/assets/img/diagrams/ltm.jpg
new file mode 100644
index 000000000..b13f81f14
Binary files /dev/null and b/assets/img/diagrams/ltm.jpg differ
diff --git a/assets/img/diagrams/memory-chain.jpg b/assets/img/diagrams/memory-chain.jpg
new file mode 100644
index 000000000..7e8711f23
Binary files /dev/null and b/assets/img/diagrams/memory-chain.jpg differ
diff --git a/assets/img/diagrams/plugin.png b/assets/img/diagrams/plugin.png
new file mode 100644
index 000000000..d541a82c6
Binary files /dev/null and b/assets/img/diagrams/plugin.png differ
diff --git a/assets/img/diagrams/plugin2.jpg b/assets/img/diagrams/plugin2.jpg
new file mode 100644
index 000000000..07444f5c6
Binary files /dev/null and b/assets/img/diagrams/plugin2.jpg differ
diff --git a/assets/img/diagrams/procedural-memory.jpg b/assets/img/diagrams/procedural-memory.jpg
new file mode 100644
index 000000000..453a3cc97
Binary files /dev/null and b/assets/img/diagrams/procedural-memory.jpg differ
diff --git a/assets/img/diagrams/prompt.jpg b/assets/img/diagrams/prompt.jpg
new file mode 100644
index 000000000..070083232
Binary files /dev/null and b/assets/img/diagrams/prompt.jpg differ
diff --git a/assets/img/diagrams/rabbithole.jpg b/assets/img/diagrams/rabbithole.jpg
new file mode 100644
index 000000000..25c925416
Binary files /dev/null and b/assets/img/diagrams/rabbithole.jpg differ
diff --git a/assets/img/diagrams/rag.jpg b/assets/img/diagrams/rag.jpg
new file mode 100644
index 000000000..a15323f7d
Binary files /dev/null and b/assets/img/diagrams/rag.jpg differ
diff --git a/assets/img/diagrams/tool-chain.jpg b/assets/img/diagrams/tool-chain.jpg
new file mode 100644
index 000000000..e722e6508
Binary files /dev/null and b/assets/img/diagrams/tool-chain.jpg differ
diff --git a/assets/img/diagrams/working-memory.jpg b/assets/img/diagrams/working-memory.jpg
new file mode 100644
index 000000000..3cd26fd2b
Binary files /dev/null and b/assets/img/diagrams/working-memory.jpg differ
diff --git a/assets/img/favicon.ico b/assets/img/favicon.ico
new file mode 100644
index 000000000..7aff06eea
Binary files /dev/null and b/assets/img/favicon.ico differ
diff --git a/assets/img/mad_hatter_10.png b/assets/img/mad_hatter_10.png
new file mode 100644
index 000000000..8f96436d6
Binary files /dev/null and b/assets/img/mad_hatter_10.png differ
diff --git a/assets/img/mad_hatter_6.png b/assets/img/mad_hatter_6.png
new file mode 100644
index 000000000..c9ce9b354
Binary files /dev/null and b/assets/img/mad_hatter_6.png differ
diff --git a/assets/img/nginx.png b/assets/img/nginx.png
new file mode 100644
index 000000000..d8803032b
Binary files /dev/null and b/assets/img/nginx.png differ
diff --git a/assets/img/quickstart/play-with-the-cat/play-with-the-cat.png b/assets/img/quickstart/play-with-the-cat/play-with-the-cat.png
new file mode 100644
index 000000000..6268cae32
Binary files /dev/null and b/assets/img/quickstart/play-with-the-cat/play-with-the-cat.png differ
diff --git a/assets/img/quickstart/prepare-plugin/activate-plugins.png b/assets/img/quickstart/prepare-plugin/activate-plugins.png
new file mode 100644
index 000000000..55096a7b8
Binary files /dev/null and b/assets/img/quickstart/prepare-plugin/activate-plugins.png differ
diff --git a/assets/img/quickstart/prepare-plugin/create-from-template.png b/assets/img/quickstart/prepare-plugin/create-from-template.png
new file mode 100644
index 000000000..2d2df1092
Binary files /dev/null and b/assets/img/quickstart/prepare-plugin/create-from-template.png differ
diff --git a/assets/img/quickstart/prepare-plugin/repo-name.png b/assets/img/quickstart/prepare-plugin/repo-name.png
new file mode 100644
index 000000000..d912cb24b
Binary files /dev/null and b/assets/img/quickstart/prepare-plugin/repo-name.png differ
diff --git a/assets/img/quickstart/prepare-plugin/shell-setup.png b/assets/img/quickstart/prepare-plugin/shell-setup.png
new file mode 100644
index 000000000..cacd6130a
Binary files /dev/null and b/assets/img/quickstart/prepare-plugin/shell-setup.png differ
diff --git a/assets/img/quickstart/upload-document/cat-answers.png b/assets/img/quickstart/upload-document/cat-answers.png
new file mode 100644
index 000000000..28b28b907
Binary files /dev/null and b/assets/img/quickstart/upload-document/cat-answers.png differ
diff --git a/assets/img/quickstart/upload-document/finish-notification.png b/assets/img/quickstart/upload-document/finish-notification.png
new file mode 100644
index 000000000..67fb047ff
Binary files /dev/null and b/assets/img/quickstart/upload-document/finish-notification.png differ
diff --git a/assets/img/quickstart/upload-document/upload-url-notification.png b/assets/img/quickstart/upload-document/upload-url-notification.png
new file mode 100644
index 000000000..cdf864a51
Binary files /dev/null and b/assets/img/quickstart/upload-document/upload-url-notification.png differ
diff --git a/assets/img/quickstart/upload-document/upload-url.png b/assets/img/quickstart/upload-document/upload-url.png
new file mode 100644
index 000000000..5eeefa064
Binary files /dev/null and b/assets/img/quickstart/upload-document/upload-url.png differ
diff --git a/assets/img/quickstart/upload-document/why-response.png b/assets/img/quickstart/upload-document/why-response.png
new file mode 100644
index 000000000..529f11ddc
Binary files /dev/null and b/assets/img/quickstart/upload-document/why-response.png differ
diff --git a/assets/img/quickstart/write-hook/marvin-sockseller.png b/assets/img/quickstart/write-hook/marvin-sockseller.png
new file mode 100644
index 000000000..25914dc94
Binary files /dev/null and b/assets/img/quickstart/write-hook/marvin-sockseller.png differ
diff --git a/assets/img/quickstart/write-tool/ask-price-socks.png b/assets/img/quickstart/write-tool/ask-price-socks.png
new file mode 100644
index 000000000..a01ad242d
Binary files /dev/null and b/assets/img/quickstart/write-tool/ask-price-socks.png differ
diff --git a/assets/img/quickstart/write-tool/why-the-response.png b/assets/img/quickstart/write-tool/why-the-response.png
new file mode 100644
index 000000000..c44a38f14
Binary files /dev/null and b/assets/img/quickstart/write-tool/why-the-response.png differ
diff --git a/assets/img/swagger_endpoints.png b/assets/img/swagger_endpoints.png
new file mode 100644
index 000000000..2085e937d
Binary files /dev/null and b/assets/img/swagger_endpoints.png differ
diff --git a/assets/img/technical/forms/how_invalid_is_display.png b/assets/img/technical/forms/how_invalid_is_display.png
new file mode 100644
index 000000000..7aa7711d9
Binary files /dev/null and b/assets/img/technical/forms/how_invalid_is_display.png differ
diff --git a/assets/img/technical/forms/how_is_confirm_display.png b/assets/img/technical/forms/how_is_confirm_display.png
new file mode 100644
index 000000000..2a5f76769
Binary files /dev/null and b/assets/img/technical/forms/how_is_confirm_display.png differ
diff --git a/assets/img/technical/forms/how_is_display.png b/assets/img/technical/forms/how_is_display.png
new file mode 100644
index 000000000..bef88caf1
Binary files /dev/null and b/assets/img/technical/forms/how_is_display.png differ
diff --git a/assets/img/tipi.png b/assets/img/tipi.png
new file mode 100644
index 000000000..53ce2a24f
Binary files /dev/null and b/assets/img/tipi.png differ
diff --git a/assets/img/vector_memory/cosine.png b/assets/img/vector_memory/cosine.png
new file mode 100644
index 000000000..3a9f55ad7
Binary files /dev/null and b/assets/img/vector_memory/cosine.png differ
diff --git a/assets/img/vector_memory/word_embeddings.png b/assets/img/vector_memory/word_embeddings.png
new file mode 100644
index 000000000..f2eddcf2d
Binary files /dev/null and b/assets/img/vector_memory/word_embeddings.png differ
diff --git a/assets/img/vscode-debugger/image.png b/assets/img/vscode-debugger/image.png
new file mode 100644
index 000000000..eea77fd22
Binary files /dev/null and b/assets/img/vscode-debugger/image.png differ
diff --git a/assets/img/vscode-debugger/run_debugger.png b/assets/img/vscode-debugger/run_debugger.png
new file mode 100644
index 000000000..2c23363d2
Binary files /dev/null and b/assets/img/vscode-debugger/run_debugger.png differ
diff --git a/assets/javascripts/bundle.471ce7a9.min.js b/assets/javascripts/bundle.471ce7a9.min.js
new file mode 100644
index 000000000..90d2d95af
--- /dev/null
+++ b/assets/javascripts/bundle.471ce7a9.min.js
circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,za=0;function qa(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function Wn(e){return 
e.classList.remove("mermaid"),Gr||(Gr=qa().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:jn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>so(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${za++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Un=x("table");function Dn(e){return e.replaceWith(Un),Un.replaceWith(_n(e)),I({ref:e})}function Qa(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>d(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Vn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.x<h.x||f.x+u>h.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of P("[data-tabs]"))for(let A of P(":scope > input",w)){let te=R(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Qa(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function Nn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>$n(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > code",e).map(n=>In(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Dn(n)),...P("details",e).map(n=>Fn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Vn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ka(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function zn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ka(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ya=0;function
Ba(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ya++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ba(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Ga({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[n<i,i]),Z(0)),r=z([e,t]).pipe(b(([{offset:n},[,i]])=>Math.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Qn(e,t){return C(()=>z([ge(e),Ga(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Kn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),Ue(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>qn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>$({ref:e},a)),Pe(i.pipe(U(n))))})}function Ja(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function Yn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Ja(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>$({ref:e},n)))})}function Bn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Xa(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return
I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Gn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;s<t.length;s++){let p=t[s].nextElementSibling;p instanceof HTMLElement&&(p.hidden=a.index!==s)}__md_set("__palette",a)}),d(e,"keydown").pipe(b(a=>a.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Xa(t).pipe(U(n.pipe(Ce(1))),ct(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>$({ref:e},a)))})}function Jn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Lt(Br());function Za(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Xn({alert$:e}){Jr.default.isSupported()&&new F(t=>{new Jr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Za(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Zn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function es(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[Zn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Zn(new URL(s),t))}}return r}function ur(e){return fn(new URL("sitemap.xml",e)).pipe(m(t=>es(t,new URL(e))),de(()=>I(new Map)))}function ts(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ei(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return t}function ti(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function rs(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let
n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ei(document);for(let[o,n]of ei(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return We(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),X(),ne(document))}function ri({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ti);let i=d(document.body,"click").pipe(Ue(n),v(([p,c])=>ts(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>mn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ti),v(rs),pe());return O(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",cn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var oi=Lt(qr());function ni(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,oi.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function Ft(e){return e.type===1}function dr(e){return e.type===3}function ii(e,t){let r=gn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ai({document$:e}){let t=ye(),r=Ie(new URL("../versions.json",t.base)).pipe(de(()=>S)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(p))}}return S}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(An(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function 
is(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(Ft)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function si(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(Ft)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),is(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function ci(e,{worker$:t,query$:r}){let o=new g,n=rn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(Ft)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?S:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>$({ref:e},l)))}function as(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function pi(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),as(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>$({ref:e},n)))}function li(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(d(n,"keydown"),d(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(Ue(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function mi(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ii(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let 
l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=si(i,{worker$:n});return O(s,ci(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>pi(p,{query$:s})),...ae("search-suggest",e).map(p=>li(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function fi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ni(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function ss(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Xr(e,o){var n=o,{header$:t}=n,r=ao(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue(P("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(ve(se),m(()=>l),U(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),ss(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>$({ref:e},l)))})}function ui(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(Ie(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),Ie(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ie(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function di(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(Ie(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),Ie(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function hi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return ui(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return di(r,o)}return S}var cs;function ps(e){return cs||(cs=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return hi(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function bi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return 
r.subscribe(({facts:o})=>{t.appendChild(Ln(o)),t.classList.add("md-source__repository--active")}),ps(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function vi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ls(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}function ms(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),Ue(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,p.next.length-s.next.length)}))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),Ue(o.pipe(ve(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),ct({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ms(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))})}function fs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),ct({delay:250}),m(a=>({hidden:a})))}function xi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),fs(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))}function
yi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),oe(r=>mt(r,{viewport$:t}))).subscribe()}function Ei({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function us(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function wi({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(us),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ti({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ds(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",Zr.base)}`).pipe(m(()=>__index),G(1)):Ie(new URL("search/search_index.json",Zr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Bo(),Wt=an(),Mt=pn(Wt),eo=nn(),Oe=vn(),hr=Pt("(min-width: 960px)"),Oi=Pt("(min-width: 1220px)"),Mi=ln(),Zr=ye(),Li=document.forms.namedItem("search")?ds():Ye,to=new g;Xn({alert$:to});var ro=new g;B("navigation.instant")&&ri({location$:Wt,viewport$:Oe,progress$:ro}).subscribe(ot);var Si;((Si=Zr.version)==null?void 0:Si.provider)==="mike"&&ai({document$:ot});O(Wt,Mt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});eo.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&lt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&lt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});yi({viewport$:Oe,document$:ot});Ei({document$:ot,tablet$:hr});wi({document$:ot});Ti({viewport$:Oe,tablet$:hr});var
rt=Qn(Se("header"),{viewport$:Oe}),jt=ot.pipe(m(()=>Se("main")),v(e=>Bn(e,{viewport$:Oe,header$:rt})),G(1)),hs=O(...ae("consent").map(e=>yn(e,{target$:Mt})),...ae("dialog").map(e=>zn(e,{alert$:to})),...ae("header").map(e=>Kn(e,{viewport$:Oe,header$:rt,main$:jt})),...ae("palette").map(e=>Gn(e)),...ae("progress").map(e=>Jn(e,{progress$:ro})),...ae("search").map(e=>mi(e,{index$:Li,keyboard$:eo})),...ae("source").map(e=>bi(e))),bs=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>Nn(e,{viewport$:Oe,target$:Mt,print$:Mi})),...ae("content").map(e=>B("search.highlight")?fi(e,{index$:Li,location$:Wt}):S),...ae("header-title").map(e=>Yn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Oi,()=>Xr(e,{viewport$:Oe,header$:rt,main$:jt})):Nr(hr,()=>Xr(e,{viewport$:Oe,header$:rt,main$:jt}))),...ae("tabs").map(e=>vi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:jt,target$:Mt})),...ae("top").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:jt,target$:Mt})))),_i=ot.pipe(v(()=>bs),Pe(hs),G(1));_i.subscribe();window.document$=ot;window.location$=Wt;window.target$=Mt;window.keyboard$=eo;window.viewport$=Oe;window.tablet$=hr;window.screen$=Oi;window.print$=Mi;window.alert$=to;window.progress$=ro;window.component$=_i;})(); +//# sourceMappingURL=bundle.471ce7a9.min.js.map + diff --git a/assets/javascripts/bundle.471ce7a9.min.js.map b/assets/javascripts/bundle.471ce7a9.min.js.map new file mode 100644 index 000000000..c24182b7e --- /dev/null +++ b/assets/javascripts/bundle.471ce7a9.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", 
"node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", 
"node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", 
"src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module 
If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n 
onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. 
Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
\ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 000000000..92ccf240a --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Hello, dear","text":""},{"location":"#hello-dear","title":"\ud83d\udc4b Hello, dear","text":"
\"Every adventure requires a first step\".\n(Alice's Adventures in Wonderland - Lewis Carroll)\n
"},{"location":"#quick-links","title":"Quick links","text":"
  • \ud83c\udfc3 Onboarding

    Take your first step with hands-on examples!

    Get started now

  • \ud83d\udd0c Plugins

    Dive into advanced concepts to develop and publish your plugin!

    Plugin documentation

  • \ud83e\ude9d Hooks

    Explore all the available hooks to customize the Cat!

    Available Hooks

  • \ud83d\ude80 Deploy

    Ready-to-use setup to deploy your assistant!

    Coming soon...

"},{"location":"#cheshire-cat-features","title":"Cheshire Cat Features","text":"

The Cheshire Cat is an open-source, hackable, production-ready framework for building intelligent personal AI assistant agents on top of Large Language Models (LLMs).

  • \ud83e\uddf0 API first framework

    Chat with the Cat and interact with its endpoints (see the sketch after this list)!

    Python API

  • \ud83d\ude80 Extendable via plugins in Python

    Write your first plugin; your imagination is the limit!

    Write your first plugin

  • \ud83c\udf0d Language model agnostic

    Easily choose from plenty of compatible models!

    compatible-models

  • \ud83d\udcdc Can ingest documents

    Ground the model in your own knowledge base!

    Retrieval Augmented Generation

  • \ud83d\udc18 Local Long term memory

    Make use of a persistent memory across restarts!

    Cat's Long Term Memory

  • \ud83d\udc0b 100% dockerized

    Set up the Cat in the blink of an eye with the pre-built Docker image!

    Installation
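As a minimal taste of the API-first approach (the sketch referenced in the list above), here is how a chat message could be sent to a locally running Cat over plain HTTP. This is a hedged sketch: the default port 1865, the /message endpoint name, and the payload shape are assumptions about a stock local install; your instance's /docs page is the authoritative reference.

import requests  # assumption: the requests package is installed

# Assumption: the Cat is running locally on its default port.
BASE_URL = "http://localhost:1865"

# Assumption: a /message endpoint accepting {"text": ...}, as in a stock install.
response = requests.post(f"{BASE_URL}/message", json={"text": "Hello, Cheshire Cat!"})
response.raise_for_status()

print(response.json())  # inspect the returned message payload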

"},{"location":"#get-in-touch-with-us","title":"Get in touch with us","text":"

Discord \ud83d\udc48 Join our Discord community, where you can connect with other developers and ask the contributors for support, or ask the Cat \ud83d\ude3a directly by mentioning @Cheshire Cat AI.

Remember to give the project a star! \u2b50 Thanks! \ud83d\ude4f

\"Would you tell me, please, which way I ought to go from here?\"\n\"That depends a good deal on where you want to get to,\" said the Cat.\n\"I don't much care where--\" said Alice.\n\"Then it doesn't matter which way you go,\" said the Cat.\n\n(Alice's Adventures in Wonderland - Lewis Carroll)\n

License

GNU General Public License v3.0

"},{"location":"made-with-the-cat/","title":"👋 Made with the Cat","text":""},{"location":"made-with-the-cat/#made-with-the-cat","title":"\ud83d\udc4b Made with the Cat","text":"

If you're curious to see what people have already done with the Cat, visit our dedicated Discord Channel!

"},{"location":"API_Documentation/SUMMARY/","title":"SUMMARY","text":"
  • agents
    • base_agent
    • form_agent
    • main_agent
    • memory_agent
    • procedures_agent
  • auth
    • auth_utils
  • log
  • looking_glass
    • cheshire_cat
    • stray_cat
  • mad_hatter
    • core_plugin
      • hooks
        • agent
        • flow
        • prompt
        • rabbithole
    • mad_hatter
    • plugin
  • memory
    • vector_memory
    • vector_memory_collection
    • working_memory
  • rabbit_hole
  • routes
    • settings
  • utils
"},{"location":"API_Documentation/log/","title":"log","text":"

The log engine.

"},{"location":"API_Documentation/log/#cat.log.CatLogEngine","title":"CatLogEngine","text":"

The log engine.

Engine to filter the logs in the terminal according to the level of severity.

Attributes:

LOG_LEVEL (str): Level of logging set in the .env file.

Notes

The logging level set in the .env file makes the engine print all logs from that level and above. Available levels are:

- `DEBUG`\n- `INFO`\n- `WARNING`\n- `ERROR`\n- `CRITICAL`\n

Defaults to INFO. (A short usage sketch follows the source code below.)

Source code in cat/log.py
class CatLogEngine:\n    \"\"\"The log engine.\n\n    Engine to filter the logs in the terminal according to the level of severity.\n\n    Attributes\n    ----------\n    LOG_LEVEL : str\n        Level of logging set in the `.env` file.\n\n    Notes\n    -----\n    The logging level set in the `.env` file will print all the logs from that level to above.\n    Available levels are:\n\n        - `DEBUG`\n        - `INFO`\n        - `WARNING`\n        - `ERROR`\n        - `CRITICAL`\n\n    Default to `INFO`.\n\n    \"\"\"\n\n    def __init__(self):\n        self.LOG_LEVEL = get_log_level()\n        self.default_log()\n\n        # workaround for pdfminer logging\n        # https://github.com/pdfminer/pdfminer.six/issues/347\n        logging.getLogger(\"pdfminer\").setLevel(logging.WARNING)\n\n    def show_log_level(self, record):\n        \"\"\"Allows to show stuff in the log based on the global setting.\n\n        Parameters\n        ----------\n        record : dict\n\n        Returns\n        -------\n        bool\n\n        \"\"\"\n        return record[\"level\"].no >= logger.level(self.LOG_LEVEL).no\n\n    def default_log(self):\n        \"\"\"Set the same debug level to all the project dependencies.\n\n        Returns\n        -------\n        \"\"\"\n\n        time = \"<green>[{time:YYYY-MM-DD HH:mm:ss.SSS}]</green>\"\n        level = \"<level>{level: <6}</level>\"\n        origin = \"<level>{extra[original_name]}.{extra[original_class]}.{extra[original_caller]}::{extra[original_line]}</level>\"\n        message = \"<level>{message}</level>\"\n        log_format = f\"{time} {level} {origin} \\n{message}\"\n\n        logger.remove()\n        if self.LOG_LEVEL == \"DEBUG\":\n            return logger.add(\n                sys.stdout,\n                colorize=True,\n                format=log_format,\n                backtrace=True,\n                diagnose=True,\n                filter=self.show_log_level,\n            )\n        else:\n            return logger.add(\n                sys.stdout,\n                colorize=True,\n                format=log_format,\n                filter=self.show_log_level,\n                level=self.LOG_LEVEL,\n            )\n\n    def get_caller_info(self, skip=3):\n        \"\"\"Get the name of a caller in the format module.class.method.\n\n        Copied from: https://gist.github.com/techtonik/2151727\n\n        Parameters\n        ----------\n        skip :  int\n            Specifies how many levels of stack to skip while getting caller name.\n\n        Returns\n        -------\n        package : str\n            Caller package.\n        module : str\n            Caller module.\n        klass : str\n            Caller classname if one otherwise None.\n        caller : str\n            Caller function or method (if a class exist).\n        line : int\n            The line of the call.\n\n\n        Notes\n        -----\n        skip=1 means \"who calls me\",\n        skip=2 \"who calls my caller\" etc.\n\n        An empty string is returned if skipped levels exceed stack height.\n        \"\"\"\n        stack = inspect.stack()\n        start = 0 + skip\n        if len(stack) < start + 1:\n            return \"\"\n        parentframe = stack[start][0]\n\n        # module and packagename.\n        module_info = inspect.getmodule(parentframe)\n        if module_info:\n            mod = module_info.__name__.split(\".\")\n            package = mod[0]\n            module = \".\".join(mod[1:])\n\n        # class name.\n        klass = \"\"\n        if 
\"self\" in parentframe.f_locals:\n            klass = parentframe.f_locals[\"self\"].__class__.__name__\n\n        # method or function name.\n        caller = None\n        if parentframe.f_code.co_name != \"<module>\":  # top level usually\n            caller = parentframe.f_code.co_name\n\n        # call line.\n        line = parentframe.f_lineno\n\n        # Remove reference to frame\n        # See: https://docs.python.org/3/library/inspect.html#the-interpreter-stack\n        del parentframe\n\n        return package, module, klass, caller, line\n\n    def __call__(self, msg, level=\"DEBUG\"):\n        \"\"\"Alias of self.log()\"\"\"\n        self.log(msg, level)\n\n    def debug(self, msg):\n        \"\"\"Logs a DEBUG message\"\"\"\n        self.log(msg, level=\"DEBUG\")\n\n    def info(self, msg):\n        \"\"\"Logs an INFO message\"\"\"\n        self.log(msg, level=\"INFO\")\n\n    def warning(self, msg):\n        \"\"\"Logs a WARNING message\"\"\"\n        self.log(msg, level=\"WARNING\")\n\n    def error(self, msg):\n        \"\"\"Logs an ERROR message\"\"\"\n        self.log(msg, level=\"ERROR\")\n\n    def critical(self, msg):\n        \"\"\"Logs a CRITICAL message\"\"\"\n        self.log(msg, level=\"CRITICAL\")\n\n    def log(self, msg, level=\"DEBUG\"):\n        \"\"\"Log a message\n\n        Parameters\n        ----------\n        msg :\n            Message to be logged.\n        level : str\n            Logging level.\"\"\"\n\n        (package, module, klass, caller, line) = self.get_caller_info()\n\n        custom_logger = logger.bind(\n            original_name=f\"{package}.{module}\",\n            original_line=line,\n            original_class=klass,\n            original_caller=caller,\n        )\n\n        # prettify\n        if type(msg) in [dict, list, str]:  # TODO: should be recursive\n            try:\n                msg = json.dumps(msg, indent=4)\n            except Exception:\n                pass\n        else:\n            msg = pformat(msg)\n\n        # actual log\n        custom_logger.log(level, msg)\n\n    def welcome(self):\n        \"\"\"Welcome message in the terminal.\"\"\"\n        secure = \"s\" if get_env(\"CCAT_CORE_USE_SECURE_PROTOCOLS\") in (\"true\", \"1\") else \"\"\n\n        cat_host = get_env(\"CCAT_CORE_HOST\")\n        cat_port = get_env(\"CCAT_CORE_PORT\")\n        cat_address = f\"http{secure}://{cat_host}:{cat_port}\"\n\n        with open(\"cat/welcome.txt\", \"r\") as f:\n            print(f.read())\n\n        print(\"\\n=============== ^._.^ ===============\\n\")\n        print(f\"Cat REST API:   {cat_address}/docs\")\n        print(f\"Cat ADMIN:      {cat_address}/admin\\n\")\n        print(\"======================================\")\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.__call__","title":"__call__(msg, level='DEBUG')","text":"

Alias of self.log()

Source code in cat/log.py
def __call__(self, msg, level=\"DEBUG\"):\n    \"\"\"Alias of self.log()\"\"\"\n    self.log(msg, level)\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.critical","title":"critical(msg)","text":"

Logs a CRITICAL message

Source code in cat/log.py
def critical(self, msg):\n    \"\"\"Logs a CRITICAL message\"\"\"\n    self.log(msg, level=\"CRITICAL\")\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.debug","title":"debug(msg)","text":"

Logs a DEBUG message

Source code in cat/log.py
def debug(self, msg):\n    \"\"\"Logs a DEBUG message\"\"\"\n    self.log(msg, level=\"DEBUG\")\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.default_log","title":"default_log()","text":"

Set the same logging level for all the project dependencies.

Source code in cat/log.py
def default_log(self):\n    \"\"\"Set the same logging level for all the project dependencies.\n\n    Returns\n    -------\n    \"\"\"\n\n    time = \"<green>[{time:YYYY-MM-DD HH:mm:ss.SSS}]</green>\"\n    level = \"<level>{level: <6}</level>\"\n    origin = \"<level>{extra[original_name]}.{extra[original_class]}.{extra[original_caller]}::{extra[original_line]}</level>\"\n    message = \"<level>{message}</level>\"\n    log_format = f\"{time} {level} {origin} \\n{message}\"\n\n    logger.remove()\n    if self.LOG_LEVEL == \"DEBUG\":\n        return logger.add(\n            sys.stdout,\n            colorize=True,\n            format=log_format,\n            backtrace=True,\n            diagnose=True,\n            filter=self.show_log_level,\n        )\n    else:\n        return logger.add(\n            sys.stdout,\n            colorize=True,\n            format=log_format,\n            filter=self.show_log_level,\n            level=self.LOG_LEVEL,\n        )\n
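For orientation, the log_format assembled above renders every record as a two-line entry: timestamp, padded level and origin ({package}.{module}.{class}.{caller}::{line}) on the first line, and the message on the second. An illustrative rendering (invented values, not captured output):

[2024-05-01 10:20:30.123] INFO   cat.rabbit_hole.RabbitHole.store_documents::312 
Preparing to memorize 42 vectors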
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.error","title":"error(msg)","text":"

Logs an ERROR message

Source code in cat/log.py
def error(self, msg):\n    \"\"\"Logs an ERROR message\"\"\"\n    self.log(msg, level=\"ERROR\")\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.get_caller_info","title":"get_caller_info(skip=3)","text":"

Get the name of a caller in the format module.class.method.

Copied from: https://gist.github.com/techtonik/2151727

Parameters:

skip (int, default 3): Specifies how many levels of stack to skip while getting caller name.

Returns:

package (str): Caller package.
module (str): Caller module.
klass (str): Caller class name if one exists, otherwise None.
caller (str): Caller function or method (if a class exists).
line (int): The line of the call.

Notes

skip=1 means \"who calls me\", skip=2 \"who calls my caller\" etc.

An empty string is returned if skipped levels exceed stack height.

Source code in cat/log.py
def get_caller_info(self, skip=3):\n    \"\"\"Get the name of a caller in the format module.class.method.\n\n    Copied from: https://gist.github.com/techtonik/2151727\n\n    Parameters\n    ----------\n    skip :  int\n        Specifies how many levels of stack to skip while getting caller name.\n\n    Returns\n    -------\n    package : str\n        Caller package.\n    module : str\n        Caller module.\n    klass : str\n        Caller classname if one otherwise None.\n    caller : str\n        Caller function or method (if a class exist).\n    line : int\n        The line of the call.\n\n\n    Notes\n    -----\n    skip=1 means \"who calls me\",\n    skip=2 \"who calls my caller\" etc.\n\n    An empty string is returned if skipped levels exceed stack height.\n    \"\"\"\n    stack = inspect.stack()\n    start = 0 + skip\n    if len(stack) < start + 1:\n        return \"\"\n    parentframe = stack[start][0]\n\n    # module and packagename.\n    module_info = inspect.getmodule(parentframe)\n    if module_info:\n        mod = module_info.__name__.split(\".\")\n        package = mod[0]\n        module = \".\".join(mod[1:])\n\n    # class name.\n    klass = \"\"\n    if \"self\" in parentframe.f_locals:\n        klass = parentframe.f_locals[\"self\"].__class__.__name__\n\n    # method or function name.\n    caller = None\n    if parentframe.f_code.co_name != \"<module>\":  # top level usually\n        caller = parentframe.f_code.co_name\n\n    # call line.\n    line = parentframe.f_lineno\n\n    # Remove reference to frame\n    # See: https://docs.python.org/3/library/inspect.html#the-interpreter-stack\n    del parentframe\n\n    return package, module, klass, caller, line\n
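As a standalone illustration of the skip semantics described above (not part of the Cat's codebase), a stripped-down caller lookup can be written like this:

import inspect

def caller_name(skip=1):
    # skip=1 -> "who calls me", skip=2 -> "who calls my caller", etc.
    stack = inspect.stack()
    if len(stack) < skip + 1:
        return ""  # skipped levels exceed stack height
    frame = stack[skip][0]
    try:
        return frame.f_code.co_name
    finally:
        # drop the frame reference, as the interpreter-stack docs recommend
        del frame

def outer():
    return caller_name(skip=1)

print(outer())  # prints: outer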
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.info","title":"info(msg)","text":"

Logs an INFO message

Source code in cat/log.py
def info(self, msg):\n    \"\"\"Logs an INFO message\"\"\"\n    self.log(msg, level=\"INFO\")\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.log","title":"log(msg, level='DEBUG')","text":"

Log a message

Parameters:

msg (required): Message to be logged.
level (str, default 'DEBUG'): Logging level.

Source code in cat/log.py
def log(self, msg, level=\"DEBUG\"):\n    \"\"\"Log a message\n\n    Parameters\n    ----------\n    msg :\n        Message to be logged.\n    level : str\n        Logging level.\"\"\"\n\n    (package, module, klass, caller, line) = self.get_caller_info()\n\n    custom_logger = logger.bind(\n        original_name=f\"{package}.{module}\",\n        original_line=line,\n        original_class=klass,\n        original_caller=caller,\n    )\n\n    # prettify\n    if type(msg) in [dict, list, str]:  # TODO: should be recursive\n        try:\n            msg = json.dumps(msg, indent=4)\n        except Exception:\n            pass\n    else:\n        msg = pformat(msg)\n\n    # actual log\n    custom_logger.log(level, msg)\n
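Because log() routes dict, list and str payloads through json.dumps(..., indent=4) and everything else through pformat (see the source above), structured payloads come out indented in the terminal. A small usage sketch, again assuming the module-level log instance:

from cat.log import log

log("cache warmed", "INFO")  # plain string, logged at INFO level

# dicts and lists are rendered as indented JSON, keeping payloads readable
log.warning({"plugin": "my_plugin", "hooks": ["before_cat_reads_message"], "active": True})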
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.show_log_level","title":"show_log_level(record)","text":"

Decide whether a record is shown in the log, based on the globally configured level.

Parameters:

record (dict): required

Returns:

bool

Source code in cat/log.py
def show_log_level(self, record):\n    \"\"\"Decide whether a record is shown in the log, based on the globally configured level.\n\n    Parameters\n    ----------\n    record : dict\n\n    Returns\n    -------\n    bool\n\n    \"\"\"\n    return record[\"level\"].no >= logger.level(self.LOG_LEVEL).no\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.warning","title":"warning(msg)","text":"

Logs a WARNING message

Source code in cat/log.py
def warning(self, msg):\n    \"\"\"Logs a WARNING message\"\"\"\n    self.log(msg, level=\"WARNING\")\n
"},{"location":"API_Documentation/log/#cat.log.CatLogEngine.welcome","title":"welcome()","text":"

Welcome message in the terminal.

Source code in cat/log.py
def welcome(self):\n    \"\"\"Welcome message in the terminal.\"\"\"\n    secure = \"s\" if get_env(\"CCAT_CORE_USE_SECURE_PROTOCOLS\") in (\"true\", \"1\") else \"\"\n\n    cat_host = get_env(\"CCAT_CORE_HOST\")\n    cat_port = get_env(\"CCAT_CORE_PORT\")\n    cat_address = f\"http{secure}://{cat_host}:{cat_port}\"\n\n    with open(\"cat/welcome.txt\", \"r\") as f:\n        print(f.read())\n\n    print(\"\\n=============== ^._.^ ===============\\n\")\n    print(f\"Cat REST API:   {cat_address}/docs\")\n    print(f\"Cat ADMIN:      {cat_address}/admin\\n\")\n    print(\"======================================\")\n
"},{"location":"API_Documentation/log/#cat.log.get_log_level","title":"get_log_level()","text":"

Return the global LOG level.

Source code in cat/log.py
def get_log_level():\n    \"\"\"Return the global LOG level.\"\"\"\n    return get_env(\"CCAT_LOG_LEVEL\")\n
"},{"location":"API_Documentation/rabbit_hole/","title":"rabbit_hole","text":""},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole","title":"RabbitHole","text":"

Manages content ingestion. I'm late... I'm late!

Source code in cat/rabbit_hole.py
@singleton\nclass RabbitHole:\n    \"\"\"Manages content ingestion. I'm late... I'm late!\"\"\"\n\n    def __init__(self, cat) -> None:\n        self.__cat = cat\n\n    # each time we access the file handlers, plugins can intervene\n    def __reload_file_handlers(self):\n        # default file handlers\n        self.__file_handlers = {\n            \"application/pdf\": PDFMinerParser(),\n            \"text/plain\": TextParser(),\n            \"text/markdown\": TextParser(),\n            \"text/html\": BS4HTMLParser(),\n        }\n\n        # no access to stray\n        self.__file_handlers = self.__cat.mad_hatter.execute_hook(\n            \"rabbithole_instantiates_parsers\", self.__file_handlers, cat=self.__cat\n        )\n\n    def __reload_text_splitter(self):\n        # default text splitter\n        self.__text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(\n            chunk_size=256,\n            chunk_overlap=64,\n            separators=[\"\\\\n\\\\n\", \"\\n\\n\", \".\\\\n\", \".\\n\", \"\\\\n\", \"\\n\", \" \", \"\"],\n            encoding_name=\"cl100k_base\",\n            keep_separator=True,\n            strip_whitespace=True,\n        )\n\n        # no access to stray\n        self.__text_splitter = self.__cat.mad_hatter.execute_hook(\n            \"rabbithole_instantiates_splitter\", self.__text_splitter, cat=self.__cat\n        )\n\n    def ingest_memory(\n            self,\n            stray,\n            file: UploadFile\n        ):\n        \"\"\"Upload memories to the declarative memory from a JSON file.\n\n        Parameters\n        ----------\n        file : UploadFile\n            File object sent via `rabbithole/memory` hook.\n\n        Notes\n        -----\n        This method allows uploading a JSON file containing vector and text memories directly to the declarative memory.\n        When doing this, please, make sure the embedder used to export the memories is the same as the one used\n        when uploading.\n        The method also performs a check on the dimensionality of the embeddings (i.e. 
length of each vector).\n\n        \"\"\"\n\n        # Get file bytes\n        file_bytes = file.file.read()\n\n        # Load file bytes into a dict\n        memories = json.loads(file_bytes.decode(\"utf-8\"))\n\n        # Check the embedder used for the uploaded memories is the same one the Cat is using now\n        upload_embedder = memories[\"embedder\"]\n        cat_embedder = str(stray.embedder.__class__.__name__)\n\n        if upload_embedder != cat_embedder:\n            message = f\"Embedder mismatch: file embedder {upload_embedder} is different from {cat_embedder}\"\n            raise Exception(message)\n\n        # Get Declarative memories in file\n        declarative_memories = memories[\"collections\"][\"declarative\"]\n\n        # Store data to upload the memories in batch\n        ids = [i[\"id\"] for i in declarative_memories]\n        payloads = [\n            {\"page_content\": p[\"page_content\"], \"metadata\": p[\"metadata\"]}\n            for p in declarative_memories\n        ]\n        vectors = [v[\"vector\"] for v in declarative_memories]\n\n        log.info(f\"Preparing to load {len(vectors)} vector memories\")\n\n        # Check embedding size is correct: every vector must match the embedder size\n        embedder_size = stray.memory.vectors.declarative.embedder_size\n        size_ok = [len(v) == embedder_size for v in vectors]\n\n        if not all(size_ok):\n            message = (\n                f\"Embedding size mismatch: vectors length should be {embedder_size}\"\n            )\n            raise Exception(message)\n\n        # Upsert memories in batch mode # TODO REFACTOR: use VectorMemoryCollection.add_point\n        stray.memory.vectors.vector_db.upsert(\n            collection_name=\"declarative\",\n            points=models.Batch(ids=ids, payloads=payloads, vectors=vectors),\n        )\n\n    def ingest_file(\n        self,\n        stray,\n        file: Union[str, UploadFile],\n        chunk_size: int | None = None,\n        chunk_overlap: int | None = None,\n        metadata: dict = {}\n    ):\n        \"\"\"Load a file in the Cat's declarative memory.\n\n        The method splits and converts the file into Langchain `Document`. 
Then, it stores the `Document` in the Cat's\n        memory.\n\n        Parameters\n        ----------\n        file : str, UploadFile\n            The file can be a path passed as a string or an `UploadFile` object if the document is ingested using the\n            `rabbithole` endpoint.\n        chunk_size : int\n            Number of tokens in each document chunk.\n        chunk_overlap : int\n            Number of overlapping tokens between consecutive chunks.\n        metadata : dict\n            Metadata to be stored with each chunk.\n\n        Notes\n        ----------\n        Currently supported formats are `.txt`, `.pdf` and `.md`.\n        You can add custom ones or substitute the above via RabbitHole hooks.\n\n        See Also\n        ----------\n        before_rabbithole_stores_documents\n        \"\"\"\n\n        # split file into a list of docs\n        docs = self.file_to_docs(\n            stray=stray,\n            file=file,\n            chunk_size=chunk_size,\n            chunk_overlap=chunk_overlap\n        )\n\n        # store in memory\n        if isinstance(file, str):\n            filename = file\n        else:\n            filename = file.filename\n\n        self.store_documents(stray=stray, docs=docs, source=filename, metadata=metadata)\n\n    def file_to_docs(\n        self,\n        stray,\n        file: Union[str, UploadFile],\n        chunk_size: int | None = None,\n        chunk_overlap: int | None = None\n    ) -> List[Document]:\n        \"\"\"Load and convert files to Langchain `Document`.\n\n        This method takes a file either from a Python script or from the `/rabbithole/` and `/rabbithole/web` endpoints.\n        Hence, it loads it in memory and splits it in overlapped chunks of text.\n\n        Parameters\n        ----------\n        file : str, UploadFile\n            The file can be either a string path if loaded programmatically, a FastAPI `UploadFile`\n            if coming from the `/rabbithole/` endpoint or a URL if coming from the `/rabbithole/web` endpoint.\n        chunk_size : int\n            Number of tokens in each document chunk.\n        chunk_overlap : int\n            Number of overlapping tokens between consecutive chunks.\n\n        Returns\n        -------\n        docs : List[Document]\n            List of Langchain `Document` of chunked text.\n\n        Notes\n        -----\n        This method is used by both `/rabbithole/` and `/rabbithole/web` endpoints.\n        Currently supported files are `.txt`, `.pdf`, `.md` and web pages.\n\n        \"\"\"\n\n        # Check type of incoming file.\n        if isinstance(file, UploadFile):\n            # Get mime type and source of UploadFile\n            content_type = mimetypes.guess_type(file.filename)[0]\n            source = file.filename\n\n            # Get file bytes\n            file_bytes = file.file.read()\n        elif isinstance(file, str):\n            # Check if the string is a local path or a url\n            parsed_file = urlparse(file)\n            is_url = all([parsed_file.scheme, parsed_file.netloc])\n\n            if is_url:\n                # Make a request with a fake browser name\n                request = httpx.get(file, headers={\"User-Agent\": \"Magic Browser\"})\n\n                # Define mime type and source of url\n                content_type = request.headers[\"Content-Type\"].split(\";\")[0]\n                source = file\n\n                try:\n                    # Get binary content of url\n                    file_bytes = request.content\n                
except HTTPError as e:\n                    log.error(e)\n            else:\n                # Get mime type from file extension and source\n                content_type = mimetypes.guess_type(file)[0]\n                source = os.path.basename(file)\n\n                # Get file bytes\n                with open(file, \"rb\") as f:\n                    file_bytes = f.read()\n        else:\n            raise ValueError(f\"{type(file)} is not a valid type.\")\n        return self.string_to_docs(\n            stray=stray,\n            file_bytes=file_bytes,\n            source=source,\n            content_type=content_type,\n            chunk_size=chunk_size,\n            chunk_overlap=chunk_overlap\n        )\n\n    def string_to_docs(\n        self,\n        stray,\n        file_bytes: str,\n        source: str = None,\n        content_type: str = \"text/plain\",\n        chunk_size: int | None = None,\n        chunk_overlap: int | None = None\n    ) -> List[Document]:\n        \"\"\"Convert string to Langchain `Document`.\n\n        Takes a string, converts it to langchain `Document`.\n        Hence, loads it in memory and splits it in overlapped chunks of text.\n\n        Parameters\n        ----------\n        file_bytes : str\n            The string to be converted.\n        source: str\n            Source filename.\n        content_type:\n            Mimetype of content.\n        chunk_size : int\n            Number of tokens in each document chunk.\n        chunk_overlap : int\n            Number of overlapping tokens between consecutive chunks.\n\n        Returns\n        -------\n        docs : List[Document]\n            List of Langchain `Document` of chunked text.\n        \"\"\"\n\n        # Load the bytes in the Blob schema\n        blob = Blob(data=file_bytes, mimetype=content_type, source=source).from_data(\n            data=file_bytes, mime_type=content_type, path=source\n        )\n        # Parser based on the mime type\n        parser = MimeTypeBasedParser(handlers=self.file_handlers)\n\n        # Parse the text\n        stray.send_ws_message(\n            \"I'm parsing the content. Big content could require some minutes...\"\n        )\n        super_docs = parser.parse(blob)\n\n        # Split\n        stray.send_ws_message(\"Parsing completed. Now let's go with reading process...\")\n        docs = self.__split_text(\n            stray=stray,\n            text=super_docs,\n            chunk_size=chunk_size,\n            chunk_overlap=chunk_overlap,\n        )\n        return docs\n\n    def store_documents(\n            self,\n            stray,\n            docs: List[Document],\n            source: str, # TODOV2: is this necessary?\n            metadata: dict = {}\n        ) -> None:\n        \"\"\"Add documents to the Cat's declarative memory.\n\n        This method loops a list of Langchain `Document` and adds some metadata. Namely, the source filename and the\n        timestamp of insertion. Once done, the method notifies the client via Websocket connection.\n\n        Parameters\n        ----------\n        docs : List[Document]\n            List of Langchain `Document` to be inserted in the Cat's declarative memory.\n        source : str\n            Source name to be added as a metadata. 
It can be a file name or an URL.\n        metadata : dict\n            Metadata to be stored with each chunk.\n\n        Notes\n        -------\n        At this point, it is possible to customize the Cat's behavior using the `before_rabbithole_insert_memory` hook\n        to edit the memories before they are inserted in the vector database.\n\n        See Also\n        --------\n        before_rabbithole_insert_memory\n        \"\"\"\n\n        log.info(f\"Preparing to memorize {len(docs)} vectors\")\n\n        # hook the docs before they are stored in the vector memory\n        docs = stray.mad_hatter.execute_hook(\n            \"before_rabbithole_stores_documents\", docs, cat=stray\n        )\n\n        # classic embed\n        time_last_notification = time.time()\n        time_interval = 10  # a notification every 10 secs\n        stored_points = []\n        for d, doc in enumerate(docs):\n            if time.time() - time_last_notification > time_interval:\n                time_last_notification = time.time()\n                perc_read = int(d / len(docs) * 100)\n                read_message = f\"Read {perc_read}% of {source}\"\n                stray.send_ws_message(read_message)\n                log.warning(read_message)\n\n            # add default metadata\n            doc.metadata[\"source\"] = source\n            doc.metadata[\"when\"] = time.time()\n            # add custom metadata (sent via endpoint)\n            for k,v in metadata.items():\n                doc.metadata[k] = v\n\n            doc = stray.mad_hatter.execute_hook(\n                \"before_rabbithole_insert_memory\", doc, cat=stray\n            )\n            inserting_info = f\"{d + 1}/{len(docs)}):    {doc.page_content}\"\n            if doc.page_content != \"\":\n                doc_embedding = stray.embedder.embed_documents([doc.page_content])\n                stored_point = stray.memory.vectors.declarative.add_point(\n                    doc.page_content,\n                    doc_embedding[0],\n                    doc.metadata,\n                )\n                stored_points.append(stored_point)\n\n                log.info(f\"Inserted into memory ({inserting_info})\")\n            else:\n                log.info(f\"Skipped memory insertion of empty doc ({inserting_info})\")\n\n            # wait a little to avoid APIs rate limit errors\n            time.sleep(0.05)\n\n        # hook the points after they are stored in the vector memory\n        stray.mad_hatter.execute_hook(\n            \"after_rabbithole_stored_documents\", source, stored_points, cat=stray\n        )\n\n        # notify client\n        finished_reading_message = (\n            f\"Finished reading {source}, I made {len(docs)} thoughts on it.\"\n        )\n\n        stray.send_ws_message(finished_reading_message)\n\n        log.warning(f\"Done uploading {source}\")\n\n    def __split_text(self, stray, text, chunk_size, chunk_overlap):\n        \"\"\"Split text in overlapped chunks.\n\n        This method executes the `rabbithole_splits_text` to split the incoming text in overlapped\n        chunks of text. 
Two other hooks are available to edit the text before and after the split step.\n\n        Parameters\n        ----------\n        text : str\n            Content of the loaded file.\n        chunk_size : int\n            Number of tokens in each document chunk.\n        chunk_overlap : int\n            Number of overlapping tokens between consecutive chunks.\n\n        Returns\n        -------\n        docs : List[Document]\n            List of split Langchain `Document`.\n\n        Notes\n        -----\n        The default behavior only executes the `rabbithole_splits_text` hook. `before_rabbithole_splits_text` and\n        `after_rabbithole_splitted_text` hooks return the original input without any modification.\n\n        See Also\n        --------\n        before_rabbithole_splits_text\n        rabbithole_splits_text\n        after_rabbithole_splitted_text\n\n        \"\"\"\n        # do something on the text before it is split\n        text = stray.mad_hatter.execute_hook(\n            \"before_rabbithole_splits_text\", text, cat=stray\n        )\n\n        # hooks decide the text splitter (see @property .text_splitter)\n        text_splitter = self.text_splitter\n\n        # override chunk_size and chunk_overlap only if the request has those info\n        if chunk_size:\n            text_splitter._chunk_size = chunk_size\n        if chunk_overlap:\n            text_splitter._chunk_overlap = chunk_overlap\n\n        log.info(f\"Chunk size: {chunk_size}, chunk overlap: {chunk_overlap}\")\n        # split text\n        docs = text_splitter.split_documents(text)\n        # remove short texts (page numbers, isolated words, etc.)\n        # TODO: join each short chunk with previous one, instead of deleting them\n        docs = list(filter(lambda d: len(d.page_content) > 10, docs))\n\n        # do something on the text after it is split\n        docs = stray.mad_hatter.execute_hook(\n            \"after_rabbithole_splitted_text\", docs, cat=stray\n        )\n\n        return docs\n\n    # each time we access the file handlers, plugins can intervene\n    @property\n    def file_handlers(self):\n        self.__reload_file_handlers()\n        return self.__file_handlers\n\n    # each time we access the text splitter, plugins can intervene\n    @property\n    def text_splitter(self):\n        self.__reload_text_splitter()\n        return self.__text_splitter\n
"},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole.__split_text","title":"__split_text(stray, text, chunk_size, chunk_overlap)","text":"

Split text in overlapped chunks.

This method executes the rabbithole_splits_text hook to split the incoming text into overlapped chunks of text. Two other hooks are available to edit the text before and after the split step.

Parameters:

- text (str, required): Content of the loaded file.
- chunk_size (int, required): Number of tokens in each document chunk.
- chunk_overlap (int, required): Number of overlapping tokens between consecutive chunks.

Returns:

- docs (List[Document]): List of split Langchain Document.

Notes

The default behavior only executes the rabbithole_splits_text hook. before_rabbithole_splits_text and after_rabbithole_splitted_text hooks return the original input without any modification.

See Also

before_rabbithole_splits_text, rabbithole_splits_text, after_rabbithole_splitted_text

Source code in cat/rabbit_hole.py
def __split_text(self, stray, text, chunk_size, chunk_overlap):\n    \"\"\"Split text in overlapped chunks.\n\n    This method executes the `rabbithole_splits_text` to split the incoming text in overlapped\n    chunks of text. Two other hooks are available to edit the text before and after the split step.\n\n    Parameters\n    ----------\n    text : str\n        Content of the loaded file.\n    chunk_size : int\n        Number of tokens in each document chunk.\n    chunk_overlap : int\n        Number of overlapping tokens between consecutive chunks.\n\n    Returns\n    -------\n    docs : List[Document]\n        List of split Langchain `Document`.\n\n    Notes\n    -----\n    The default behavior only executes the `rabbithole_splits_text` hook. `before_rabbithole_splits_text` and\n    `after_rabbithole_splitted_text` hooks return the original input without any modification.\n\n    See Also\n    --------\n    before_rabbithole_splits_text\n    rabbithole_splits_text\n    after_rabbithole_splitted_text\n\n    \"\"\"\n    # do something on the text before it is split\n    text = stray.mad_hatter.execute_hook(\n        \"before_rabbithole_splits_text\", text, cat=stray\n    )\n\n    # hooks decide the text splitter (see @property .text_splitter)\n    text_splitter = self.text_splitter\n\n    # override chunk_size and chunk_overlap only if the request has those info\n    if chunk_size:\n        text_splitter._chunk_size = chunk_size\n    if chunk_overlap:\n        text_splitter._chunk_overlap = chunk_overlap\n\n    log.info(f\"Chunk size: {chunk_size}, chunk overlap: {chunk_overlap}\")\n    # split text\n    docs = text_splitter.split_documents(text)\n    # remove short texts (page numbers, isolated words, etc.)\n    # TODO: join each short chunk with previous one, instead of deleting them\n    docs = list(filter(lambda d: len(d.page_content) > 10, docs))\n\n    # do something on the text after it is split\n    docs = stray.mad_hatter.execute_hook(\n        \"after_rabbithole_splitted_text\", docs, cat=stray\n    )\n\n    return docs\n
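The hooks listed above are the supported way to customize the split step from a plugin. A minimal sketch (the @hook decorator import is the standard Cheshire Cat plugin API; the whitespace normalization is just an illustrative transformation):

from cat.mad_hatter.decorators import hook

@hook
def before_rabbithole_splits_text(text, cat):
    # `text` here is the list of parsed Documents about to be chunked
    for doc in text:
        # collapse runs of whitespace before splitting
        doc.page_content = " ".join(doc.page_content.split())
    return text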
"},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole.file_to_docs","title":"file_to_docs(stray, file, chunk_size=None, chunk_overlap=None)","text":"

Load and convert files to Langchain Document.

This method takes a file either from a Python script or from the /rabbithole/ and /rabbithole/web endpoints. It then loads the file in memory and splits it into overlapped chunks of text.

Parameters:

- file (str | UploadFile, required): The file can be either a string path if loaded programmatically, a FastAPI UploadFile if coming from the /rabbithole/ endpoint or a URL if coming from the /rabbithole/web endpoint.
- chunk_size (int, default None): Number of tokens in each document chunk.
- chunk_overlap (int, default None): Number of overlapping tokens between consecutive chunks.

Returns:

- docs (List[Document]): List of Langchain Document of chunked text.

Notes

This method is used by both /rabbithole/ and /rabbithole/web endpoints. Currently supported files are .txt, .pdf, .md and web pages.

Source code in cat/rabbit_hole.py
def file_to_docs(\n    self,\n    stray,\n    file: Union[str, UploadFile],\n    chunk_size: int | None = None,\n    chunk_overlap: int | None = None\n) -> List[Document]:\n    \"\"\"Load and convert files to Langchain `Document`.\n\n    This method takes a file either from a Python script, from the `/rabbithole/` or `/rabbithole/web` endpoints.\n    Hence, it loads it in memory and splits it in overlapped chunks of text.\n\n    Parameters\n    ----------\n    file : str, UploadFile\n        The file can be either a string path if loaded programmatically, a FastAPI `UploadFile`\n        if coming from the `/rabbithole/` endpoint or a URL if coming from the `/rabbithole/web` endpoint.\n    chunk_size : int\n        Number of tokens in each document chunk.\n    chunk_overlap : int\n        Number of overlapping tokens between consecutive chunks.\n\n    Returns\n    -------\n    docs : List[Document]\n        List of Langchain `Document` of chunked text.\n\n    Notes\n    -----\n    This method is used by both `/rabbithole/` and `/rabbithole/web` endpoints.\n    Currently supported files are `.txt`, `.pdf`, `.md` and web pages.\n\n    \"\"\"\n\n    # Check type of incoming file.\n    if isinstance(file, UploadFile):\n        # Get mime type and source of UploadFile\n        content_type = mimetypes.guess_type(file.filename)[0]\n        source = file.filename\n\n        # Get file bytes\n        file_bytes = file.file.read()\n    elif isinstance(file, str):\n        # Check if string file is a string or url\n        parsed_file = urlparse(file)\n        is_url = all([parsed_file.scheme, parsed_file.netloc])\n\n        if is_url:\n            # Make a request with a fake browser name\n            request = httpx.get(file, headers={\"User-Agent\": \"Magic Browser\"})\n\n            # Define mime type and source of url\n            content_type = request.headers[\"Content-Type\"].split(\";\")[0]\n            source = file\n\n            try:\n                # Get binary content of url\n                file_bytes = request.content\n            except HTTPError as e:\n                log.error(e)\n        else:\n            # Get mime type from file extension and source\n            content_type = mimetypes.guess_type(file)[0]\n            source = os.path.basename(file)\n\n            # Get file bytes\n            with open(file, \"rb\") as f:\n                file_bytes = f.read()\n    else:\n        raise ValueError(f\"{type(file)} is not a valid type.\")\n    return self.string_to_docs(\n        stray=stray,\n        file_bytes=file_bytes,\n        source=source,\n        content_type=content_type,\n        chunk_size=chunk_size,\n        chunk_overlap=chunk_overlap\n    )\n
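A hedged usage sketch: calling file_to_docs from a plugin tool to inspect chunking without storing anything. The cat.rabbit_hole attribute is assumed to expose the RabbitHole instance (verify in your core version):

from cat.mad_hatter.decorators import tool

@tool
def preview_chunks(file_path, cat):
    """Preview how a local file would be chunked. Input is a file path."""
    docs = cat.rabbit_hole.file_to_docs(
        stray=cat, file=file_path, chunk_size=256, chunk_overlap=64
    )
    return f"{file_path} would produce {len(docs)} chunks"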
"},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole.ingest_file","title":"ingest_file(stray, file, chunk_size=None, chunk_overlap=None, metadata={})","text":"

Load a file in the Cat's declarative memory.

The method splits and converts the file into Langchain Document. Then, it stores the Document in the Cat's memory.

Parameters:

- file (str | UploadFile, required): The file can be a path passed as a string or an UploadFile object if the document is ingested using the rabbithole endpoint.
- chunk_size (int, default None): Number of tokens in each document chunk.
- chunk_overlap (int, default None): Number of overlapping tokens between consecutive chunks.
- metadata (dict, default {}): Metadata to be stored with each chunk.

Notes

Currently supported formats are .txt, .pdf and .md. You can add custom ones or substitute the above via RabbitHole hooks.

See Also

before_rabbithole_stores_documents

Source code in cat/rabbit_hole.py
def ingest_file(\n    self,\n    stray,\n    file: Union[str, UploadFile],\n    chunk_size: int | None = None,\n    chunk_overlap: int | None = None,\n    metadata: dict = {}\n):\n    \"\"\"Load a file in the Cat's declarative memory.\n\n    The method splits and converts the file in Langchain `Document`. Then, it stores the `Document` in the Cat's\n    memory.\n\n    Parameters\n    ----------\n    file : str, UploadFile\n        The file can be a path passed as a string or an `UploadFile` object if the document is ingested using the\n        `rabbithole` endpoint.\n    chunk_size : int\n        Number of tokens in each document chunk.\n    chunk_overlap : int\n        Number of overlapping tokens between consecutive chunks.\n    metadata : dict\n        Metadata to be stored with each chunk.\n\n    Notes\n    ----------\n    Currently supported formats are `.txt`, `.pdf` and `.md`.\n    You can add custom ones or substitute the above via RabbitHole hooks.\n\n    See Also\n    ----------\n    before_rabbithole_stores_documents\n    \"\"\"\n\n    # split file into a list of docs\n    docs = self.file_to_docs(\n        stray=stray,\n        file=file,\n        chunk_size=chunk_size,\n        chunk_overlap=chunk_overlap\n    )\n\n    # store in memory\n    if isinstance(file, str):\n        filename = file\n    else:\n        filename = file.filename\n\n    self.store_documents(stray=stray, docs=docs, source=filename, metadata=metadata)\n
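A minimal sketch of driving ingest_file from a plugin tool (the tool name and metadata values are hypothetical; cat.rabbit_hole is assumed to be available in the plugin context):

from cat.mad_hatter.decorators import tool

@tool
def memorize_file(file_path, cat):
    """Store a local document in declarative memory. Input is a file path."""
    cat.rabbit_hole.ingest_file(cat, file_path, metadata={"topic": "manuals"})
    return f"Ingested {file_path}"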
"},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole.ingest_memory","title":"ingest_memory(stray, file)","text":"

Upload memories to the declarative memory from a JSON file.

Parameters:

- file (UploadFile, required): File object sent via the rabbithole/memory endpoint.

Notes

This method allows uploading a JSON file containing vector and text memories directly to the declarative memory. When doing this, please make sure the embedder used to export the memories is the same as the one used when uploading. The method also performs a check on the dimensionality of the embeddings (i.e. the length of each vector).

Source code in cat/rabbit_hole.py
def ingest_memory(\n        self,\n        stray,\n        file: UploadFile\n    ):\n    \"\"\"Upload memories to the declarative memory from a JSON file.\n\n    Parameters\n    ----------\n    file : UploadFile\n        File object sent via the `rabbithole/memory` endpoint.\n\n    Notes\n    -----\n    This method allows uploading a JSON file containing vector and text memories directly to the declarative memory.\n    When doing this, please make sure the embedder used to export the memories is the same as the one used\n    when uploading.\n    The method also performs a check on the dimensionality of the embeddings (i.e. length of each vector).\n\n    \"\"\"\n\n    # Get file bytes\n    file_bytes = file.file.read()\n\n    # Load file bytes in a dict\n    memories = json.loads(file_bytes.decode(\"utf-8\"))\n\n    # Check the embedder used for the uploaded memories is the same the Cat is using now\n    upload_embedder = memories[\"embedder\"]\n    cat_embedder = str(stray.embedder.__class__.__name__)\n\n    if upload_embedder != cat_embedder:\n        message = f\"Embedder mismatch: file embedder {upload_embedder} is different from {cat_embedder}\"\n        raise Exception(message)\n\n    # Get Declarative memories in file\n    declarative_memories = memories[\"collections\"][\"declarative\"]\n\n    # Store data to upload the memories in batch\n    ids = [i[\"id\"] for i in declarative_memories]\n    payloads = [\n        {\"page_content\": p[\"page_content\"], \"metadata\": p[\"metadata\"]}\n        for p in declarative_memories\n    ]\n    vectors = [v[\"vector\"] for v in declarative_memories]\n\n    log.info(f\"Preparing to load {len(vectors)} vector memories\")\n\n    # Check embedding size is correct (each entry is True when the vector length matches)\n    embedder_size = stray.memory.vectors.declarative.embedder_size\n    len_ok = [len(v) == embedder_size for v in vectors]\n\n    if not all(len_ok):\n        message = (\n            f\"Embedding size mismatch: vectors length should be {embedder_size}\"\n        )\n        raise Exception(message)\n\n    # Upsert memories in batch mode # TODO REFACTOR: use VectorMemoryCollection.add_point\n    stray.memory.vectors.vector_db.upsert(\n        collection_name=\"declarative\",\n        points=models.Batch(ids=ids, payloads=payloads, vectors=vectors),\n    )\n
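For reference, the JSON layout ingest_memory expects can be read off the code above: a top-level embedder name plus a declarative collection of points. The field names come straight from the code; the concrete values below are made up:

memories = {
    "embedder": "OpenAIEmbeddings",  # must equal the class name of the active embedder
    "collections": {
        "declarative": [
            {
                "id": "00000000-0000-0000-0000-000000000001",
                "page_content": "The Cheshire Cat grins.",
                "metadata": {"source": "export.json", "when": 1700000000.0},
                "vector": [0.0] * 1536,  # length must match the embedder size
            },
        ],
    },
}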
"},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole.store_documents","title":"store_documents(stray, docs, source, metadata={})","text":"

Add documents to the Cat's declarative memory.

This method loops over a list of Langchain Document and adds some metadata, namely the source filename and the timestamp of insertion. Once done, the method notifies the client via Websocket connection.

Parameters:

- docs (List[Document], required): List of Langchain Document to be inserted in the Cat's declarative memory.
- source (str, required): Source name to be added as metadata. It can be a file name or a URL.
- metadata (dict, default {}): Metadata to be stored with each chunk.

Notes

At this point, it is possible to customize the Cat's behavior using the before_rabbithole_insert_memory hook to edit the memories before they are inserted in the vector database.

See Also

before_rabbithole_insert_memory

Source code in cat/rabbit_hole.py
def store_documents(\n        self,\n        stray,\n        docs: List[Document],\n        source: str, # TODOV2: is this necessary?\n        metadata: dict = {}\n    ) -> None:\n    \"\"\"Add documents to the Cat's declarative memory.\n\n    This method loops a list of Langchain `Document` and adds some metadata. Namely, the source filename and the\n    timestamp of insertion. Once done, the method notifies the client via Websocket connection.\n\n    Parameters\n    ----------\n    docs : List[Document]\n        List of Langchain `Document` to be inserted in the Cat's declarative memory.\n    source : str\n        Source name to be added as a metadata. It can be a file name or an URL.\n    metadata : dict\n        Metadata to be stored with each chunk.\n\n    Notes\n    -------\n    At this point, it is possible to customize the Cat's behavior using the `before_rabbithole_insert_memory` hook\n    to edit the memories before they are inserted in the vector database.\n\n    See Also\n    --------\n    before_rabbithole_insert_memory\n    \"\"\"\n\n    log.info(f\"Preparing to memorize {len(docs)} vectors\")\n\n    # hook the docs before they are stored in the vector memory\n    docs = stray.mad_hatter.execute_hook(\n        \"before_rabbithole_stores_documents\", docs, cat=stray\n    )\n\n    # classic embed\n    time_last_notification = time.time()\n    time_interval = 10  # a notification every 10 secs\n    stored_points = []\n    for d, doc in enumerate(docs):\n        if time.time() - time_last_notification > time_interval:\n            time_last_notification = time.time()\n            perc_read = int(d / len(docs) * 100)\n            read_message = f\"Read {perc_read}% of {source}\"\n            stray.send_ws_message(read_message)\n            log.warning(read_message)\n\n        # add default metadata\n        doc.metadata[\"source\"] = source\n        doc.metadata[\"when\"] = time.time()\n        # add custom metadata (sent via endpoint)\n        for k,v in metadata.items():\n            doc.metadata[k] = v\n\n        doc = stray.mad_hatter.execute_hook(\n            \"before_rabbithole_insert_memory\", doc, cat=stray\n        )\n        inserting_info = f\"{d + 1}/{len(docs)}):    {doc.page_content}\"\n        if doc.page_content != \"\":\n            doc_embedding = stray.embedder.embed_documents([doc.page_content])\n            stored_point = stray.memory.vectors.declarative.add_point(\n                doc.page_content,\n                doc_embedding[0],\n                doc.metadata,\n            )\n            stored_points.append(stored_point)\n\n            log.info(f\"Inserted into memory ({inserting_info})\")\n        else:\n            log.info(f\"Skipped memory insertion of empty doc ({inserting_info})\")\n\n        # wait a little to avoid APIs rate limit errors\n        time.sleep(0.05)\n\n    # hook the points after they are stored in the vector memory\n    stray.mad_hatter.execute_hook(\n        \"after_rabbithole_stored_documents\", source, stored_points, cat=stray\n    )\n\n    # notify client\n    finished_reading_message = (\n        f\"Finished reading {source}, I made {len(docs)} thoughts on it.\"\n    )\n\n    stray.send_ws_message(finished_reading_message)\n\n    log.warning(f\"Done uploading {source}\")\n
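A small sketch of the before_rabbithole_insert_memory hook mentioned above, whose (doc, cat) signature is visible in the loop: it can enrich or rewrite each chunk right before it is embedded (the metadata key is hypothetical):

from cat.mad_hatter.decorators import hook

@hook
def before_rabbithole_insert_memory(doc, cat):
    # `doc` is a single Langchain Document; tag it before it is embedded
    doc.metadata["pipeline"] = "rabbit_hole"
    return doc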
"},{"location":"API_Documentation/rabbit_hole/#cat.rabbit_hole.RabbitHole.string_to_docs","title":"string_to_docs(stray, file_bytes, source=None, content_type='text/plain', chunk_size=None, chunk_overlap=None)","text":"

Convert string to Langchain Document.

Takes a string and converts it to a Langchain Document, then loads it in memory and splits it into overlapped chunks of text.

Parameters:

- file_bytes (str, required): The string to be converted.
- source (str, default None): Source filename.
- content_type (str, default 'text/plain'): Mimetype of content.
- chunk_size (int, default None): Number of tokens in each document chunk.
- chunk_overlap (int, default None): Number of overlapping tokens between consecutive chunks.

Returns:

- docs (List[Document]): List of Langchain Document of chunked text.

Source code in cat/rabbit_hole.py
def string_to_docs(\n    self,\n    stray,\n    file_bytes: str,\n    source: str = None,\n    content_type: str = \"text/plain\",\n    chunk_size: int | None = None,\n    chunk_overlap: int | None = None\n) -> List[Document]:\n    \"\"\"Convert string to Langchain `Document`.\n\n    Takes a string, converts it to langchain `Document`.\n    Hence, loads it in memory and splits it in overlapped chunks of text.\n\n    Parameters\n    ----------\n    file_bytes : str\n        The string to be converted.\n    source: str\n        Source filename.\n    content_type:\n        Mimetype of content.\n    chunk_size : int\n        Number of tokens in each document chunk.\n    chunk_overlap : int\n        Number of overlapping tokens between consecutive chunks.\n\n    Returns\n    -------\n    docs : List[Document]\n        List of Langchain `Document` of chunked text.\n    \"\"\"\n\n    # Load the bytes in the Blob schema\n    blob = Blob(data=file_bytes, mimetype=content_type, source=source).from_data(\n        data=file_bytes, mime_type=content_type, path=source\n    )\n    # Parser based on the mime type\n    parser = MimeTypeBasedParser(handlers=self.file_handlers)\n\n    # Parse the text\n    stray.send_ws_message(\n        \"I'm parsing the content. Big content could require some minutes...\"\n    )\n    super_docs = parser.parse(blob)\n\n    # Split\n    stray.send_ws_message(\"Parsing completed. Now let's go with reading process...\")\n    docs = self.__split_text(\n        stray=stray,\n        text=super_docs,\n        chunk_size=chunk_size,\n        chunk_overlap=chunk_overlap,\n    )\n    return docs\n
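A hedged sketch of feeding a raw string through string_to_docs from a plugin tool, assuming a text/markdown handler is registered among the file_handlers (the supported-formats list above suggests it is; the source label is hypothetical):

from cat.mad_hatter.decorators import tool

@tool
def chunk_note(note, cat):
    """Split a raw markdown note into chunks. Input is the note text."""
    docs = cat.rabbit_hole.string_to_docs(
        stray=cat,
        file_bytes=note,
        source="inline-note.md",
        content_type="text/markdown",
    )
    return f"note split into {len(docs)} chunks"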
"},{"location":"API_Documentation/utils/","title":"utils","text":"

Various utilities used across the project.

"},{"location":"API_Documentation/utils/#cat.utils.get_base_path","title":"get_base_path()","text":"

Allows exposing the base path.

Source code in cat/utils.py
def get_base_path():\n    \"\"\"Allows exposing the base path.\"\"\"\n    return \"cat/\"\n
"},{"location":"API_Documentation/utils/#cat.utils.get_base_url","title":"get_base_url()","text":"

Allows exposing the base url.

Source code in cat/utils.py
def get_base_url():\n    \"\"\"Allows exposing the base url.\"\"\"\n    secure = \"s\" if get_env(\"CCAT_CORE_USE_SECURE_PROTOCOLS\") in (\"true\", \"1\") else \"\"\n    cat_host = get_env(\"CCAT_CORE_HOST\")\n    cat_port = get_env(\"CCAT_CORE_PORT\")\n    return f\"http{secure}://{cat_host}:{cat_port}/\"\n
"},{"location":"API_Documentation/utils/#cat.utils.get_plugins_path","title":"get_plugins_path()","text":"

Allows exposing the plugins' path.

Source code in cat/utils.py
def get_plugins_path():\n    \"\"\"Allows exposing the plugins' path.\"\"\"\n    return os.path.join(get_base_path(), \"plugins/\")\n
"},{"location":"API_Documentation/utils/#cat.utils.get_static_path","title":"get_static_path()","text":"

Allows exposing the static files' path.

Source code in cat/utils.py
def get_static_path():\n    \"\"\"Allows exposing the static files' path.\"\"\"\n    return os.path.join(get_base_path(), \"static/\")\n
"},{"location":"API_Documentation/utils/#cat.utils.get_static_url","title":"get_static_url()","text":"

Allows exposing the static server url.

Source code in cat/utils.py
def get_static_url():\n    \"\"\"Allows exposing the static server url.\"\"\"\n    return get_base_url() + \"static/\"\n
"},{"location":"API_Documentation/utils/#cat.utils.match_prompt_variables","title":"match_prompt_variables(prompt_variables, prompt_template)","text":"

Ensure prompt variables and prompt placeholders match, so that mismatches do not cause issues.

Source code in cat/utils.py
def match_prompt_variables(\n        prompt_variables: Dict,\n        prompt_template: str\n    ) -> Tuple[Dict, str]:\n    \"\"\"Ensure prompt variables and prompt placeholders map, so there are no issues on mismatches\"\"\"\n\n    tmp_prompt = PromptTemplate.from_template(\n        template=prompt_template\n    )\n\n    # outer set difference\n    prompt_mismatches = set(prompt_variables.keys()) ^ set(tmp_prompt.input_variables)\n\n    # clean up\n    for m in prompt_mismatches:\n        if m in prompt_variables.keys():\n            log.warning(f\"Prompt variable '{m}' not found in prompt template, removed\")\n            del prompt_variables[m]\n        if m in tmp_prompt.input_variables:\n            prompt_template = \\\n                prompt_template.replace(\"{\" + m + \"}\", \"\")\n            log.warning(f\"Placeholder '{m}' not found in prompt variables, removed\")\n\n    return prompt_variables, prompt_template\n
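What the function does to a mismatched pair, derived directly from the code above (the variable and placeholder names are illustrative):

from cat.utils import match_prompt_variables

variables = {"context": "some recalled text", "tone": "playful"}
template = "Answer using {context} and reply in {language}."
variables, template = match_prompt_variables(variables, template)
# "tone" is dropped from the variables (no matching placeholder) and
# "{language}" is stripped from the template (no matching variable)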
"},{"location":"API_Documentation/utils/#cat.utils.to_camel_case","title":"to_camel_case(text)","text":"

Format string to camel case.

Takes a string of words separated by either hyphens or underscores and returns a string of words in camel case.

Parameters:

- text (str, required): String of hyphen- or underscore-separated words.

Returns:

- str: Camel case formatted string.

Source code in cat/utils.py
def to_camel_case(text: str) -> str:\n    \"\"\"Format string to camel case.\n\n    Takes a string of words separated by either hyphens or underscores and returns a string of words in camel case.\n\n    Parameters\n    ----------\n    text : str\n        String of hyphens or underscores separated words.\n\n    Returns\n    -------\n    str\n        Camel case formatted string.\n    \"\"\"\n    s = text.replace(\"-\", \" \").replace(\"_\", \" \").capitalize()\n    s = s.split()\n    if len(text) == 0:\n        return text\n    return s[0] + \"\".join(i.capitalize() for i in s[1:])\n
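Expected outputs, following the implementation above:

from cat.utils import to_camel_case

to_camel_case("mad_hatter")       # -> "MadHatter"
to_camel_case("cheshire-cat-ai")  # -> "CheshireCatAi"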
"},{"location":"API_Documentation/utils/#cat.utils.verbal_timedelta","title":"verbal_timedelta(td)","text":"

Convert a timedelta into human-readable form.

The function takes a timedelta and converts it to a human-readable string format.

Parameters:

- td (timedelta, required): Difference between two dates.

Returns:

- str: Human-readable string of time difference.

Notes

This method is used to give the Language Model time information about the memories retrieved from the vector database.

Examples:

>>> verbal_timedelta(timedelta(days=2, weeks=1))\n'1 weeks ago'\n
Source code in cat/utils.py
def verbal_timedelta(td: timedelta) -> str:\n    \"\"\"Convert a timedelta into human-readable form.\n\n    The function takes a timedelta and converts it to a human-readable string format.\n\n    Parameters\n    ----------\n    td : timedelta\n        Difference between two dates.\n\n    Returns\n    -------\n    str\n        Human-readable string of time difference.\n\n    Notes\n    -----\n    This method is used to give the Language Model time information about the memories retrieved from\n    the vector database.\n\n    Examples\n    --------\n    >>> verbal_timedelta(timedelta(days=2, weeks=1))\n    '1 weeks ago'\n    \"\"\"\n\n    if td.days != 0:\n        abs_days = abs(td.days)\n        if abs_days > 7:\n            abs_delta = \"{} weeks\".format(abs_days // 7)\n        else:\n            abs_delta = \"{} days\".format(abs_days)\n    else:\n        abs_minutes = abs(td.seconds) // 60\n        if abs_minutes > 60:\n            abs_delta = \"{} hours\".format(abs_minutes // 60)\n        else:\n            abs_delta = \"{} minutes\".format(abs_minutes)\n    return \"{} ago\".format(abs_delta)\n
"},{"location":"API_Documentation/agents/base_agent/","title":"base_agent","text":""},{"location":"API_Documentation/agents/form_agent/","title":"form_agent","text":""},{"location":"API_Documentation/agents/main_agent/","title":"main_agent","text":""},{"location":"API_Documentation/agents/main_agent/#cat.agents.main_agent.MainAgent","title":"MainAgent","text":"

Bases: BaseAgent

Main Agent. This class manages sub agents that in turn use the LLM.

Source code in cat/agents/main_agent.py
class MainAgent(BaseAgent):\n    \"\"\"Main Agent.\n    This class manages sub agents that in turn use the LLM.\n    \"\"\"\n\n    def __init__(self):\n        self.mad_hatter = MadHatter()\n\n        if get_env(\"CCAT_LOG_LEVEL\") in [\"DEBUG\", \"INFO\"]:\n            self.verbose = True\n        else:\n            self.verbose = False\n\n    async def execute(self, stray) -> AgentOutput:\n        \"\"\"Execute the agents.\n\n        Returns\n        -------\n        agent_output : AgentOutput\n            Reply of the agent, instance of AgentOutput.\n        \"\"\"\n\n        # prepare input to be passed to the agent.\n        #   Info will be extracted from working memory\n        # Note: agent_input works both as a dict and as an object\n        agent_input : BaseModelDict = self.format_agent_input(stray)\n        agent_input = self.mad_hatter.execute_hook(\n            \"before_agent_starts\", agent_input, cat=stray\n        )\n\n        # store the agent input inside the working memory\n        stray.working_memory.agent_input = agent_input\n\n        # should we run the default agents?\n        fast_reply = {}\n        fast_reply = self.mad_hatter.execute_hook(\n            \"agent_fast_reply\", fast_reply, cat=stray\n        )\n        if isinstance(fast_reply, AgentOutput):\n            return fast_reply\n        if isinstance(fast_reply, dict) and \"output\" in fast_reply:\n            return AgentOutput(**fast_reply)\n\n        # obtain prompt parts from plugins\n        prompt_prefix = self.mad_hatter.execute_hook(\n            \"agent_prompt_prefix\", prompts.MAIN_PROMPT_PREFIX, cat=stray\n        )\n        prompt_suffix = self.mad_hatter.execute_hook(\n            \"agent_prompt_suffix\", prompts.MAIN_PROMPT_SUFFIX, cat=stray\n        )\n\n        # run tools and forms\n        procedures_agent = ProceduresAgent()\n        procedures_agent_out : AgentOutput = await procedures_agent.execute(stray)\n        if procedures_agent_out.return_direct:\n            return procedures_agent_out\n\n        # we run memory agent if:\n        # - no procedures were recalled or selected or\n        # - procedures have all return_direct=False\n        memory_agent = MemoryAgent()\n        memory_agent_out : AgentOutput = await memory_agent.execute(\n            # TODO: should all agents only receive stray?\n            stray, prompt_prefix, prompt_suffix\n        )\n\n        memory_agent_out.intermediate_steps += procedures_agent_out.intermediate_steps\n\n        return memory_agent_out\n\n    def format_agent_input(self, stray):\n        \"\"\"Format the input for the Agent.\n\n        The method formats the strings of recalled memories and chat history that will be provided to the Langchain\n        Agent and inserted in the prompt.\n\n        Returns\n        -------\n        BaseModelDict\n            Formatted output to be parsed by the Agent executor. 
Works both as a dict and as an object.\n\n        Notes\n        -----\n        The context of memories and conversation history is properly formatted before being parsed by the agent and, hence, the\n        information is inserted in the main prompt.\n        All the formatting pipeline is hookable and memories can be edited.\n\n        See Also\n        --------\n        agent_prompt_episodic_memories\n        agent_prompt_declarative_memories\n        agent_prompt_chat_history\n        \"\"\"\n\n        # format memories to be inserted in the prompt\n        episodic_memory_formatted_content = self.agent_prompt_episodic_memories(\n            stray.working_memory.episodic_memories\n        )\n        declarative_memory_formatted_content = self.agent_prompt_declarative_memories(\n            stray.working_memory.declarative_memories\n        )\n\n        # format conversation history to be inserted in the prompt\n        # TODOV2: take away\n        conversation_history_formatted_content = stray.stringify_chat_history()\n\n        return BaseModelDict(**{\n            \"episodic_memory\": episodic_memory_formatted_content,\n            \"declarative_memory\": declarative_memory_formatted_content,\n            \"tools_output\": \"\",\n            \"input\": stray.working_memory.user_message_json.text,  # TODOV2: take away\n            \"chat_history\": conversation_history_formatted_content, # TODOV2: take away\n        })\n\n    def agent_prompt_episodic_memories(\n        self, memory_docs: List[Tuple[Document, float]]\n    ) -> str:\n        \"\"\"Formats episodic memories to be inserted into the prompt.\n\n        Parameters\n        ----------\n        memory_docs : List[Document]\n            List of Langchain `Document` retrieved from the episodic memory.\n\n        Returns\n        -------\n        memory_content : str\n            String of retrieved context from the episodic memory.\n        \"\"\"\n\n        # convert docs to simple text\n        memory_texts = [m[0].page_content.replace(\"\\n\", \". \") for m in memory_docs]\n\n        # add time information (e.g. \"2 days ago\")\n        memory_timestamps = []\n        for m in memory_docs:\n            # Get Time information in the Document metadata\n            timestamp = m[0].metadata[\"when\"]\n\n            # Get Current Time - Time when memory was stored\n            delta = timedelta(seconds=(time.time() - timestamp))\n\n            # Convert and Save timestamps to Verbal (e.g. \"2 days ago\")\n            memory_timestamps.append(f\" ({verbal_timedelta(delta)})\")\n\n        # Join Document text content with related temporal information\n        memory_texts = [a + b for a, b in zip(memory_texts, memory_timestamps)]\n\n        # Format the memories for the output\n        memories_separator = \"\\n  - \"\n        memory_content = (\n            \"## Context of things the Human said in the past: \"\n            + memories_separator\n            + memories_separator.join(memory_texts)\n        )\n\n        # if no data is retrieved from memory don't write anything in the prompt\n        if len(memory_texts) == 0:\n            memory_content = \"\"\n\n        return memory_content\n\n    def agent_prompt_declarative_memories(\n        self, memory_docs: List[Tuple[Document, float]]\n    ) -> str:\n        \"\"\"Formats the declarative memories for the prompt context.\n        Such context is placed in the `agent_prompt_prefix` in the place held by {declarative_memory}.\n\n        Parameters\n        ----------\n        memory_docs : List[Document]\n            list of Langchain `Document` retrieved from the declarative memory.\n\n        Returns\n        -------\n        memory_content : str\n            String of retrieved context from the declarative memory.\n        \"\"\"\n\n        # convert docs to simple text\n        memory_texts = [m[0].page_content.replace(\"\\n\", \". \") for m in memory_docs]\n\n        # add source information (e.g. \"extracted from file.txt\")\n        memory_sources = []\n        for m in memory_docs:\n            # Get and save the source of the memory\n            source = m[0].metadata[\"source\"]\n            memory_sources.append(f\" (extracted from {source})\")\n\n        # Join Document text content with related source information\n        memory_texts = [a + b for a, b in zip(memory_texts, memory_sources)]\n\n        # Format the memories for the output\n        memories_separator = \"\\n  - \"\n\n        memory_content = (\n            \"## Context of documents containing relevant information: \"\n            + memories_separator\n            + memories_separator.join(memory_texts)\n        )\n\n        # if no data is retrieved from memory don't write anything in the prompt\n        if len(memory_texts) == 0:\n            memory_content = \"\"\n\n        return memory_content\n
"},{"location":"API_Documentation/agents/main_agent/#cat.agents.main_agent.MainAgent.agent_prompt_declarative_memories","title":"agent_prompt_declarative_memories(memory_docs)","text":"

Formats the declarative memories for the prompt context. Such context is placed in the agent_prompt_prefix in the place held by {declarative_memory}.

Parameters:

- memory_docs (List[Document], required): List of Langchain Document retrieved from the declarative memory.

Returns:

- memory_content (str): String of retrieved context from the declarative memory.

Source code in cat/agents/main_agent.py
def agent_prompt_declarative_memories(\n    self, memory_docs: List[Tuple[Document, float]]\n) -> str:\n    \"\"\"Formats the declarative memories for the prompt context.\n    Such context is placed in the `agent_prompt_prefix` in the place held by {declarative_memory}.\n\n    Parameters\n    ----------\n    memory_docs : List[Document]\n        list of Langchain `Document` retrieved from the declarative memory.\n\n    Returns\n    -------\n    memory_content : str\n        String of retrieved context from the declarative memory.\n    \"\"\"\n\n    # convert docs to simple text\n    memory_texts = [m[0].page_content.replace(\"\\n\", \". \") for m in memory_docs]\n\n    # add source information (e.g. \"extracted from file.txt\")\n    memory_sources = []\n    for m in memory_docs:\n        # Get and save the source of the memory\n        source = m[0].metadata[\"source\"]\n        memory_sources.append(f\" (extracted from {source})\")\n\n    # Join Document text content with related source information\n    memory_texts = [a + b for a, b in zip(memory_texts, memory_sources)]\n\n    # Format the memories for the output\n    memories_separator = \"\\n  - \"\n\n    memory_content = (\n        \"## Context of documents containing relevant information: \"\n        + memories_separator\n        + memories_separator.join(memory_texts)\n    )\n\n    # if no data is retrieved from memory don't write anything in the prompt\n    if len(memory_texts) == 0:\n        memory_content = \"\"\n\n    return memory_content\n
"},{"location":"API_Documentation/agents/main_agent/#cat.agents.main_agent.MainAgent.agent_prompt_episodic_memories","title":"agent_prompt_episodic_memories(memory_docs)","text":"

Formats episodic memories to be inserted into the prompt.

Parameters:

- memory_docs (List[Document], required): List of Langchain Document retrieved from the episodic memory.

Returns:

- memory_content (str): String of retrieved context from the episodic memory.

Source code in cat/agents/main_agent.py
def agent_prompt_episodic_memories(\n    self, memory_docs: List[Tuple[Document, float]]\n) -> str:\n    \"\"\"Formats episodic memories to be inserted into the prompt.\n\n    Parameters\n    ----------\n    memory_docs : List[Document]\n        List of Langchain `Document` retrieved from the episodic memory.\n\n    Returns\n    -------\n    memory_content : str\n        String of retrieved context from the episodic memory.\n    \"\"\"\n\n    # convert docs to simple text\n    memory_texts = [m[0].page_content.replace(\"\\n\", \". \") for m in memory_docs]\n\n    # add time information (e.g. \"2 days ago\")\n    memory_timestamps = []\n    for m in memory_docs:\n        # Get Time information in the Document metadata\n        timestamp = m[0].metadata[\"when\"]\n\n        # Get Current Time - Time when memory was stored\n        delta = timedelta(seconds=(time.time() - timestamp))\n\n        # Convert and Save timestamps to Verbal (e.g. \"2 days ago\")\n        memory_timestamps.append(f\" ({verbal_timedelta(delta)})\")\n\n    # Join Document text content with related temporal information\n    memory_texts = [a + b for a, b in zip(memory_texts, memory_timestamps)]\n\n    # Format the memories for the output\n    memories_separator = \"\\n  - \"\n    memory_content = (\n        \"## Context of things the Human said in the past: \"\n        + memories_separator\n        + memories_separator.join(memory_texts)\n    )\n\n    # if no data is retrieved from memory don't write anything in the prompt\n    if len(memory_texts) == 0:\n        memory_content = \"\"\n\n    return memory_content\n
"},{"location":"API_Documentation/agents/main_agent/#cat.agents.main_agent.MainAgent.execute","title":"execute(stray) async","text":"

Execute the agents.

Returns:

- agent_output (AgentOutput): Reply of the agent, instance of AgentOutput.

Source code in cat/agents/main_agent.py
async def execute(self, stray) -> AgentOutput:\n    \"\"\"Execute the agents.\n\n    Returns\n    -------\n    agent_output : AgentOutput\n        Reply of the agent, instance of AgentOutput.\n    \"\"\"\n\n    # prepare input to be passed to the agent.\n    #   Info will be extracted from working memory\n    # Note: agent_input works both as a dict and as an object\n    agent_input : BaseModelDict = self.format_agent_input(stray)\n    agent_input = self.mad_hatter.execute_hook(\n        \"before_agent_starts\", agent_input, cat=stray\n    )\n\n    # store the agent input inside the working memory\n    stray.working_memory.agent_input = agent_input\n\n    # should we run the default agents?\n    fast_reply = {}\n    fast_reply = self.mad_hatter.execute_hook(\n        \"agent_fast_reply\", fast_reply, cat=stray\n    )\n    if isinstance(fast_reply, AgentOutput):\n        return fast_reply\n    if isinstance(fast_reply, dict) and \"output\" in fast_reply:\n        return AgentOutput(**fast_reply)\n\n    # obtain prompt parts from plugins\n    prompt_prefix = self.mad_hatter.execute_hook(\n        \"agent_prompt_prefix\", prompts.MAIN_PROMPT_PREFIX, cat=stray\n    )\n    prompt_suffix = self.mad_hatter.execute_hook(\n        \"agent_prompt_suffix\", prompts.MAIN_PROMPT_SUFFIX, cat=stray\n    )\n\n    # run tools and forms\n    procedures_agent = ProceduresAgent()\n    procedures_agent_out : AgentOutput = await procedures_agent.execute(stray)\n    if procedures_agent_out.return_direct:\n        return procedures_agent_out\n\n    # we run memory agent if:\n    # - no procedures were recalled or selected or\n    # - procedures have all return_direct=False\n    memory_agent = MemoryAgent()\n    memory_agent_out : AgentOutput = await memory_agent.execute(\n        # TODO: should all agents only receive stray?\n        stray, prompt_prefix, prompt_suffix\n    )\n\n    memory_agent_out.intermediate_steps += procedures_agent_out.intermediate_steps\n\n    return memory_agent_out\n
"},{"location":"API_Documentation/agents/main_agent/#cat.agents.main_agent.MainAgent.format_agent_input","title":"format_agent_input(stray)","text":"

Format the input for the Agent.

The method formats the strings of recalled memories and chat history that will be provided to the Langchain Agent and inserted in the prompt.

Returns:

- BaseModelDict: Formatted output to be parsed by the Agent executor. Works both as a dict and as an object.

Notes

The context of memories and conversation history is properly formatted before being parsed by the agent and, hence, the information is inserted in the main prompt. All the formatting pipeline is hookable and memories can be edited.

See Also

agent_prompt_episodic_memories, agent_prompt_declarative_memories, agent_prompt_chat_history

Source code in cat/agents/main_agent.py
def format_agent_input(self, stray):\n    \"\"\"Format the input for the Agent.\n\n    The method formats the strings of recalled memories and chat history that will be provided to the Langchain\n    Agent and inserted in the prompt.\n\n    Returns\n    -------\n    BaseModelDict\n        Formatted output to be parsed by the Agent executor. Works both as a dict and as an object.\n\n    Notes\n    -----\n    The context of memories and conversation history is properly formatted before being parsed by the agent and, hence, the\n    information is inserted in the main prompt.\n    All the formatting pipeline is hookable and memories can be edited.\n\n    See Also\n    --------\n    agent_prompt_episodic_memories\n    agent_prompt_declarative_memories\n    agent_prompt_chat_history\n    \"\"\"\n\n    # format memories to be inserted in the prompt\n    episodic_memory_formatted_content = self.agent_prompt_episodic_memories(\n        stray.working_memory.episodic_memories\n    )\n    declarative_memory_formatted_content = self.agent_prompt_declarative_memories(\n        stray.working_memory.declarative_memories\n    )\n\n    # format conversation history to be inserted in the prompt\n    # TODOV2: take away\n    conversation_history_formatted_content = stray.stringify_chat_history()\n\n    return BaseModelDict(**{\n        \"episodic_memory\": episodic_memory_formatted_content,\n        \"declarative_memory\": declarative_memory_formatted_content,\n        \"tools_output\": \"\",\n        \"input\": stray.working_memory.user_message_json.text,  # TODOV2: take away\n        \"chat_history\": conversation_history_formatted_content, # TODOV2: take away\n    })\n
"},{"location":"API_Documentation/agents/memory_agent/","title":"memory_agent","text":""},{"location":"API_Documentation/agents/procedures_agent/","title":"procedures_agent","text":""},{"location":"API_Documentation/auth/auth_utils/","title":"auth_utils","text":""},{"location":"API_Documentation/auth/auth_utils/#cat.auth.auth_utils.is_jwt","title":"is_jwt(token)","text":"

Returns whether a given string is a JWT.

Source code in cat/auth/auth_utils.py
def is_jwt(token: str) -> bool:\n    \"\"\"\n    Returns whether a given string is a JWT.\n    \"\"\"\n    try:\n        # Decode the JWT without verification to check its structure\n        jwt.decode(token, options={\"verify_signature\": False})\n        return True\n    except InvalidTokenError:\n        return False\n
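Illustrative calls (the first token is a structurally valid but unsigned JWT made up for this example):

from cat.auth.auth_utils import is_jwt

is_jwt("eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhbGljZSJ9.sig")  # True: it parses as a JWT
is_jwt("not-a-token")                                     # False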
"},{"location":"API_Documentation/looking_glass/cheshire_cat/","title":"cheshire_cat","text":""},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat","title":"CheshireCat","text":"

The Cheshire Cat.

This is the main class that manages everything.

Attributes:

- todo (list): Yet to be written.

Source code in cat/looking_glass/cheshire_cat.py
@singleton\nclass CheshireCat:\n    \"\"\"The Cheshire Cat.\n\n    This is the main class that manages everything.\n\n    Attributes\n    ----------\n    todo : list\n        Yet to be written.\n\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Cat initialization.\n\n        At init time the Cat executes the bootstrap.\n        \"\"\"\n\n        # bootstrap the Cat! ^._.^\n\n        # load AuthHandler\n        self.load_auth()\n\n        # Start scheduling system\n        self.white_rabbit = WhiteRabbit()\n\n        # instantiate MadHatter (loads all plugins' hooks and tools)\n        self.mad_hatter = MadHatter()\n\n        # allows plugins to do something before cat components are loaded\n        self.mad_hatter.execute_hook(\"before_cat_bootstrap\", cat=self)\n\n        # load LLM and embedder\n        self.load_natural_language()\n\n        # Load memories (vector collections and working_memory)\n        self.load_memory()\n\n        # After memory is loaded, we can get/create tools embeddings\n        # every time the mad_hatter finishes syncing hooks, tools and forms, it will notify the Cat (so it can embed tools in vector memory)\n        self.mad_hatter.on_finish_plugins_sync_callback = self.embed_procedures\n        self.embed_procedures()  # first time launched manually\n\n        # Main agent instance (for reasoning)\n        self.main_agent = MainAgent()\n\n        # Rabbit Hole Instance\n        self.rabbit_hole = RabbitHole(self)  # :(\n\n        # allows plugins to do something after the cat bootstrap is complete\n        self.mad_hatter.execute_hook(\"after_cat_bootstrap\", cat=self)\n\n    def load_natural_language(self):\n        \"\"\"Load Natural Language related objects.\n\n        The method exposes in the Cat all the NLP related stuff. Specifically, it sets the language models\n        (LLM and Embedder).\n\n        Warnings\n        --------\n        When using small Language Models it is suggested to turn off the memories and make the main prompt smaller\n        to prevent them to fail.\n\n        See Also\n        --------\n        agent_prompt_prefix\n        \"\"\"\n        # LLM and embedder\n        self._llm = self.load_language_model()\n        self.embedder = self.load_language_embedder()\n\n    def load_language_model(self) -> BaseLanguageModel:\n        \"\"\"Large Language Model (LLM) selection at bootstrap time.\n\n        Returns\n        -------\n        llm : BaseLanguageModel\n            Langchain `BaseLanguageModel` instance of the selected model.\n\n        Notes\n        -----\n        Bootstrapping is the process of loading the plugins, the natural language objects (e.g. 
the LLM), the memories,\n        the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.\n\n        \"\"\"\n\n        selected_llm = crud.get_setting_by_name(name=\"llm_selected\")\n\n        if selected_llm is None:\n            # return default LLM\n            llm = LLMDefaultConfig.get_llm_from_config({})\n\n        else:\n            # get LLM factory class\n            selected_llm_class = selected_llm[\"value\"][\"name\"]\n            FactoryClass = get_llm_from_name(selected_llm_class)\n\n            # obtain configuration and instantiate LLM\n            selected_llm_config = crud.get_setting_by_name(name=selected_llm_class)\n            try:\n                llm = FactoryClass.get_llm_from_config(selected_llm_config[\"value\"])\n            except Exception:\n                import traceback\n\n                traceback.print_exc()\n                llm = LLMDefaultConfig.get_llm_from_config({})\n\n        return llm\n\n    def load_language_embedder(self) -> embedders.EmbedderSettings:\n        \"\"\"Hook into the  embedder selection.\n\n        Allows to modify how the Cat selects the embedder at bootstrap time.\n\n        Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,\n        the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.\n\n        Parameters\n        ----------\n        cat: CheshireCat\n            Cheshire Cat instance.\n\n        Returns\n        -------\n        embedder : Embeddings\n            Selected embedder model.\n        \"\"\"\n        # Embedding LLM\n\n        selected_embedder = crud.get_setting_by_name(name=\"embedder_selected\")\n\n        if selected_embedder is not None:\n            # get Embedder factory class\n            selected_embedder_class = selected_embedder[\"value\"][\"name\"]\n            FactoryClass = get_embedder_from_name(selected_embedder_class)\n\n            # obtain configuration and instantiate Embedder\n            selected_embedder_config = crud.get_setting_by_name(\n                name=selected_embedder_class\n            )\n            try:\n                embedder = FactoryClass.get_embedder_from_config(\n                    selected_embedder_config[\"value\"]\n                )\n            except AttributeError:\n                import traceback\n\n                traceback.print_exc()\n                embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})\n            return embedder\n\n        # OpenAI embedder\n        if type(self._llm) in [OpenAI, ChatOpenAI]:\n            embedder = embedders.EmbedderOpenAIConfig.get_embedder_from_config(\n                {\n                    \"openai_api_key\": self._llm.openai_api_key,\n                }\n            )\n\n        # For Azure avoid automatic embedder selection\n\n        # Cohere\n        elif type(self._llm) in [Cohere]:\n            embedder = embedders.EmbedderCohereConfig.get_embedder_from_config(\n                {\n                    \"cohere_api_key\": self._llm.cohere_api_key,\n                    \"model\": \"embed-multilingual-v2.0\",\n                    # Now the best model for embeddings is embed-multilingual-v2.0\n                }\n            )\n\n        elif type(self._llm) in [ChatGoogleGenerativeAI]:\n            embedder = embedders.EmbedderGeminiChatConfig.get_embedder_from_config(\n                {\n                    \"model\": \"models/embedding-001\",\n                    \"google_api_key\": self._llm.google_api_key,\n                }\n      
      )\n\n        else:\n            # If no embedder matches vendor, and no external embedder is configured, we use the DumbEmbedder.\n            #   `This embedder is not a model properly trained\n            #    and this makes it not suitable to effectively embed text,\n            #    \"but it does not know this and embeds anyway\".` - cit. Nicola Corbellini\n            embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})\n\n        return embedder\n\n    def load_auth(self):\n\n        # Custom auth_handler # TODOAUTH: change the name to custom_auth\n        selected_auth_handler = crud.get_setting_by_name(name=\"auth_handler_selected\")\n\n        # if no auth_handler is saved, use default one and save to db\n        if selected_auth_handler is None:\n            # create the auth settings\n            crud.upsert_setting_by_name(\n                models.Setting(\n                    name=\"CoreOnlyAuthConfig\", category=\"auth_handler_factory\", value={}\n                )\n            )\n            crud.upsert_setting_by_name(\n                models.Setting(\n                    name=\"auth_handler_selected\",\n                    category=\"auth_handler_factory\",\n                    value={\"name\": \"CoreOnlyAuthConfig\"},\n                )\n            )\n\n            # reload from db\n            selected_auth_handler = crud.get_setting_by_name(\n                name=\"auth_handler_selected\"\n            )\n\n        # get AuthHandler factory class\n        selected_auth_handler_class = selected_auth_handler[\"value\"][\"name\"]\n        FactoryClass = get_auth_handler_from_name(selected_auth_handler_class)\n\n        # obtain configuration and instantiate AuthHandler\n        selected_auth_handler_config = crud.get_setting_by_name(\n            name=selected_auth_handler_class\n        )\n        try:\n            auth_handler = FactoryClass.get_auth_handler_from_config(\n                selected_auth_handler_config[\"value\"]\n            )\n        except Exception:\n            import traceback\n\n            traceback.print_exc()\n\n            auth_handler = (\n                auth_handlers.CoreOnlyAuthConfig.get_auth_handler_from_config({})\n            )\n\n        self.custom_auth_handler = auth_handler\n        self.core_auth_handler = CoreAuthHandler()\n\n    def load_memory(self):\n        \"\"\"Load LongTerMemory and WorkingMemory.\"\"\"\n        # Memory\n\n        # Get embedder size (langchain classes do not store it)\n        embedder_size = len(self.embedder.embed_query(\"hello world\"))\n\n        # Get embedder name (useful for for vectorstore aliases)\n        if hasattr(self.embedder, \"model\"):\n            embedder_name = self.embedder.model\n        elif hasattr(self.embedder, \"repo_id\"):\n            embedder_name = self.embedder.repo_id\n        else:\n            embedder_name = \"default_embedder\"\n\n        # instantiate long term memory\n        vector_memory_config = {\n            \"embedder_name\": embedder_name,\n            \"embedder_size\": embedder_size,\n        }\n        self.memory = LongTermMemory(vector_memory_config=vector_memory_config)\n\n    def build_embedded_procedures_hashes(self, embedded_procedures):\n        hashes = {}\n        for ep in embedded_procedures:\n            # log.warning(ep)\n            metadata = ep.payload[\"metadata\"]\n            content = ep.payload[\"page_content\"]\n            source = metadata[\"source\"]\n            # there may be legacy points with no 
trigger_type\n            trigger_type = metadata.get(\"trigger_type\", \"unsupported\")\n\n            p_hash = f\"{source}.{trigger_type}.{content}\"\n            hashes[p_hash] = ep.id\n\n        return hashes\n\n    def build_active_procedures_hashes(self, active_procedures):\n        hashes = {}\n        for ap in active_procedures:\n            for trigger_type, trigger_list in ap.triggers_map.items():\n                for trigger_content in trigger_list:\n                    p_hash = f\"{ap.name}.{trigger_type}.{trigger_content}\"\n                    hashes[p_hash] = {\n                        \"obj\": ap,\n                        \"source\": ap.name,\n                        \"type\": ap.procedure_type,\n                        \"trigger_type\": trigger_type,\n                        \"content\": trigger_content,\n                    }\n        return hashes\n\n    def embed_procedures(self):\n        # Retrieve from vectorDB all procedural embeddings\n        embedded_procedures = self.memory.vectors.procedural.get_all_points()\n        embedded_procedures_hashes = self.build_embedded_procedures_hashes(\n            embedded_procedures\n        )\n\n        # Easy access to active procedures in mad_hatter (source of truth!)\n        active_procedures_hashes = self.build_active_procedures_hashes(\n            self.mad_hatter.procedures\n        )\n\n        # points_to_be_kept     = set(active_procedures_hashes.keys()) and set(embedded_procedures_hashes.keys()) not necessary\n        points_to_be_deleted = set(embedded_procedures_hashes.keys()) - set(\n            active_procedures_hashes.keys()\n        )\n        points_to_be_embedded = set(active_procedures_hashes.keys()) - set(\n            embedded_procedures_hashes.keys()\n        )\n\n        points_to_be_deleted_ids = [\n            embedded_procedures_hashes[p] for p in points_to_be_deleted\n        ]\n        if points_to_be_deleted_ids:\n            log.warning(f\"Deleting triggers: {points_to_be_deleted}\")\n            self.memory.vectors.procedural.delete_points(points_to_be_deleted_ids)\n\n        active_triggers_to_be_embedded = [\n            active_procedures_hashes[p] for p in points_to_be_embedded\n        ]\n        for t in active_triggers_to_be_embedded:\n            metadata = {\n                \"source\": t[\"source\"],\n                \"type\": t[\"type\"],\n                \"trigger_type\": t[\"trigger_type\"],\n                \"when\": time.time(),\n            }\n\n            trigger_embedding = self.embedder.embed_documents([t[\"content\"]])\n            self.memory.vectors.procedural.add_point(\n                t[\"content\"],\n                trigger_embedding[0],\n                metadata,\n            )\n\n            log.warning(\n                f\"Newly embedded {t['type']} trigger: {t['source']}, {t['trigger_type']}, {t['content']}\"\n            )\n\n    def send_ws_message(self, content: str, msg_type=\"notification\"):\n        log.error(\"No websocket connection open\")\n\n    # REFACTOR: cat.llm should be available here, without streaming clearly\n    # (one could be interested in calling the LLM anytime, not only when there is a session)\n    def llm(self, prompt, *args, **kwargs) -> str:\n        \"\"\"Generate a response using the LLM model.\n\n        This method is useful for generating a response with both a chat and a completion model using the same syntax\n\n        Parameters\n        ----------\n        prompt : str\n            The prompt for generating the response.\n\n    
    Returns\n        -------\n        str\n            The generated response.\n\n        \"\"\"\n\n        # Add a token counter to the callbacks\n        caller = utils.get_caller_info()\n\n        # here we deal with motherfucking langchain\n        prompt = ChatPromptTemplate(\n            messages=[\n                SystemMessage(content=prompt)\n            ]\n        )\n\n        chain = (\n            prompt\n            | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f\"{caller} prompt\"))\n            | self._llm\n            | RunnableLambda(lambda x: utils.langchain_log_output(x, f\"{caller} prompt output\"))\n            | StrOutputParser()\n        )\n\n        output = chain.invoke(\n            {}, # in case we need to pass info to the template\n        )\n\n        return output\n
"},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat.__init__","title":"__init__()","text":"

Cat initialization.

At init time the Cat executes the bootstrap.

Source code in cat/looking_glass/cheshire_cat.py
def __init__(self):\n    \"\"\"Cat initialization.\n\n    At init time the Cat executes the bootstrap.\n    \"\"\"\n\n    # bootstrap the Cat! ^._.^\n\n    # load AuthHandler\n    self.load_auth()\n\n    # Start scheduling system\n    self.white_rabbit = WhiteRabbit()\n\n    # instantiate MadHatter (loads all plugins' hooks and tools)\n    self.mad_hatter = MadHatter()\n\n    # allows plugins to do something before cat components are loaded\n    self.mad_hatter.execute_hook(\"before_cat_bootstrap\", cat=self)\n\n    # load LLM and embedder\n    self.load_natural_language()\n\n    # Load memories (vector collections and working_memory)\n    self.load_memory()\n\n    # After memory is loaded, we can get/create tools embeddings\n    # every time the mad_hatter finishes syncing hooks, tools and forms, it will notify the Cat (so it can embed tools in vector memory)\n    self.mad_hatter.on_finish_plugins_sync_callback = self.embed_procedures\n    self.embed_procedures()  # first time launched manually\n\n    # Main agent instance (for reasoning)\n    self.main_agent = MainAgent()\n\n    # Rabbit Hole Instance\n    self.rabbit_hole = RabbitHole(self)  # :(\n\n    # allows plugins to do something after the cat bootstrap is complete\n    self.mad_hatter.execute_hook(\"after_cat_bootstrap\", cat=self)\n
"},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat.llm","title":"llm(prompt, *args, **kwargs)","text":"

Generate a response using the LLM model.

This method is useful for generating a response with both a chat and a completion model using the same syntax.

Parameters:

Name Type Description Default prompt str

The prompt for generating the response.

required

Returns:

Type Description str

The generated response.

Source code in cat/looking_glass/cheshire_cat.py
def llm(self, prompt, *args, **kwargs) -> str:\n    \"\"\"Generate a response using the LLM model.\n\n    This method is useful for generating a response with both a chat and a completion model using the same syntax\n\n    Parameters\n    ----------\n    prompt : str\n        The prompt for generating the response.\n\n    Returns\n    -------\n    str\n        The generated response.\n\n    \"\"\"\n\n    # Add a token counter to the callbacks\n    caller = utils.get_caller_info()\n\n    # here we deal with motherfucking langchain\n    prompt = ChatPromptTemplate(\n        messages=[\n            SystemMessage(content=prompt)\n        ]\n    )\n\n    chain = (\n        prompt\n        | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f\"{caller} prompt\"))\n        | self._llm\n        | RunnableLambda(lambda x: utils.langchain_log_output(x, f\"{caller} prompt output\"))\n        | StrOutputParser()\n    )\n\n    output = chain.invoke(\n        {}, # in case we need to pass info to the template\n    )\n\n    return output\n
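Usage sketch: CheshireCat() returns the singleton (the StrayCat properties below rely on this), so a session-less completion looks roughly like the following, assuming the Cat has already bootstrapped.

ccat = CheshireCat()  # sketch: assumes bootstrap already happened\nhaiku = ccat.llm(\"Write a haiku about a grinning cat\")\nprint(haiku)\n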
"},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat.load_language_embedder","title":"load_language_embedder()","text":"

Hook into the embedder selection.

Allows modifying how the Cat selects the embedder at bootstrap time.

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.

Parameters:

Name Type Description Default cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description embedder Embeddings

Selected embedder model.

Source code in cat/looking_glass/cheshire_cat.py
def load_language_embedder(self) -> embedders.EmbedderSettings:\n    \"\"\"Hook into the  embedder selection.\n\n    Allows to modify how the Cat selects the embedder at bootstrap time.\n\n    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,\n    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.\n\n    Parameters\n    ----------\n    cat: CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    embedder : Embeddings\n        Selected embedder model.\n    \"\"\"\n    # Embedding LLM\n\n    selected_embedder = crud.get_setting_by_name(name=\"embedder_selected\")\n\n    if selected_embedder is not None:\n        # get Embedder factory class\n        selected_embedder_class = selected_embedder[\"value\"][\"name\"]\n        FactoryClass = get_embedder_from_name(selected_embedder_class)\n\n        # obtain configuration and instantiate Embedder\n        selected_embedder_config = crud.get_setting_by_name(\n            name=selected_embedder_class\n        )\n        try:\n            embedder = FactoryClass.get_embedder_from_config(\n                selected_embedder_config[\"value\"]\n            )\n        except AttributeError:\n            import traceback\n\n            traceback.print_exc()\n            embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})\n        return embedder\n\n    # OpenAI embedder\n    if type(self._llm) in [OpenAI, ChatOpenAI]:\n        embedder = embedders.EmbedderOpenAIConfig.get_embedder_from_config(\n            {\n                \"openai_api_key\": self._llm.openai_api_key,\n            }\n        )\n\n    # For Azure avoid automatic embedder selection\n\n    # Cohere\n    elif type(self._llm) in [Cohere]:\n        embedder = embedders.EmbedderCohereConfig.get_embedder_from_config(\n            {\n                \"cohere_api_key\": self._llm.cohere_api_key,\n                \"model\": \"embed-multilingual-v2.0\",\n                # Now the best model for embeddings is embed-multilingual-v2.0\n            }\n        )\n\n    elif type(self._llm) in [ChatGoogleGenerativeAI]:\n        embedder = embedders.EmbedderGeminiChatConfig.get_embedder_from_config(\n            {\n                \"model\": \"models/embedding-001\",\n                \"google_api_key\": self._llm.google_api_key,\n            }\n        )\n\n    else:\n        # If no embedder matches vendor, and no external embedder is configured, we use the DumbEmbedder.\n        #   `This embedder is not a model properly trained\n        #    and this makes it not suitable to effectively embed text,\n        #    \"but it does not know this and embeds anyway\".` - cit. Nicola Corbellini\n        embedder = embedders.EmbedderDumbConfig.get_embedder_from_config({})\n\n    return embedder\n
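Selection is driven entirely by settings. A hedged sketch of the two records that would pick the OpenAI embedder follows; the \"embedder_factory\" category name and the placeholder key are assumptions, while crud and models.Setting mirror the load_auth code above.

crud.upsert_setting_by_name(models.Setting(\n    name=\"EmbedderOpenAIConfig\", category=\"embedder_factory\",  # category name assumed\n    value={\"openai_api_key\": \"<your key>\"},\n))\ncrud.upsert_setting_by_name(models.Setting(\n    name=\"embedder_selected\", category=\"embedder_factory\",\n    value={\"name\": \"EmbedderOpenAIConfig\"},\n))\n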
"},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat.load_language_model","title":"load_language_model()","text":"

Large Language Model (LLM) selection at bootstrap time.

Returns:

Name Type Description llm BaseLanguageModel

Langchain BaseLanguageModel instance of the selected model.

Notes

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.

Source code in cat/looking_glass/cheshire_cat.py
def load_language_model(self) -> BaseLanguageModel:\n    \"\"\"Large Language Model (LLM) selection at bootstrap time.\n\n    Returns\n    -------\n    llm : BaseLanguageModel\n        Langchain `BaseLanguageModel` instance of the selected model.\n\n    Notes\n    -----\n    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,\n    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.\n\n    \"\"\"\n\n    selected_llm = crud.get_setting_by_name(name=\"llm_selected\")\n\n    if selected_llm is None:\n        # return default LLM\n        llm = LLMDefaultConfig.get_llm_from_config({})\n\n    else:\n        # get LLM factory class\n        selected_llm_class = selected_llm[\"value\"][\"name\"]\n        FactoryClass = get_llm_from_name(selected_llm_class)\n\n        # obtain configuration and instantiate LLM\n        selected_llm_config = crud.get_setting_by_name(name=selected_llm_class)\n        try:\n            llm = FactoryClass.get_llm_from_config(selected_llm_config[\"value\"])\n        except Exception:\n            import traceback\n\n            traceback.print_exc()\n            llm = LLMDefaultConfig.get_llm_from_config({})\n\n    return llm\n
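A sketch of the lookup this method performs: the \"llm_selected\" setting stores the factory class name, and a setting under that same name holds the configuration.

selected = crud.get_setting_by_name(name=\"llm_selected\")\nif selected is not None:\n    factory_name = selected[\"value\"][\"name\"]  # the factory class to instantiate\n    config = crud.get_setting_by_name(name=factory_name)\n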
"},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat.load_memory","title":"load_memory()","text":"

Load LongTermMemory and WorkingMemory.

Source code in cat/looking_glass/cheshire_cat.py
def load_memory(self):\n    \"\"\"Load LongTermMemory and WorkingMemory.\"\"\"\n    # Memory\n\n    # Get embedder size (langchain classes do not store it)\n    embedder_size = len(self.embedder.embed_query(\"hello world\"))\n\n    # Get embedder name (useful for vectorstore aliases)\n    if hasattr(self.embedder, \"model\"):\n        embedder_name = self.embedder.model\n    elif hasattr(self.embedder, \"repo_id\"):\n        embedder_name = self.embedder.repo_id\n    else:\n        embedder_name = \"default_embedder\"\n\n    # instantiate long term memory\n    vector_memory_config = {\n        \"embedder_name\": embedder_name,\n        \"embedder_size\": embedder_size,\n    }\n    self.memory = LongTermMemory(vector_memory_config=vector_memory_config)\n
"},{"location":"API_Documentation/looking_glass/cheshire_cat/#cat.looking_glass.cheshire_cat.CheshireCat.load_natural_language","title":"load_natural_language()","text":"

Load Natural Language related objects.

The method exposes all the NLP related objects in the Cat. Specifically, it sets the language models (LLM and Embedder).

Warnings

When using small Language Models, it is suggested to turn off the memories and make the main prompt smaller to prevent them from failing.

See Also

agent_prompt_prefix

Source code in cat/looking_glass/cheshire_cat.py
def load_natural_language(self):\n    \"\"\"Load Natural Language related objects.\n\n    The method exposes all the NLP related objects in the Cat. Specifically, it sets the language models\n    (LLM and Embedder).\n\n    Warnings\n    --------\n    When using small Language Models, it is suggested to turn off the memories and make the main prompt smaller\n    to prevent them from failing.\n\n    See Also\n    --------\n    agent_prompt_prefix\n    \"\"\"\n    # LLM and embedder\n    self._llm = self.load_language_model()\n    self.embedder = self.load_language_embedder()\n
"},{"location":"API_Documentation/looking_glass/stray_cat/","title":"stray_cat","text":""},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat","title":"StrayCat","text":"

User/session-based object containing working memory and a few utility pointers.

Source code in cat/looking_glass/stray_cat.py
class StrayCat:\n    \"\"\"User/session based object containing working memory and a few utility pointers\"\"\"\n\n    def __init__(\n        self,\n        user_id: str,\n        main_loop,\n        user_data: dict = {},\n        ws: WebSocket = None,\n    ):\n        self.__user_id = user_id\n        self.working_memory = WorkingMemory()\n\n        # attribute to store ws connection\n        self.__ws = ws\n\n        self.__main_loop = main_loop\n\n        self.__loop = asyncio.new_event_loop()\n\n    def __repr__(self):\n        return f\"StrayCat(user_id={self.user_id})\"\n\n    def __send_ws_json(self, data: Any):\n        # Run the corutine in the main event loop in the main thread\n        # and wait for the result\n        asyncio.run_coroutine_threadsafe(\n            self.__ws.send_json(data), loop=self.__main_loop\n        ).result()\n\n    def __build_why(self) -> MessageWhy:\n        # build data structure for output (response and why with memories)\n        # TODO: these 3 lines are a mess, simplify\n        episodic_report = [\n            dict(d[0]) | {\"score\": float(d[1]), \"id\": d[3]}\n            for d in self.working_memory.episodic_memories\n        ]\n        declarative_report = [\n            dict(d[0]) | {\"score\": float(d[1]), \"id\": d[3]}\n            for d in self.working_memory.declarative_memories\n        ]\n        procedural_report = [\n            dict(d[0]) | {\"score\": float(d[1]), \"id\": d[3]}\n            for d in self.working_memory.procedural_memories\n        ]\n\n        # why this response?\n        why = MessageWhy(\n            input=self.working_memory.user_message_json.text,\n            intermediate_steps=[],\n            memory={\n                \"episodic\": episodic_report,\n                \"declarative\": declarative_report,\n                \"procedural\": procedural_report,\n            },\n            model_interactions=self.working_memory.model_interactions,\n        )\n\n        return why\n\n    def send_ws_message(self, content: str, msg_type: MSG_TYPES = \"notification\"):\n        \"\"\"Send a message via websocket.\n\n        This method is useful for sending a message via websocket directly without passing through the LLM\n\n        Parameters\n        ----------\n        content : str\n            The content of the message.\n        msg_type : str\n            The type of the message. Should be either `notification`, `chat`, `chat_token` or `error`\n        \"\"\"\n\n        if self.__ws is None:\n            log.warning(f\"No websocket connection is open for user {self.user_id}\")\n            return\n\n        options = get_args(MSG_TYPES)\n\n        if msg_type not in options:\n            raise ValueError(\n                f\"The message type `{msg_type}` is not valid. 
Valid types: {', '.join(options)}\"\n            )\n\n        if msg_type == \"error\":\n            self.__send_ws_json(\n                {\"type\": msg_type, \"name\": \"GenericError\", \"description\": str(content)}\n            )\n        else:\n            self.__send_ws_json({\"type\": msg_type, \"content\": content})\n\n    def send_chat_message(self, message: Union[str, CatMessage], save=False):\n        if self.__ws is None:\n            log.warning(f\"No websocket connection is open for user {self.user_id}\")\n            return\n\n        if isinstance(message, str):\n            why = self.__build_why()\n            message = CatMessage(content=message, user_id=self.user_id, why=why)\n\n        if save:\n            self.working_memory.update_conversation_history(\n                who=\"AI\", message=message[\"content\"], why=message[\"why\"]\n            )\n\n        self.__send_ws_json(message.model_dump())\n\n    def send_notification(self, content: str):\n        self.send_ws_message(content=content, msg_type=\"notification\")\n\n    def send_error(self, error: Union[str, Exception]):\n        if self.__ws is None:\n            log.warning(f\"No websocket connection is open for user {self.user_id}\")\n            return\n\n        if isinstance(error, str):\n            error_message = {\n                \"type\": \"error\",\n                \"name\": \"GenericError\",\n                \"description\": str(error),\n            }\n        else:\n            error_message = {\n                \"type\": \"error\",\n                \"name\": error.__class__.__name__,\n                \"description\": str(error),\n            }\n\n        self.__send_ws_json(error_message)\n\n    def recall_relevant_memories_to_working_memory(self, query=None):\n        \"\"\"Retrieve context from memory.\n\n        The method retrieves the relevant memories from the vector collections that are given as context to the LLM.\n        Recalled memories are stored in the working memory.\n\n        Parameters\n        ----------\n        query : str, optional\n        The query used to make a similarity search in the Cat's vector memories. 
If not provided, the query\n        will be derived from the user's message.\n\n        Notes\n        -----\n        The user's message is used as a query to make a similarity search in the Cat's vector memories.\n        Five hooks allow to customize the recall pipeline before and after it is done.\n\n        See Also\n        --------\n        cat_recall_query\n        before_cat_recalls_memories\n        before_cat_recalls_episodic_memories\n        before_cat_recalls_declarative_memories\n        before_cat_recalls_procedural_memories\n        after_cat_recalls_memories\n        \"\"\"\n        recall_query = query\n\n        if query is None:\n            # If query is not provided, use the user's message as the query\n            recall_query = self.working_memory.user_message_json.text\n\n        # We may want to search in memory\n        recall_query = self.mad_hatter.execute_hook(\n            \"cat_recall_query\", recall_query, cat=self\n        )\n        log.info(f\"Recall query: '{recall_query}'\")\n\n        # Embed recall query\n        recall_query_embedding = self.embedder.embed_query(recall_query)\n        self.working_memory.recall_query = recall_query\n\n        # keep track of embedder model usage\n        self.working_memory.model_interactions.append(\n            EmbedderModelInteraction(\n                prompt=recall_query,\n                reply=recall_query_embedding,\n                input_tokens=len(tiktoken.get_encoding(\"cl100k_base\").encode(recall_query)),\n            )\n        )\n\n        # hook to do something before recall begins\n        self.mad_hatter.execute_hook(\"before_cat_recalls_memories\", cat=self)\n\n        # Setting default recall configs for each memory\n        # TODO: can these data structures become instances of a RecallSettings class?\n        default_episodic_recall_config = {\n            \"embedding\": recall_query_embedding,\n            \"k\": 3,\n            \"threshold\": 0.7,\n            \"metadata\": {\"source\": self.user_id},\n        }\n\n        default_declarative_recall_config = {\n            \"embedding\": recall_query_embedding,\n            \"k\": 3,\n            \"threshold\": 0.7,\n            \"metadata\": None,\n        }\n\n        default_procedural_recall_config = {\n            \"embedding\": recall_query_embedding,\n            \"k\": 3,\n            \"threshold\": 0.7,\n            \"metadata\": None,\n        }\n\n        # hooks to change recall configs for each memory\n        recall_configs = [\n            self.mad_hatter.execute_hook(\n                \"before_cat_recalls_episodic_memories\",\n                default_episodic_recall_config,\n                cat=self,\n            ),\n            self.mad_hatter.execute_hook(\n                \"before_cat_recalls_declarative_memories\",\n                default_declarative_recall_config,\n                cat=self,\n            ),\n            self.mad_hatter.execute_hook(\n                \"before_cat_recalls_procedural_memories\",\n                default_procedural_recall_config,\n                cat=self,\n            ),\n        ]\n\n        memory_types = self.memory.vectors.collections.keys()\n\n        for config, memory_type in zip(recall_configs, memory_types):\n            memory_key = f\"{memory_type}_memories\"\n\n            # recall relevant memories for collection\n            vector_memory = getattr(self.memory.vectors, memory_type)\n            memories = vector_memory.recall_memories_from_embedding(**config)\n\n            
setattr(\n                self.working_memory, memory_key, memories\n            )  # self.working_memory.procedural_memories = ...\n\n        # hook to modify/enrich retrieved memories\n        self.mad_hatter.execute_hook(\"after_cat_recalls_memories\", cat=self)\n\n    def llm(self, prompt: str, stream: bool = False) -> str:\n        \"\"\"Generate a response using the LLM model.\n\n        This method is useful for generating a response with both a chat and a completion model using the same syntax\n\n        Parameters\n        ----------\n        prompt : str\n            The prompt for generating the response.\n\n        Returns\n        -------\n        str\n            The generated response.\n\n        \"\"\"\n\n        # should we stream the tokens?\n        callbacks = []\n        if stream:\n            callbacks.append(NewTokenHandler(self))\n\n        # Add a token counter to the callbacks\n        caller = utils.get_caller_info()\n        callbacks.append(ModelInteractionHandler(self, caller or \"StrayCat\"))\n\n\n\n        # here we deal with motherfucking langchain\n        prompt = ChatPromptTemplate(\n            messages=[\n                SystemMessage(content=prompt)\n                # TODO: add here optional convo history passed to the method, \n                #  or taken from working memory\n            ]\n        )\n\n        chain = (\n            prompt\n            | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f\"{caller} prompt\"))\n            | self._llm\n            | RunnableLambda(lambda x: utils.langchain_log_output(x, f\"{caller} prompt output\"))\n            | StrOutputParser()\n        )\n\n        output = chain.invoke(\n            {}, # in case we need to pass info to the template\n            config=RunnableConfig(callbacks=callbacks)\n        )\n\n        return output\n\n\n    async def __call__(self, message_dict):\n        \"\"\"Call the Cat instance.\n\n        This method is called on the user's message received from the client.\n\n        Parameters\n        ----------\n        message_dict : dict\n            Dictionary received from the Websocket client.\n        save : bool, optional\n            If True, the user's message is stored in the chat history. Default is True.\n\n        Returns\n        -------\n        final_output : dict\n            Dictionary with the Cat's answer to be sent to the client.\n\n        Notes\n        -----\n        Here happens the main pipeline of the Cat. Namely, the Cat receives the user's input and recall the memories.\n        The retrieved context is formatted properly and given in input to the Agent that uses the LLM to produce the\n        answer. 
This is formatted in a dictionary to be sent as a JSON via Websocket to the client.\n\n        \"\"\"\n\n        # Parse websocket message into UserMessage obj\n        user_message = UserMessage.model_validate(message_dict)\n        log.info(user_message)\n\n        # set a few easy access variables\n        self.working_memory.user_message_json = user_message\n\n        # keeping track of model interactions\n        self.working_memory.model_interactions = []\n\n        # hook to modify/enrich user input\n        self.working_memory.user_message_json = self.mad_hatter.execute_hook(\n            \"before_cat_reads_message\", self.working_memory.user_message_json, cat=self\n        )\n\n        # text of latest Human message\n        user_message_text = self.working_memory.user_message_json.text\n\n        # update conversation history (Human turn)\n        self.working_memory.update_conversation_history(\n            who=\"Human\", message=user_message_text\n        )\n\n        # recall episodic and declarative memories from vector collections\n        #   and store them in working_memory\n        try:\n            self.recall_relevant_memories_to_working_memory()\n        except Exception as e:\n            log.error(e)\n            traceback.print_exc(e)\n\n            err_message = (\n                \"You probably changed Embedder and old vector memory is not compatible. \"\n                \"Please delete `core/long_term_memory` folder.\"\n            )\n\n            return {\n                \"type\": \"error\",\n                \"name\": \"VectorMemoryError\",\n                \"description\": err_message,\n            }\n\n        # reply with agent\n        try:\n            agent_output: AgentOutput = await self.main_agent.execute(self)\n        except Exception as e:\n            # This error happens when the LLM\n            #   does not respect prompt instructions.\n            # We grab the LLM output here anyway, so small and\n            #   non instruction-fine-tuned models can still be used.\n            error_description = str(e)\n\n            log.error(error_description)\n            if \"Could not parse LLM output: `\" not in error_description:\n                raise e\n\n            unparsable_llm_output = error_description.replace(\n                \"Could not parse LLM output: `\", \"\"\n            ).replace(\"`\", \"\")\n            agent_output = AgentOutput(\n                output=unparsable_llm_output,\n            )\n\n        log.info(\"Agent output returned to stray:\")\n        log.info(agent_output)\n\n        doc = Document(\n            page_content=user_message_text,\n            metadata={\"source\": self.user_id, \"when\": time.time()},\n        )\n        doc = self.mad_hatter.execute_hook(\n            \"before_cat_stores_episodic_memory\", doc, cat=self\n        )\n        # store user message in episodic memory\n        # TODO: vectorize and store also conversation chunks\n        #   (not raw dialog, but summarization)\n        user_message_embedding = self.embedder.embed_documents([user_message_text])\n        _ = self.memory.vectors.episodic.add_point(\n            doc.page_content,\n            user_message_embedding[0],\n            doc.metadata,\n        )\n\n        # why this response?\n        why = self.__build_why()\n        # TODO: should these assignations be included in self.__build_why ?\n        why.intermediate_steps = agent_output.intermediate_steps\n        why.agent_output = agent_output.model_dump()\n\n        # prepare 
final cat message\n        final_output = CatMessage(\n            user_id=self.user_id, content=str(agent_output.output), why=why\n        )\n\n        # run message through plugins\n        final_output = self.mad_hatter.execute_hook(\n            \"before_cat_sends_message\", final_output, cat=self\n        )\n\n        # update conversation history (AI turn)\n        self.working_memory.update_conversation_history(\n            who=\"AI\", message=final_output.content, why=final_output.why\n        )\n\n        return final_output\n\n    def run(self, user_message_json):\n        try:\n            cat_message = self.loop.run_until_complete(self.__call__(user_message_json))\n            # send message back to client\n            self.send_chat_message(cat_message)\n        except Exception as e:\n            # Log any unexpected errors\n            log.error(e)\n            traceback.print_exc()\n            # Send error as websocket message\n            self.send_error(e)\n\n    def classify(\n        self, sentence: str, labels: List[str] | Dict[str, List[str]]\n    ) -> str | None:\n        \"\"\"Classify a sentence.\n\n        Parameters\n        ----------\n        sentence : str\n            Sentence to be classified.\n        labels : List[str] or Dict[str, List[str]]\n            Possible output categories and optional examples.\n\n        Returns\n        -------\n        label : str\n            Sentence category.\n\n        Examples\n        -------\n        >>> cat.classify(\"I feel good\", labels=[\"positive\", \"negative\"])\n        \"positive\"\n\n        Or giving examples for each category:\n\n        >>> example_labels = {\n        ...     \"positive\": [\"I feel nice\", \"happy today\"],\n        ...     \"negative\": [\"I feel bad\", \"not my best day\"],\n        ... }\n        ... 
cat.classify(\"it is a bad day\", labels=example_labels)\n        \"negative\"\n\n        \"\"\"\n\n        if isinstance(labels, dict):\n            labels_names = labels.keys()\n            examples_list = \"\\n\\nExamples:\"\n            for label, examples in labels.items():\n                for ex in examples:\n                    examples_list += f'\\n\"{ex}\" -> \"{label}\"'\n        else:\n            labels_names = labels\n            examples_list = \"\"\n\n        labels_list = '\"' + '\", \"'.join(labels_names) + '\"'\n\n        prompt = f\"\"\"Classify this sentence:\n\"{sentence}\"\n\nAllowed classes are:\n{labels_list}{examples_list}\n\n\"{sentence}\" -> \"\"\"\n\n        response = self.llm(prompt)\n        log.info(response)\n\n        # find the closest match and its score with levenshtein distance\n        best_label, score = min(\n            ((label, utils.levenshtein_distance(response, label)) for label in labels_names),\n            key=lambda x: x[1],\n        )\n\n        # set 0.5 as threshold - let's see if it works properly\n        return best_label if score < 0.5 else None\n\n    def stringify_chat_history(self, latest_n: int = 5) -> str:\n        \"\"\"Serialize chat history.\n        Converts to text the recent conversation turns.\n\n        Parameters\n        ----------\n        latest_n : int\n            Hoe many latest turns to stringify.\n\n        Returns\n        -------\n        history : str\n            String with recent conversation turns.\n\n        Notes\n        -----\n        Such context is placed in the `agent_prompt_suffix` in the place held by {chat_history}.\n\n        The chat history is a dictionary with keys::\n            'who': the name of who said the utterance;\n            'message': the utterance.\n\n        \"\"\"\n\n        history = self.working_memory.history[-latest_n:]\n\n        history_string = \"\"\n        for turn in history:\n            history_string += f\"\\n - {turn['who']}: {turn['message']}\"\n\n        return history_string\n\n    def langchainfy_chat_history(self, latest_n: int = 5) -> List[BaseMessage]:\n        chat_history = self.working_memory.history[-latest_n:]\n\n        langchain_chat_history = []\n        for message in chat_history:\n            if message[\"role\"] == Role.Human:\n                langchain_chat_history.append(\n                    HumanMessage(name=message[\"who\"], content=message[\"message\"])\n                )\n            else:\n                langchain_chat_history.append(\n                    AIMessage(name=message[\"who\"], content=message[\"message\"])\n                )\n\n        return langchain_chat_history\n\n    @property\n    def user_id(self):\n        return self.__user_id\n\n    @property\n    def _llm(self):\n        return CheshireCat()._llm\n\n    @property\n    def embedder(self):\n        return CheshireCat().embedder\n\n    @property\n    def memory(self):\n        return CheshireCat().memory\n\n    @property\n    def rabbit_hole(self):\n        return CheshireCat().rabbit_hole\n\n    @property\n    def mad_hatter(self):\n        return CheshireCat().mad_hatter\n\n    @property\n    def main_agent(self):\n        return CheshireCat().main_agent\n\n    @property\n    def white_rabbit(self):\n        return CheshireCat().white_rabbit\n\n    @property\n    def loop(self):\n        return self.__loop\n
"},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat.__call__","title":"__call__(message_dict) async","text":"

Call the Cat instance.

This method is called on the user's message received from the client.

Parameters:

Name Type Description Default message_dict dict

Dictionary received from the Websocket client.

required save bool

If True, the user's message is stored in the chat history. Default is True.

required

Returns:

Name Type Description final_output dict

Dictionary with the Cat's answer to be sent to the client.

Notes

The main pipeline of the Cat happens here. Namely, the Cat receives the user's input and recalls the memories. The retrieved context is properly formatted and given as input to the Agent, which uses the LLM to produce the answer. This is formatted in a dictionary to be sent as a JSON via Websocket to the client.

Source code in cat/looking_glass/stray_cat.py
async def __call__(self, message_dict):\n    \"\"\"Call the Cat instance.\n\n    This method is called on the user's message received from the client.\n\n    Parameters\n    ----------\n    message_dict : dict\n        Dictionary received from the Websocket client.\n    save : bool, optional\n        If True, the user's message is stored in the chat history. Default is True.\n\n    Returns\n    -------\n    final_output : dict\n        Dictionary with the Cat's answer to be sent to the client.\n\n    Notes\n    -----\n    Here happens the main pipeline of the Cat. Namely, the Cat receives the user's input and recall the memories.\n    The retrieved context is formatted properly and given in input to the Agent that uses the LLM to produce the\n    answer. This is formatted in a dictionary to be sent as a JSON via Websocket to the client.\n\n    \"\"\"\n\n    # Parse websocket message into UserMessage obj\n    user_message = UserMessage.model_validate(message_dict)\n    log.info(user_message)\n\n    # set a few easy access variables\n    self.working_memory.user_message_json = user_message\n\n    # keeping track of model interactions\n    self.working_memory.model_interactions = []\n\n    # hook to modify/enrich user input\n    self.working_memory.user_message_json = self.mad_hatter.execute_hook(\n        \"before_cat_reads_message\", self.working_memory.user_message_json, cat=self\n    )\n\n    # text of latest Human message\n    user_message_text = self.working_memory.user_message_json.text\n\n    # update conversation history (Human turn)\n    self.working_memory.update_conversation_history(\n        who=\"Human\", message=user_message_text\n    )\n\n    # recall episodic and declarative memories from vector collections\n    #   and store them in working_memory\n    try:\n        self.recall_relevant_memories_to_working_memory()\n    except Exception as e:\n        log.error(e)\n        traceback.print_exc(e)\n\n        err_message = (\n            \"You probably changed Embedder and old vector memory is not compatible. 
\"\n            \"Please delete `core/long_term_memory` folder.\"\n        )\n\n        return {\n            \"type\": \"error\",\n            \"name\": \"VectorMemoryError\",\n            \"description\": err_message,\n        }\n\n    # reply with agent\n    try:\n        agent_output: AgentOutput = await self.main_agent.execute(self)\n    except Exception as e:\n        # This error happens when the LLM\n        #   does not respect prompt instructions.\n        # We grab the LLM output here anyway, so small and\n        #   non instruction-fine-tuned models can still be used.\n        error_description = str(e)\n\n        log.error(error_description)\n        if \"Could not parse LLM output: `\" not in error_description:\n            raise e\n\n        unparsable_llm_output = error_description.replace(\n            \"Could not parse LLM output: `\", \"\"\n        ).replace(\"`\", \"\")\n        agent_output = AgentOutput(\n            output=unparsable_llm_output,\n        )\n\n    log.info(\"Agent output returned to stray:\")\n    log.info(agent_output)\n\n    doc = Document(\n        page_content=user_message_text,\n        metadata={\"source\": self.user_id, \"when\": time.time()},\n    )\n    doc = self.mad_hatter.execute_hook(\n        \"before_cat_stores_episodic_memory\", doc, cat=self\n    )\n    # store user message in episodic memory\n    # TODO: vectorize and store also conversation chunks\n    #   (not raw dialog, but summarization)\n    user_message_embedding = self.embedder.embed_documents([user_message_text])\n    _ = self.memory.vectors.episodic.add_point(\n        doc.page_content,\n        user_message_embedding[0],\n        doc.metadata,\n    )\n\n    # why this response?\n    why = self.__build_why()\n    # TODO: should these assignations be included in self.__build_why ?\n    why.intermediate_steps = agent_output.intermediate_steps\n    why.agent_output = agent_output.model_dump()\n\n    # prepare final cat message\n    final_output = CatMessage(\n        user_id=self.user_id, content=str(agent_output.output), why=why\n    )\n\n    # run message through plugins\n    final_output = self.mad_hatter.execute_hook(\n        \"before_cat_sends_message\", final_output, cat=self\n    )\n\n    # update conversation history (AI turn)\n    self.working_memory.update_conversation_history(\n        who=\"AI\", message=final_output.content, why=final_output.why\n    )\n\n    return final_output\n
"},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat.classify","title":"classify(sentence, labels)","text":"

Classify a sentence.

Parameters:

Name Type Description Default sentence str

Sentence to be classified.

required labels List[str] or Dict[str, List[str]]

Possible output categories and optional examples.

required

Returns:

Name Type Description label str

Sentence category.

Examples:

>>> cat.classify(\"I feel good\", labels=[\"positive\", \"negative\"])\n\"positive\"\n

Or giving examples for each category:

>>> example_labels = {\n...     \"positive\": [\"I feel nice\", \"happy today\"],\n...     \"negative\": [\"I feel bad\", \"not my best day\"],\n... }\n... cat.classify(\"it is a bad day\", labels=example_labels)\n\"negative\"\n
Source code in cat/looking_glass/stray_cat.py
    def classify(\n        self, sentence: str, labels: List[str] | Dict[str, List[str]]\n    ) -> str | None:\n        \"\"\"Classify a sentence.\n\n        Parameters\n        ----------\n        sentence : str\n            Sentence to be classified.\n        labels : List[str] or Dict[str, List[str]]\n            Possible output categories and optional examples.\n\n        Returns\n        -------\n        label : str\n            Sentence category.\n\n        Examples\n        -------\n        >>> cat.classify(\"I feel good\", labels=[\"positive\", \"negative\"])\n        \"positive\"\n\n        Or giving examples for each category:\n\n        >>> example_labels = {\n        ...     \"positive\": [\"I feel nice\", \"happy today\"],\n        ...     \"negative\": [\"I feel bad\", \"not my best day\"],\n        ... }\n        ... cat.classify(\"it is a bad day\", labels=example_labels)\n        \"negative\"\n\n        \"\"\"\n\n        if isinstance(labels, dict):\n            labels_names = labels.keys()\n            examples_list = \"\\n\\nExamples:\"\n            for label, examples in labels.items():\n                for ex in examples:\n                    examples_list += f'\\n\"{ex}\" -> \"{label}\"'\n        else:\n            labels_names = labels\n            examples_list = \"\"\n\n        labels_list = '\"' + '\", \"'.join(labels_names) + '\"'\n\n        prompt = f\"\"\"Classify this sentence:\n\"{sentence}\"\n\nAllowed classes are:\n{labels_list}{examples_list}\n\n\"{sentence}\" -> \"\"\"\n\n        response = self.llm(prompt)\n        log.info(response)\n\n        # find the closest match and its score with levenshtein distance\n        best_label, score = min(\n            ((label, utils.levenshtein_distance(response, label)) for label in labels_names),\n            key=lambda x: x[1],\n        )\n\n        # set 0.5 as threshold - let's see if it works properly\n        return best_label if score < 0.5 else None\n
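Note the graceful fallback (a sketch; the label names are made up): if the LLM's reply is not close enough to any label under the 0.5 distance threshold, the method returns None rather than guessing.

label = cat.classify(\"mewling loudly\", labels=[\"invoice\", \"contract\"])\nif label is None:\n    # the reply matched no label closely enough\n    cat.send_error(\"Could not classify the sentence\")\n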
"},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat.llm","title":"llm(prompt, stream=False)","text":"

Generate a response using the LLM model.

This method is useful for generating a response with both a chat and a completion model using the same syntax.

Parameters:

Name Type Description Default prompt str

The prompt for generating the response.

required

Returns:

Type Description str

The generated response.

Source code in cat/looking_glass/stray_cat.py
def llm(self, prompt: str, stream: bool = False) -> str:\n    \"\"\"Generate a response using the LLM model.\n\n    This method is useful for generating a response with both a chat and a completion model using the same syntax\n\n    Parameters\n    ----------\n    prompt : str\n        The prompt for generating the response.\n\n    Returns\n    -------\n    str\n        The generated response.\n\n    \"\"\"\n\n    # should we stream the tokens?\n    callbacks = []\n    if stream:\n        callbacks.append(NewTokenHandler(self))\n\n    # Add a token counter to the callbacks\n    caller = utils.get_caller_info()\n    callbacks.append(ModelInteractionHandler(self, caller or \"StrayCat\"))\n\n\n\n    # here we deal with motherfucking langchain\n    prompt = ChatPromptTemplate(\n        messages=[\n            SystemMessage(content=prompt)\n            # TODO: add here optional convo history passed to the method, \n            #  or taken from working memory\n        ]\n    )\n\n    chain = (\n        prompt\n        | RunnableLambda(lambda x: utils.langchain_log_prompt(x, f\"{caller} prompt\"))\n        | self._llm\n        | RunnableLambda(lambda x: utils.langchain_log_output(x, f\"{caller} prompt output\"))\n        | StrOutputParser()\n    )\n\n    output = chain.invoke(\n        {}, # in case we need to pass info to the template\n        config=RunnableConfig(callbacks=callbacks)\n    )\n\n    return output\n
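Usage sketch: with stream=True the NewTokenHandler callback forwards tokens to the client as they are generated; the full text is still returned at the end.

# `cat` is a StrayCat, e.g. the one received by a plugin hook\nanswer = cat.llm(\"Summarize our conversation so far\", stream=True)\n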
"},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat.recall_relevant_memories_to_working_memory","title":"recall_relevant_memories_to_working_memory(query=None)","text":"

Retrieve context from memory.

The method retrieves the relevant memories from the vector collections that are given as context to the LLM. Recalled memories are stored in the working memory.

Parameters:

Name Type Description Default query str

The query used to make a similarity search in the Cat's vector memories. If not provided, the query will be derived from the user's message.

None

Notes

The user's message is used as a query to make a similarity search in the Cat's vector memories. Five hooks allow customizing the recall pipeline before and after it runs.

See Also

cat_recall_query before_cat_recalls_memories before_cat_recalls_episodic_memories before_cat_recalls_declarative_memories before_cat_recalls_procedural_memories after_cat_recalls_memories

Source code in cat/looking_glass/stray_cat.py
def recall_relevant_memories_to_working_memory(self, query=None):\n    \"\"\"Retrieve context from memory.\n\n    The method retrieves the relevant memories from the vector collections that are given as context to the LLM.\n    Recalled memories are stored in the working memory.\n\n    Parameters\n    ----------\n    query : str, optional\n    The query used to make a similarity search in the Cat's vector memories. If not provided, the query\n    will be derived from the user's message.\n\n    Notes\n    -----\n    The user's message is used as a query to make a similarity search in the Cat's vector memories.\n    Five hooks allow to customize the recall pipeline before and after it is done.\n\n    See Also\n    --------\n    cat_recall_query\n    before_cat_recalls_memories\n    before_cat_recalls_episodic_memories\n    before_cat_recalls_declarative_memories\n    before_cat_recalls_procedural_memories\n    after_cat_recalls_memories\n    \"\"\"\n    recall_query = query\n\n    if query is None:\n        # If query is not provided, use the user's message as the query\n        recall_query = self.working_memory.user_message_json.text\n\n    # We may want to search in memory\n    recall_query = self.mad_hatter.execute_hook(\n        \"cat_recall_query\", recall_query, cat=self\n    )\n    log.info(f\"Recall query: '{recall_query}'\")\n\n    # Embed recall query\n    recall_query_embedding = self.embedder.embed_query(recall_query)\n    self.working_memory.recall_query = recall_query\n\n    # keep track of embedder model usage\n    self.working_memory.model_interactions.append(\n        EmbedderModelInteraction(\n            prompt=recall_query,\n            reply=recall_query_embedding,\n            input_tokens=len(tiktoken.get_encoding(\"cl100k_base\").encode(recall_query)),\n        )\n    )\n\n    # hook to do something before recall begins\n    self.mad_hatter.execute_hook(\"before_cat_recalls_memories\", cat=self)\n\n    # Setting default recall configs for each memory\n    # TODO: can these data structures become instances of a RecallSettings class?\n    default_episodic_recall_config = {\n        \"embedding\": recall_query_embedding,\n        \"k\": 3,\n        \"threshold\": 0.7,\n        \"metadata\": {\"source\": self.user_id},\n    }\n\n    default_declarative_recall_config = {\n        \"embedding\": recall_query_embedding,\n        \"k\": 3,\n        \"threshold\": 0.7,\n        \"metadata\": None,\n    }\n\n    default_procedural_recall_config = {\n        \"embedding\": recall_query_embedding,\n        \"k\": 3,\n        \"threshold\": 0.7,\n        \"metadata\": None,\n    }\n\n    # hooks to change recall configs for each memory\n    recall_configs = [\n        self.mad_hatter.execute_hook(\n            \"before_cat_recalls_episodic_memories\",\n            default_episodic_recall_config,\n            cat=self,\n        ),\n        self.mad_hatter.execute_hook(\n            \"before_cat_recalls_declarative_memories\",\n            default_declarative_recall_config,\n            cat=self,\n        ),\n        self.mad_hatter.execute_hook(\n            \"before_cat_recalls_procedural_memories\",\n            default_procedural_recall_config,\n            cat=self,\n        ),\n    ]\n\n    memory_types = self.memory.vectors.collections.keys()\n\n    for config, memory_type in zip(recall_configs, memory_types):\n        memory_key = f\"{memory_type}_memories\"\n\n        # recall relevant memories for collection\n        vector_memory = getattr(self.memory.vectors, 
memory_type)\n        memories = vector_memory.recall_memories_from_embedding(**config)\n\n        setattr(\n            self.working_memory, memory_key, memories\n        )  # self.working_memory.procedural_memories = ...\n\n    # hook to modify/enrich retrieved memories\n    self.mad_hatter.execute_hook(\"after_cat_recalls_memories\", cat=self)\n
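A plugin sketch (the import path is assumed) that loosens episodic recall using the config keys shown above; the hook receives the default config and must return it.

from cat.mad_hatter.decorators import hook  # plugin import path assumed\n\n@hook\ndef before_cat_recalls_episodic_memories(episodic_recall_config, cat):\n    episodic_recall_config[\"k\"] = 5            # retrieve more memories\n    episodic_recall_config[\"threshold\"] = 0.6  # accept looser matches\n    return episodic_recall_config\n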
"},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat.send_ws_message","title":"send_ws_message(content, msg_type='notification')","text":"

Send a message via websocket.

This method is useful for sending a message via websocket directly, without passing through the LLM.

Parameters:

Name Type Description Default content str

The content of the message.

required msg_type str

The type of the message. Should be either notification, chat, chat_token or error

'notification' Source code in cat/looking_glass/stray_cat.py
def send_ws_message(self, content: str, msg_type: MSG_TYPES = \"notification\"):\n    \"\"\"Send a message via websocket.\n\n    This method is useful for sending a message via websocket directly without passing through the LLM\n\n    Parameters\n    ----------\n    content : str\n        The content of the message.\n    msg_type : str\n        The type of the message. Should be either `notification`, `chat`, `chat_token` or `error`\n    \"\"\"\n\n    if self.__ws is None:\n        log.warning(f\"No websocket connection is open for user {self.user_id}\")\n        return\n\n    options = get_args(MSG_TYPES)\n\n    if msg_type not in options:\n        raise ValueError(\n            f\"The message type `{msg_type}` is not valid. Valid types: {', '.join(options)}\"\n        )\n\n    if msg_type == \"error\":\n        self.__send_ws_json(\n            {\"type\": msg_type, \"name\": \"GenericError\", \"description\": str(content)}\n        )\n    else:\n        self.__send_ws_json({\"type\": msg_type, \"content\": content})\n
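A sketch of notifying the client directly (no LLM involved), e.g. from a long-running plugin task:

# `cat` is a StrayCat bound to an open websocket session\ncat.send_ws_message(\"Still crunching your document...\", msg_type=\"notification\")\n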
"},{"location":"API_Documentation/looking_glass/stray_cat/#cat.looking_glass.stray_cat.StrayCat.stringify_chat_history","title":"stringify_chat_history(latest_n=5)","text":"

Serialize chat history. Converts the recent conversation turns to text.

Parameters:

Name Type Description Default latest_n int

How many of the latest turns to stringify.

5

Returns:

Name Type Description history str

String with recent conversation turns.

Notes

Such context is placed in the agent_prompt_suffix in the place held by {chat_history}.

The chat history is a dictionary with keys: 'who', the name of who said the utterance; 'message', the utterance.

Source code in cat/looking_glass/stray_cat.py
def stringify_chat_history(self, latest_n: int = 5) -> str:\n    \"\"\"Serialize chat history.\n    Converts to text the recent conversation turns.\n\n    Parameters\n    ----------\n    latest_n : int\n        How many latest turns to stringify.\n\n    Returns\n    -------\n    history : str\n        String with recent conversation turns.\n\n    Notes\n    -----\n    Such context is placed in the `agent_prompt_suffix` in the place held by {chat_history}.\n\n    The chat history is a dictionary with keys::\n        'who': the name of who said the utterance;\n        'message': the utterance.\n\n    \"\"\"\n\n    history = self.working_memory.history[-latest_n:]\n\n    history_string = \"\"\n    for turn in history:\n        history_string += f\"\\n - {turn['who']}: {turn['message']}\"\n\n    return history_string\n
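For instance (a sketch with made-up turns), two recent turns render as a newline-separated transcript:

>>> cat.stringify_chat_history(latest_n=2)\n'\\n - Human: hi\\n - AI: Hello! How can I help?'\n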
"},{"location":"API_Documentation/mad_hatter/mad_hatter/","title":"mad_hatter","text":""},{"location":"API_Documentation/mad_hatter/plugin/","title":"plugin","text":""},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/agent/","title":"agent","text":"

Hooks to modify the Cat's Agent.

Here is a collection of methods to hook into the Agent execution pipeline.

"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/agent/#cat.mad_hatter.core_plugin.hooks.agent.agent_allowed_tools","title":"agent_allowed_tools(allowed_tools, cat)","text":"

Hook the allowed tools.

Allows deciding which tools end up in the Agent prompt.

To decide, you can filter the list of tools' names, but you can also check the context in cat.working_memory and launch custom chains with cat._llm.

Parameters:

Name Type Description Default cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description tools List[str]

List of allowed Langchain tools.

Source code in cat/mad_hatter/core_plugin/hooks/agent.py
@hook(priority=0)\ndef agent_allowed_tools(allowed_tools: List[str], cat) -> List[str]:\n    \"\"\"Hook the allowed tools.\n\n    Allows to decide which tools end up in the *Agent* prompt.\n\n    To decide, you can filter the list of tools' names, but you can also check the context in `cat.working_memory`\n    and launch custom chains with `cat._llm`.\n\n    Parameters\n    ---------\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    tools : List[str]\n        List of allowed Langchain tools.\n    \"\"\"\n\n    return allowed_tools\n
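A plugin sketch (import path assumed): filter the tool names before the prompt is built; returning the list unchanged keeps all tools.

from cat.mad_hatter.decorators import hook  # plugin import path assumed\n\n@hook\ndef agent_allowed_tools(allowed_tools, cat):\n    # \"get_the_time\" is a hypothetical tool name\n    return [t for t in allowed_tools if t != \"get_the_time\"]\n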
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/agent/#cat.mad_hatter.core_plugin.hooks.agent.agent_fast_reply","title":"agent_fast_reply(fast_reply, cat)","text":"

This hook is useful to shortcut the Cat's response. If you do not want the agent to run, return the final response from here and it will end up in the chat without the agent being executed.

Parameters:

Name Type Description Default fast_reply

Input is a dict (initially empty), which can be enriched with an \"output\" key holding the shortcut response.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description response Union[None, Dict]

Cat response if you want to avoid using the agent, or None / {} if you want the agent to be executed. See below for examples of Cat response.

Examples:

Example 1: can't talk about this topic

# here you could use cat._llm to do topic evaluation\nif \"dog\" in agent_input[\"input\"]:\n    return {\n        \"output\": \"You went out of topic. Can't talk about dog.\"\n    }\n

Example 2: don't remember (no uploaded documents about topic)

num_declarative_memories = len( cat.working_memory.declarative_memories )\nif num_declarative_memories == 0:\n    return {\n       \"output\": \"Sorry, I have no memories about that.\"\n    }\n
Source code in cat/mad_hatter/core_plugin/hooks/agent.py
@hook(priority=0)\ndef agent_fast_reply(fast_reply, cat) -> Union[None, Dict]:\n    \"\"\"This hook is useful to shortcut the Cat's response.\n    If you do not want the agent to run, return the final response from here and it will end up in the chat without the agent being executed.\n\n    Parameters\n    --------\n    fast_reply: dict\n        Input is a dict (initially empty), which can be enriched with an \"output\" key holding the shortcut response.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    --------\n    response : Union[None, Dict]\n        Cat response if you want to avoid using the agent, or None / {} if you want the agent to be executed.\n        See below for examples of Cat response.\n\n    Examples\n    --------\n\n    Example 1: can't talk about this topic\n    ```python\n    # here you could use cat._llm to do topic evaluation\n    if \"dog\" in agent_input[\"input\"]:\n        return {\n            \"output\": \"You went out of topic. Can't talk about dog.\"\n        }\n    ```\n\n    Example 2: don't remember (no uploaded documents about topic)\n    ```python\n    num_declarative_memories = len( cat.working_memory.declarative_memories )\n    if num_declarative_memories == 0:\n        return {\n           \"output\": \"Sorry, I have no memories about that.\"\n        }\n    ```\n    \"\"\"\n\n    return fast_reply\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/agent/#cat.mad_hatter.core_plugin.hooks.agent.before_agent_starts","title":"before_agent_starts(agent_input, cat)","text":"

Hook to read and edit the agent input

Parameters:

Name Type Description Default agent_input Dict

Input that is about to be passed to the agent.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description response Dict

Agent Input

Source code in cat/mad_hatter/core_plugin/hooks/agent.py
@hook(priority=0)\ndef before_agent_starts(agent_input: Dict, cat) -> Dict:\n    \"\"\"Hook to read and edit the agent input\n\n    Parameters\n    --------\n    agent_input: dict\n        Input that is about to be passed to the agent.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    --------\n    response : Dict\n        Agent Input\n    \"\"\"\n\n    return agent_input\n
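For instance, a plugin could enrich the input before the agent runs (a minimal sketch; the \"user_mood\" key is hypothetical and only shows the pattern):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_agent_starts(agent_input, cat):\n    # add a custom key that later hooks or prompts can read (hypothetical key)\n    agent_input[\"user_mood\"] = \"curious\"\n    return agent_input\n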
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/","title":"flow","text":"

Hooks to modify the Cat's flow of execution.

Here is a collection of methods to hook into the Cat execution pipeline.

"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.after_cat_bootstrap","title":"after_cat_bootstrap(cat)","text":"

Hook into the end of the Cat start up.

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.

This hook allows to intercept the end of such process and is executed right after the Cat has finished loading its components.

This can be used to set or store variables to be shared further in the pipeline.

Parameters:

Name Type Description Default cat CheshireCat

Cheshire Cat instance.

required Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef after_cat_bootstrap(cat) -> None:\n    \"\"\"Hook into the end of the Cat start up.\n\n    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,\n    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.\n\n    This hook allows to intercept the end of such process and is executed right after the Cat has finished loading\n    its components.\n\n    This can be used to set or store variables to be shared further in the pipeline.\n\n    Parameters\n    ----------\n    cat : CheshireCat\n        Cheshire Cat instance.\n    \"\"\"\n    pass  # do nothing\n
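For instance, a plugin could simply signal that start up is complete (a minimal sketch; it assumes the Cat's standard logger import):

from cat.log import log\nfrom cat.mad_hatter.decorators import hook\n\n@hook\ndef after_cat_bootstrap(cat):\n    # all components (plugins, LLM, memories, ...) are loaded at this point\n    log.info(\"Bootstrap completed\")\n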
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.after_cat_recalls_memories","title":"after_cat_recalls_memories(cat)","text":"

Hook after semantic search in memories.

The hook is executed just after the Cat searches for the meaningful context in memories and stores it in the Working Memory.

Parameters:

Name Type Description Default cat CheshireCat

Cheshire Cat instance.

required Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef after_cat_recalls_memories(cat) -> None:\n    \"\"\"Hook after semantic search in memories.\n\n    The hook is executed just after the Cat searches for the meaningful context in memories\n    and stores it in the *Working Memory*.\n\n    Parameters\n    ----------\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    \"\"\"\n    pass  # do nothing\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_bootstrap","title":"before_cat_bootstrap(cat)","text":"

Hook into the Cat start up.

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.

This hook allows to intercept such process and is executed in the middle of plugins and natural language objects loading.

This hook can be used to set or store variables to be propagated to subsequent loaded objects.

Parameters:

Name Type Description Default cat CheshireCat

Cheshire Cat instance.

required Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_bootstrap(cat) -> None:\n    \"\"\"Hook into the Cat start up.\n\n    Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories,\n    the *Main Agent*, the *Rabbit Hole* and the *White Rabbit*.\n\n    This hook allows to intercept such process and is executed in the middle of plugins and\n    natural language objects loading.\n\n    This hook can be used to set or store variables to be propagated to subsequent loaded objects.\n\n    Parameters\n    ----------\n    cat : CheshireCat\n        Cheshire Cat instance.\n    \"\"\"\n    pass  # do nothing\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_reads_message","title":"before_cat_reads_message(user_message_json, cat)","text":"

Hook the incoming user's JSON dictionary.

Allows to edit and enrich the incoming message received from the WebSocket connection.

For instance, this hook can be used to translate the user's message before feeding it to the Cat. Another use case is to add custom keys to the JSON dictionary.

The incoming message is a JSON dictionary with keys: { \"text\": message content }

Parameters:

Name Type Description Default user_message_json dict

JSON dictionary with the message received from the chat.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description user_message_json dict

Edited JSON dictionary that will be fed to the Cat.

Notes

For example:

{\n    \"text\": \"Hello Cheshire Cat!\",\n    \"custom_key\": True\n}\n

where \"custom_key\" is a newly added key to the dictionary to store any data.

Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_reads_message(user_message_json: dict, cat) -> dict:\n    \"\"\"Hook the incoming user's JSON dictionary.\n\n    Allows to edit and enrich the incoming message received from the WebSocket connection.\n\n    For instance, this hook can be used to translate the user's message before feeding it to the Cat.\n    Another use case is to add custom keys to the JSON dictionary.\n\n    The incoming message is a JSON dictionary with keys:\n        {\n            \"text\": message content\n        }\n\n    Parameters\n    ----------\n    user_message_json : dict\n        JSON dictionary with the message received from the chat.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n\n    Returns\n    -------\n    user_message_json : dict\n        Edited JSON dictionary that will be fed to the Cat.\n\n    Notes\n    -----\n    For example:\n\n        {\n            \"text\": \"Hello Cheshire Cat!\",\n            \"custom_key\": True\n        }\n\n    where \"custom_key\" is a newly added key to the dictionary to store any data.\n\n    \"\"\"\n    return user_message_json\n
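For instance, a plugin could add the custom key shown in the Notes above (a minimal sketch):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_reads_message(user_message_json, cat):\n    # add a custom key to the incoming message, as in the Notes above\n    user_message_json[\"custom_key\"] = True\n    return user_message_json\n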
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_recalls_declarative_memories","title":"before_cat_recalls_declarative_memories(declarative_recall_config, cat)","text":"

Hook into semantic search in memories.

Allows to intercept when the Cat queries the memories using the embedded user's input.

The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.

The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied to the query in the vector memory (items with a score under the threshold are not retrieved). It also returns the embedded query (embedding) and the conditions on recall (metadata).

Parameters:

Name Type Description Default declarative_recall_config dict

Dictionary with data needed to recall declarative memories

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description declarative_recall_config dict

Edited dictionary that will be fed to the embedder.

Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_recalls_declarative_memories(\n    declarative_recall_config: dict, cat\n) -> dict:\n    \"\"\"Hook into semantic search in memories.\n\n    Allows to intercept when the Cat queries the memories using the embedded user's input.\n\n    The hook is executed just before the Cat searches for the meaningful context in both memories\n    and stores it in the *Working Memory*.\n\n    The hook return the values for maximum number (k) of items to retrieve from memory and the score threshold applied\n    to the query in the vector memory (items with score under threshold are not retrieved)\n    It also returns the embedded query (embedding) and the conditions on recall (metadata).\n\n    Parameters\n    ----------\n    declarative_recall_config: dict\n        Dictionary with data needed to recall declarative memories\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    declarative_recall_config: dict\n        Edited dictionary that will be fed to the embedder.\n\n    \"\"\"\n    return declarative_recall_config\n
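For instance, a plugin could retrieve more documents with a looser threshold (a minimal sketch; the \"k\" and \"threshold\" keys follow the description above, the values are illustrative):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_recalls_declarative_memories(declarative_recall_config, cat):\n    # retrieve more items and accept lower similarity scores (illustrative values)\n    declarative_recall_config[\"k\"] = 8\n    declarative_recall_config[\"threshold\"] = 0.5\n    return declarative_recall_config\n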
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_recalls_episodic_memories","title":"before_cat_recalls_episodic_memories(episodic_recall_config, cat)","text":"

Hook into semantic search in memories.

Allows to intercept when the Cat queries the memories using the embedded user's input.

The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.

The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied to the query in the vector memory (items with a score under the threshold are not retrieved). It also returns the embedded query (embedding) and the conditions on recall (metadata).

Parameters:

Name Type Description Default episodic_recall_config dict

Dictionary with data needed to recall episodic memories

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description episodic_recall_config dict

Edited dictionary that will be fed to the embedder.

Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_recalls_episodic_memories(episodic_recall_config: dict, cat) -> dict:\n    \"\"\"Hook into semantic search in memories.\n\n    Allows to intercept when the Cat queries the memories using the embedded user's input.\n\n    The hook is executed just before the Cat searches for the meaningful context in both memories\n    and stores it in the *Working Memory*.\n\n    The hook return the values for maximum number (k) of items to retrieve from memory and the score threshold applied\n    to the query in the vector memory (items with score under threshold are not retrieved).\n    It also returns the embedded query (embedding) and the conditions on recall (metadata).\n\n    Parameters\n    ----------\n    episodic_recall_config : dict\n        Dictionary with data needed to recall episodic memories\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    episodic_recall_config: dict\n        Edited dictionary that will be fed to the embedder.\n\n    \"\"\"\n    return episodic_recall_config\n
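For instance, a plugin could add a condition on recall via the \"metadata\" key (a minimal sketch; the metadata content is illustrative and depends on how memories were stored):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_recalls_episodic_memories(episodic_recall_config, cat):\n    # recall only memories matching a metadata condition (illustrative)\n    episodic_recall_config[\"metadata\"] = {\"source\": cat.user_id}\n    return episodic_recall_config\n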
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_recalls_memories","title":"before_cat_recalls_memories(cat)","text":"

Hook into semantic search in memories.

Allows to intercept when the Cat queries the memories using the embedded user's input.

The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.

Parameters:

Name Type Description Default cat CheshireCat

Cheshire Cat instance.

required Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_recalls_memories(cat) -> None:\n    \"\"\"Hook into semantic search in memories.\n\n    Allows to intercept when the Cat queries the memories using the embedded user's input.\n\n    The hook is executed just before the Cat searches for the meaningful context in both memories\n    and stores it in the *Working Memory*.\n\n    Parameters\n    ----------\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    \"\"\"\n    pass  # do nothing\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_recalls_procedural_memories","title":"before_cat_recalls_procedural_memories(procedural_recall_config, cat)","text":"

Hook into semantic search in memories.

Allows to intercept when the Cat queries the memories using the embedded user's input.

The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.

The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied to the query in the vector memory (items with a score under the threshold are not retrieved). It also returns the embedded query (embedding) and the conditions on recall (metadata).

Parameters:

Name Type Description Default procedural_recall_config dict

Dictionary with data needed to recall tools from procedural memory

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description procedural_recall_config dict

Edited dictionary that will be fed to the embedder.

Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_recalls_procedural_memories(procedural_recall_config: dict, cat) -> dict:\n    \"\"\"Hook into semantic search in memories.\n\n    Allows to intercept when the Cat queries the memories using the embedded user's input.\n\n    The hook is executed just before the Cat searches for the meaningful context in both memories\n    and stores it in the *Working Memory*.\n\n    The hook return the values for maximum number (k) of items to retrieve from memory and the score threshold applied\n    to the query in the vector memory (items with score under threshold are not retrieved)\n    It also returns the embedded query (embedding) and the conditions on recall (metadata).\n\n    Parameters\n    ----------\n    procedural_recall_config: dict\n        Dictionary with data needed to recall tools from procedural memory\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    procedural_recall_config: dict\n        Edited dictionary that will be fed to the embedder.\n\n    \"\"\"\n    return procedural_recall_config\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_sends_message","title":"before_cat_sends_message(message, cat)","text":"

Hook the outgoing Cat's message.

Allows to edit the JSON dictionary that will be sent to the client via WebSocket connection.

This hook can be used to edit the message sent to the user or to add keys to the dictionary.

Parameters:

Name Type Description Default message dict

JSON dictionary to be sent to the WebSocket client.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description message dict

Edited JSON dictionary with the Cat's answer.

Notes

Default message is:

    {\n        \"type\": \"chat\",\n        \"content\": cat_message[\"output\"],\n        \"why\": {\n            \"input\": cat_message[\"input\"],\n            \"output\": cat_message[\"output\"],\n            \"intermediate_steps\": cat_message[\"intermediate_steps\"],\n            \"memory\": {\n                \"vectors\": {\n                    \"episodic\": episodic_report,\n                    \"declarative\": declarative_report\n                }\n            },\n        },\n    }\n
Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_sends_message(message: dict, cat) -> dict:\n    \"\"\"Hook the outgoing Cat's message.\n\n    Allows to edit the JSON dictionary that will be sent to the client via WebSocket connection.\n\n    This hook can be used to edit the message sent to the user or to add keys to the dictionary.\n\n    Parameters\n    ----------\n    message : dict\n        JSON dictionary to be sent to the WebSocket client.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    message : dict\n        Edited JSON dictionary with the Cat's answer.\n\n    Notes\n    -----\n    Default `message` is::\n\n            {\n                \"type\": \"chat\",\n                \"content\": cat_message[\"output\"],\n                \"why\": {\n                    \"input\": cat_message[\"input\"],\n                    \"output\": cat_message[\"output\"],\n                    \"intermediate_steps\": cat_message[\"intermediate_steps\"],\n                    \"memory\": {\n                        \"vectors\": {\n                            \"episodic\": episodic_report,\n                            \"declarative\": declarative_report\n                        }\n                    },\n                },\n            }\n\n    \"\"\"\n\n    return message\n
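For instance, a plugin could post-process the reply right before it is sent (a minimal sketch):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_sends_message(message, cat):\n    # append a signature to the outgoing content\n    message[\"content\"] += \" -- sent by my plugin\"\n    return message\n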
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.before_cat_stores_episodic_memory","title":"before_cat_stores_episodic_memory(doc, cat)","text":"

Hook the user message Document before it is inserted in the vector memory.

Allows editing and enhancing a single Document before the Cat adds it to the episodic vector memory.

Parameters:

Name Type Description Default doc Document

Langchain Document to be inserted in memory.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description doc Document

Langchain Document that is added in the episodic vector memory.

Notes

The Document has two properties:

`page_content`: the string with the text to save in memory;\n`metadata`: a dictionary with at least two keys:\n    `source`: where the text comes from;\n    `when`: timestamp to track when it's been uploaded.\n
Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef before_cat_stores_episodic_memory(doc: Document, cat) -> Document:\n    \"\"\"Hook the user message `Document` before is inserted in the vector memory.\n\n    Allows editing and enhancing a single `Document` before the Cat add it to the episodic vector memory.\n\n    Parameters\n    ----------\n    doc : Document\n        Langchain `Document` to be inserted in memory.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    doc : Document\n        Langchain `Document` that is added in the episodic vector memory.\n\n    Notes\n    -----\n    The `Document` has two properties::\n\n        `page_content`: the string with the text to save in memory;\n        `metadata`: a dictionary with at least two keys:\n            `source`: where the text comes from;\n            `when`: timestamp to track when it's been uploaded.\n\n    \"\"\"\n    return doc\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/flow/#cat.mad_hatter.core_plugin.hooks.flow.cat_recall_query","title":"cat_recall_query(user_message, cat)","text":"

Hook the semantic search query.

This hook allows to edit the user's message used as a query for context retrieval from memories. As a result, the retrieved context can be conditioned by editing the user's message.

Parameters:

Name Type Description Default user_message str

String with the text received from the user.

required cat CheshireCat

Cheshire Cat instance to exploit the Cat's methods.

required

Returns:

Type Description Edited string to be used for context retrieval in memory. The returned string is further stored in the Working Memory at `cat.working_memory.recall_query`.

Notes

For example, this hook is suitable for performing Hypothetical Document Embedding (HyDE). The HyDE [1] strategy exploits the user's message to generate a hypothetical answer. This is then used to recall the relevant context from the memory. An official plugin is available to test this technique.

References

[1] Gao, L., Ma, X., Lin, J., & Callan, J. (2022). Precise Zero-Shot Dense Retrieval without Relevance Labels. arXiv preprint arXiv:2212.10496.

Source code in cat/mad_hatter/core_plugin/hooks/flow.py
@hook(priority=0)\ndef cat_recall_query(user_message: str, cat) -> str:\n    \"\"\"Hook the semantic search query.\n\n    This hook allows to edit the user's message used as a query for context retrieval from memories.\n    As a result, the retrieved context can be conditioned editing the user's message.\n\n    Parameters\n    ----------\n    user_message : str\n        String with the text received from the user.\n    cat : CheshireCat\n        Cheshire Cat instance to exploit the Cat's methods.\n\n    Returns\n    -------\n    Edited string to be used for context retrieval in memory. The returned string is further stored in the\n    Working Memory at `cat.working_memory.recall_query`.\n\n    Notes\n    -----\n    For example, this hook is a suitable to perform Hypothetical Document Embedding (HyDE).\n    HyDE [1]_ strategy exploits the user's message to generate a hypothetical answer. This is then used to recall\n    the relevant context from the memory.\n    An official plugin is available to test this technique.\n\n    References\n    ----------\n    [1] Gao, L., Ma, X., Lin, J., & Callan, J. (2022). Precise Zero-Shot Dense Retrieval without Relevance Labels.\n       arXiv preprint arXiv:2212.10496.\n\n    \"\"\"\n\n    # here we just return the latest user message as is\n    return user_message\n
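For instance, a plugin could expand the query before retrieval (a minimal sketch that just enriches the string; a HyDE-style plugin would instead generate a hypothetical answer with the LLM and return that):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef cat_recall_query(user_message, cat):\n    # enrich the retrieval query with extra context (illustrative)\n    return user_message + \" (see also the uploaded manuals)\"\n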
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/prompt/","title":"prompt","text":"

Hooks to modify the prompts.

Here is a collection of methods to hook the prompts components that instruct the Agent.

"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/prompt/#cat.mad_hatter.core_plugin.hooks.prompt.agent_prompt_instructions","title":"agent_prompt_instructions(instructions, cat)","text":"

Hook the instruction prompt.

Allows to edit the instructions that the Cat feeds to the Agent to select tools and forms.

Parameters:

Name Type Description Default instructions str

Instructions prompt to select tool or form.

required cat StrayCat

StrayCat instance.

required

Returns:

Name Type Description instructions str

Instructions prompt to select tool or form

Notes

This prompt explains to the Agent how to select a tool or form.

Source code in cat/mad_hatter/core_plugin/hooks/prompt.py
@hook(priority=0)\ndef agent_prompt_instructions(instructions: str, cat) -> str:\n    \"\"\"Hook the instruction prompt.\n\n    Allows to edit the instructions that the Cat feeds to the *Agent* to select tools and forms.\n\n    Parameters\n    ----------\n    instructions : str\n        Instructions prompt to select tool or form.\n    cat : StrayCat\n        StrayCat instance.\n\n    Returns\n    -------\n    instructions : str\n        Instructions prompt to select tool or form\n\n    Notes\n    -----\n    This prompt explains the *Agent* how to select a tool or form.\n\n    \"\"\"\n\n    return instructions\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/prompt/#cat.mad_hatter.core_plugin.hooks.prompt.agent_prompt_prefix","title":"agent_prompt_prefix(prefix, cat)","text":"

Hook the main prompt prefix.

Allows to edit the prefix of the Main Prompt that the Cat feeds to the Agent. It describes the personality of your assistant and its general task.

The prefix is then completed with the agent_prompt_suffix.

Parameters:

Name Type Description Default prefix str

Main / System prompt with personality and general task to be accomplished.

required cat StrayCat

StrayCat instance.

required

Returns:

Name Type Description prefix str

Main / System prompt.

Notes

The default prefix describes who the AI is and how it is expected to answer the Human.

Source code in cat/mad_hatter/core_plugin/hooks/prompt.py
@hook(priority=0)\ndef agent_prompt_prefix(prefix, cat) -> str:\n    \"\"\"Hook the main prompt prefix.\n\n    Allows to edit the prefix of the *Main Prompt* that the Cat feeds to the *Agent*.\n    It describes the personality of your assistant and its general task.\n\n    The prefix is then completed with the `agent_prompt_suffix`.\n\n    Parameters\n    ----------\n    prefix : str\n        Main / System prompt with personality and general task to be accomplished.\n    cat : StrayCat\n        StrayCat instance.\n\n    Returns\n    -------\n    prefix : str\n        Main / System prompt.\n\n    Notes\n    -----\n    The default prefix describe who the AI is and how it is expected to answer the Human.\n    \"\"\"\n\n    return prefix\n
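For instance, a plugin could replace the default personality entirely (a minimal sketch; the personality text is illustrative):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_prompt_prefix(prefix, cat):\n    # overwrite the default personality (illustrative text)\n    return \"You are a grumpy AI librarian. Answer briefly and rely on the uploaded documents.\"\n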
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/prompt/#cat.mad_hatter.core_plugin.hooks.prompt.agent_prompt_suffix","title":"agent_prompt_suffix(prompt_suffix, cat)","text":"

Hook the main prompt suffix.

Allows to edit the suffix of the Main Prompt that the Cat feeds to the Agent.

The suffix is concatenated to agent_prompt_prefix when RAG context is used.

Parameters:

Name Type Description Default cat StrayCat

StrayCat instance.

required

Returns:

Name Type Description prompt_suffix str

The suffix string to be concatenated to the Main Prompt (prefix).

Notes

The default suffix has a few placeholders:

- {episodic_memory} provides memories retrieved from episodic memory (past conversations)
- {declarative_memory} provides memories retrieved from declarative memory (uploaded documents)
- {chat_history} provides the Agent with the recent conversation history
- {input} provides the last user's input
- {agent_scratchpad} is where the Agent can concatenate tool use and multiple calls to the LLM.

Source code in cat/mad_hatter/core_plugin/hooks/prompt.py
@hook(priority=0)\ndef agent_prompt_suffix(prompt_suffix: str, cat) -> str:\n    \"\"\"Hook the main prompt suffix.\n\n    Allows to edit the suffix of the *Main Prompt* that the Cat feeds to the *Agent*.\n\n    The suffix is concatenated to `agent_prompt_prefix` when RAG context is used.\n\n    Parameters\n    ----------\n    cat : StrayCat\n        StrayCat instance.\n\n    Returns\n    -------\n    prompt_suffix : str\n        The suffix string to be concatenated to the *Main Prompt* (prefix).\n\n    Notes\n    -----\n    The default suffix has a few placeholders:\n    - {episodic_memory} provides memories retrieved from *episodic* memory (past conversations)\n    - {declarative_memory} provides memories retrieved from *declarative* memory (uploaded documents)\n    - {chat_history} provides the *Agent* the recent conversation history\n    - {input} provides the last user's input\n    - {agent_scratchpad} is where the *Agent* can concatenate tools use and multiple calls to the LLM.\n\n    \"\"\"\n\n    return prompt_suffix\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/","title":"rabbithole","text":"

Hooks to modify the RabbitHole's documents ingestion.

Here is a collection of methods to hook into the RabbitHole execution pipeline.

These hooks allow to intercept the uploaded documents at different places before they are saved into memory.

"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.after_rabbithole_splitted_text","title":"after_rabbithole_splitted_text(chunks, cat)","text":"

Hook the Document after it is split.

Allows editing the list of Documents right after the RabbitHole has chunked them into smaller ones.

Parameters:

Name Type Description Default chunks List[Document]

List of Langchain Document.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description chunks List[Document]

List of modified chunked Langchain documents to be stored in the declarative memory.

Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef after_rabbithole_splitted_text(chunks: List[Document], cat) -> List[Document]:\n    \"\"\"Hook the `Document` after is split.\n\n    Allows editing the list of `Document` right after the *RabbitHole* chunked them in smaller ones.\n\n    Parameters\n    ----------\n    chunks : List[Document]\n        List of Langchain `Document`.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    chunks : List[Document]\n        List of modified chunked langchain documents to be stored in the episodic memory.\n\n    \"\"\"\n\n    return chunks\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.after_rabbithole_stored_documents","title":"after_rabbithole_stored_documents(source, stored_points, cat)","text":"

Hook the Document after it is inserted in the vector memory.

Allows editing and enhancing the list of Documents after they are inserted in the vector memory.

Parameters:

Name Type Description Default source

Name of ingested file/url

required stored_points List[PointStruct]

List of Qdrant PointStruct just inserted into the db.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Type Description None Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef after_rabbithole_stored_documents(\n    source, stored_points: List[PointStruct], cat\n) -> None:\n    \"\"\"Hook the Document after is inserted in the vector memory.\n\n    Allows editing and enhancing the list of Document after is inserted in the vector memory.\n\n    Parameters\n    ----------\n    source: str\n        Name of ingested file/url\n    docs : List[PointStruct]\n        List of Qdrant PointStruct just inserted into the db.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    pass\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.before_rabbithole_insert_memory","title":"before_rabbithole_insert_memory(doc, cat)","text":"

Hook the Document before it is inserted in the vector memory.

Allows editing and enhancing a single Document before the RabbitHole adds it to the declarative vector memory.

Parameters:

Name Type Description Default doc Document

Langchain Document to be inserted in memory.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description doc Document

Langchain Document that is added in the declarative vector memory.

Notes

The Document has two properties:

`page_content`: the string with the text to save in memory;\n`metadata`: a dictionary with at least two keys:\n    `source`: where the text comes from;\n    `when`: timestamp to track when it's been uploaded.\n
Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef before_rabbithole_insert_memory(doc: Document, cat) -> Document:\n    \"\"\"Hook the `Document` before is inserted in the vector memory.\n\n    Allows editing and enhancing a single `Document` before the *RabbitHole* add it to the declarative vector memory.\n\n    Parameters\n    ----------\n    doc : Document\n        Langchain `Document` to be inserted in memory.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    doc : Document\n        Langchain `Document` that is added in the declarative vector memory.\n\n    Notes\n    -----\n    The `Document` has two properties::\n\n        `page_content`: the string with the text to save in memory;\n        `metadata`: a dictionary with at least two keys:\n            `source`: where the text comes from;\n            `when`: timestamp to track when it's been uploaded.\n\n    \"\"\"\n    return doc\n
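For instance, a plugin could tag every chunk with extra metadata (a minimal sketch; the \"source_type\" key is illustrative):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_rabbithole_insert_memory(doc, cat):\n    # add an illustrative metadata key to each chunk before storage\n    doc.metadata[\"source_type\"] = \"manual\"\n    return doc\n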
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.before_rabbithole_splits_text","title":"before_rabbithole_splits_text(docs, cat)","text":"

Hook the Documents before they are split into chunks.

Allows editing the main Document(s) of the uploaded file before the RabbitHole recursively splits them into shorter ones. Please note that this is a list because parsers can output one or more Documents, which are split afterwards.

For instance, the hook allows to change the text or edit/add metadata.

Parameters:

Name Type Description Default docs List[Document]

Langchain Documents resulted after parsing the file uploaded in the RabbitHole.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description docs List[Document]

Edited Langchain Documents.

Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef before_rabbithole_splits_text(docs: List[Document], cat) -> List[Document]:\n    \"\"\"Hook the `Documents` before they are split into chunks.\n\n    Allows editing the uploaded document main Document(s) before the *RabbitHole* recursively splits it in shorter ones.\n    Please note that this is a list because parsers can output one or more Document, that are afterwards splitted.\n\n    For instance, the hook allows to change the text or edit/add metadata.\n\n    Parameters\n    ----------\n    docs : List[Document]\n        Langchain `Document`s resulted after parsing the file uploaded in the *RabbitHole*.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    docs : List[Document]\n        Edited Langchain `Document`s.\n\n    \"\"\"\n\n    return docs\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.before_rabbithole_stores_documents","title":"before_rabbithole_stores_documents(docs, cat)","text":"

Hook into the memory insertion pipeline.

Allows modifying how the list of Document is inserted in the vector memory.

For example, this hook is a good point to summarize the incoming documents and save both original and summarized contents. An official plugin is available to test this procedure.

Parameters:

Name Type Description Default docs List[Document]

List of Langchain Document to be edited.

required cat

Cheshire Cat instance.

required

Returns:

Name Type Description docs List[Document]

List of edited Langchain documents.

Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef before_rabbithole_stores_documents(docs: List[Document], cat) -> List[Document]:\n    \"\"\"Hook into the memory insertion pipeline.\n\n    Allows modifying how the list of `Document` is inserted in the vector memory.\n\n    For example, this hook is a good point to summarize the incoming documents and save both original and\n    summarized contents.\n    An official plugin is available to test this procedure.\n\n    Parameters\n    ----------\n    docs : List[Document]\n        List of Langchain `Document` to be edited.\n    cat: CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    docs : List[Document]\n        List of edited Langchain documents.\n\n    \"\"\"\n\n    return docs\n
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.rabbithole_instantiates_parsers","title":"rabbithole_instantiates_parsers(file_handlers, cat)","text":"

Hook the available parsers for ingesting files in the declarative memory.

Allows replacing or extending existing supported mime types and related parsers to customize the file ingestion.

Parameters:

Name Type Description Default file_handlers dict

Keys are the supported mime types and values are the related parsers.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description file_handlers dict

Edited dictionary of supported mime types and related parsers.

Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef rabbithole_instantiates_parsers(file_handlers: dict, cat) -> dict:\n    \"\"\"Hook the available parsers for ingesting files in the declarative memory.\n\n    Allows replacing or extending existing supported mime types and related parsers to customize the file ingestion.\n\n    Parameters\n    ----------\n    file_handlers : dict\n        Keys are the supported mime types and values are the related parsers.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    file_handlers : dict\n        Edited dictionary of supported mime types and related parsers.\n    \"\"\"\n    return file_handlers\n
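For instance, a plugin could register a parser for an extra mime type (a minimal sketch; it assumes Langchain's TextParser and simply treats CSV files as plain text):

from cat.mad_hatter.decorators import hook\nfrom langchain_community.document_loaders.parsers.txt import TextParser\n\n@hook\ndef rabbithole_instantiates_parsers(file_handlers, cat):\n    # map an extra mime type to a parser (here: ingest CSV files as plain text)\n    file_handlers[\"text/csv\"] = TextParser()\n    return file_handlers\n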
"},{"location":"API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/#cat.mad_hatter.core_plugin.hooks.rabbithole.rabbithole_instantiates_splitter","title":"rabbithole_instantiates_splitter(text_splitter, cat)","text":"

Hook the splitter used to split text in chunks.

Allows replacing the default text splitter to customize the splitting process.

Parameters:

Name Type Description Default text_splitter TextSplitter

The text splitter used by default.

required cat CheshireCat

Cheshire Cat instance.

required

Returns:

Name Type Description text_splitter TextSplitter

An instance of a TextSplitter subclass.

Source code in cat/mad_hatter/core_plugin/hooks/rabbithole.py
@hook(priority=0)\ndef rabbithole_instantiates_splitter(text_splitter: TextSplitter, cat) -> TextSplitter:\n    \"\"\"Hook the splitter used to split text in chunks.\n\n    Allows replacing the default text splitter to customize the splitting process.\n\n    Parameters\n    ----------\n    text_splitter : TextSplitter\n        The text splitter used by default.\n    cat : CheshireCat\n        Cheshire Cat instance.\n\n    Returns\n    -------\n    text_splitter : TextSplitter\n        An instance of a TextSplitter subclass.\n    \"\"\"\n\n    # example on how to change chunking\n    # text_splitter._chunk_size = 64\n    # text_splitter._chunk_overlap = 8\n\n    return text_splitter\n
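For instance, a plugin could apply the chunking change suggested in the commented example above (a minimal sketch):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef rabbithole_instantiates_splitter(text_splitter, cat):\n    # smaller chunks with a small overlap, as in the commented example above\n    text_splitter._chunk_size = 64\n    text_splitter._chunk_overlap = 8\n    return text_splitter\n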
"},{"location":"API_Documentation/memory/vector_memory/","title":"vector_memory","text":""},{"location":"API_Documentation/memory/vector_memory_collection/","title":"vector_memory_collection","text":""},{"location":"API_Documentation/memory/vector_memory_collection/#cat.memory.vector_memory_collection.VectorMemoryCollection","title":"VectorMemoryCollection","text":"Source code in cat/memory/vector_memory_collection.py
class VectorMemoryCollection:\n    def __init__(\n        self,\n        client: Any,\n        collection_name: str,\n        embedder_name: str,\n        embedder_size: int,\n    ):\n        # Set attributes (metadata on the embedder are useful because it may change at runtime)\n        self.client = client\n        self.collection_name = collection_name\n        self.embedder_name = embedder_name\n        self.embedder_size = embedder_size\n\n        # Check if memory collection exists also in vectorDB, otherwise create it\n        self.create_db_collection_if_not_exists()\n\n        # Check db collection vector size is same as embedder size\n        self.check_embedding_size()\n\n        # log collection info\n        log.debug(f\"Collection {self.collection_name}:\")\n        log.debug(self.client.get_collection(self.collection_name))\n\n    def check_embedding_size(self):\n        # having the same size does not necessarily imply being the same embedder\n        # having vectors with the same size but from a different embedder in the same vector space is wrong\n        same_size = (\n            self.client.get_collection(self.collection_name).config.params.vectors.size\n            == self.embedder_size\n        )\n        alias = self.embedder_name + \"_\" + self.collection_name\n        if (\n            alias\n            == self.client.get_collection_aliases(self.collection_name)\n            .aliases[0]\n            .alias_name\n            and same_size\n        ):\n            log.debug(f'Collection \"{self.collection_name}\" has the same embedder')\n        else:\n            log.warning(f'Collection \"{self.collection_name}\" has different embedder')\n            # Memory snapshot saving can be turned off in the .env file with:\n            # SAVE_MEMORY_SNAPSHOTS=false\n            if get_env(\"CCAT_SAVE_MEMORY_SNAPSHOTS\") == \"true\":\n                # dump collection on disk before deleting\n                self.save_dump()\n                log.info(f\"Dump '{self.collection_name}' completed\")\n\n            self.client.delete_collection(self.collection_name)\n            log.warning(f\"Collection '{self.collection_name}' deleted\")\n            self.create_collection()\n\n    def create_db_collection_if_not_exists(self):\n        # is collection present in DB?\n        collections_response = self.client.get_collections()\n        for c in collections_response.collections:\n            if c.name == self.collection_name:\n                # collection exists. Do nothing\n                log.info(\n                    f\"Collection '{self.collection_name}' already present in vector store\"\n                )\n                return\n\n        self.create_collection()\n\n    # create collection\n    def create_collection(self):\n        log.warning(f\"Creating collection '{self.collection_name}' ...\")\n        self.client.recreate_collection(\n            collection_name=self.collection_name,\n            vectors_config=VectorParams(\n                size=self.embedder_size, distance=Distance.COSINE\n            ),\n            # hybrid mode: original vector on Disk, quantized vector in RAM\n            optimizers_config=OptimizersConfigDiff(memmap_threshold=20000),\n            quantization_config=ScalarQuantization(\n                scalar=ScalarQuantizationConfig(\n                    type=ScalarType.INT8, quantile=0.95, always_ram=True\n                )\n            ),\n            # shard_number=3,\n        )\n\n        self.client.update_collection_aliases(\n            change_aliases_operations=[\n                CreateAliasOperation(\n                    create_alias=CreateAlias(\n                        collection_name=self.collection_name,\n                        alias_name=self.embedder_name + \"_\" + self.collection_name,\n                    )\n                )\n            ]\n        )\n\n    # adapted from https://github.com/langchain-ai/langchain/blob/bfc12a4a7644cfc4d832cc4023086a7a5374f46a/libs/langchain/langchain/vectorstores/qdrant.py#L1965\n    def _qdrant_filter_from_dict(self, filter: dict) -> Filter:\n        if not filter:\n            return None\n\n        return Filter(\n            must=[\n                condition\n                for key, value in filter.items()\n                for condition in self._build_condition(key, value)\n            ]\n        )\n\n    # adapted from https://github.com/langchain-ai/langchain/blob/bfc12a4a7644cfc4d832cc4023086a7a5374f46a/libs/langchain/langchain/vectorstores/qdrant.py#L1941\n    def _build_condition(self, key: str, value: Any) -> List[FieldCondition]:\n        out = []\n\n        if isinstance(value, dict):\n            for _key, value in value.items():\n                out.extend(self._build_condition(f\"{key}.{_key}\", value))\n        elif isinstance(value, list):\n            for _value in value:\n                if isinstance(_value, dict):\n                    out.extend(self._build_condition(f\"{key}[]\", _value))\n                else:\n                    out.extend(self._build_condition(f\"{key}\", _value))\n        else:\n            out.append(\n                FieldCondition(\n                    key=f\"metadata.{key}\",\n                    match=MatchValue(value=value),\n                )\n            )\n\n        return out\n\n    def add_point(\n        self,\n        content: str,\n        vector: Iterable,\n        metadata: dict = None,\n        id: Optional[str] = None,\n        **kwargs: Any,\n    ) -> List[str]:\n        \"\"\"Add a point (and its metadata) to the vectorstore.\n\n        Args:\n            content: original text.\n            vector: Embedding vector.\n            metadata: Optional metadata dict associated with the text.\n            id:\n                Optional id to associate with the point. Id has to be a uuid-like string.\n\n        Returns:\n            Point id as saved into the vectorstore.\n        \"\"\"\n\n        # TODO: may be adapted to upload batches of points as langchain does.\n        # Not necessary now as the bottleneck is the embedder\n        point = PointStruct(\n            id=id or uuid.uuid4().hex,\n            payload={\n                \"page_content\": content,\n                \"metadata\": metadata,\n            },\n            vector=vector,\n        )\n\n        update_status = self.client.upsert(\n            collection_name=self.collection_name, points=[point], **kwargs\n        )\n\n        if update_status.status == \"completed\":\n            # returning stored point\n            return point # TODOV2 return internal MemoryPoint\n        else:\n            return None\n\n    def delete_points_by_metadata_filter(self, metadata=None):\n        res = self.client.delete(\n            collection_name=self.collection_name,\n            points_selector=self._qdrant_filter_from_dict(metadata),\n        )\n        return res\n\n    # delete point in collection\n    def delete_points(self, points_ids):\n        res = self.client.delete(\n            collection_name=self.collection_name,\n            points_selector=points_ids,\n        )\n        return res\n\n    # retrieve similar memories from embedding\n    def recall_memories_from_embedding(\n        self, embedding, metadata=None, k=5, threshold=None\n    ):\n        # retrieve memories\n        memories = self.client.search(\n            collection_name=self.collection_name,\n            query_vector=embedding,\n            query_filter=self._qdrant_filter_from_dict(metadata),\n            with_payload=True,\n            with_vectors=True,\n            limit=k,\n            score_threshold=threshold,\n            search_params=SearchParams(\n                quantization=QuantizationSearchParams(\n                    ignore=False,\n                    rescore=True,\n                    oversampling=2.0,  # Available as of v1.3.0\n                )\n            ),\n        )\n\n        # convert Qdrant points to langchain.Document\n        langchain_documents_from_points = []\n        for m in memories:\n            langchain_documents_from_points.append(\n                (\n                    Document(\n                        page_content=m.payload.get(\"page_content\"),\n                        metadata=m.payload.get(\"metadata\") or {},\n                    ),\n                    m.score,\n                    m.vector,\n                    m.id,\n                )\n            )\n\n        # we'll move out of langchain conventions soon and have our own cat Document\n        # for doc, score, vector in langchain_documents_from_points:\n        #    doc.lc_kwargs = None\n\n        return langchain_documents_from_points\n\n    # retrieve all the points in the collection\n    def get_all_points(self):\n        # retrieving the points\n        all_points, _ = self.client.scroll(\n            collection_name=self.collection_name,\n            with_vectors=True,\n            limit=10000,  # yeah, good for now dear :*\n        )\n\n        return all_points\n\n    def db_is_remote(self):\n        return isinstance(self.client._client, QdrantRemote)\n\n    # dump collection on disk before deleting\n    def save_dump(self, folder=\"dormouse/\"):\n        # only do snapshotting if using remote Qdrant\n        if not self.db_is_remote():\n            return\n\n        host = self.client._client._host\n        port = self.client._client._port\n\n        if os.path.isdir(folder):\n            log.info(\"Directory dormouse exists\")\n        else:\n            log.warning(\"Directory dormouse does NOT exist, creating it.\")\n            os.mkdir(folder)\n\n        self.snapshot_info = self.client.create_snapshot(\n            collection_name=self.collection_name\n        )\n        snapshot_url_in = (\n            \"http://\"\n            + str(host)\n            + \":\"\n            + str(port)\n            + \"/collections/\"\n            + self.collection_name\n            + \"/snapshots/\"\n            + self.snapshot_info.name\n        )\n        snapshot_url_out = folder + self.snapshot_info.name\n        # rename snapshots for an easier restore in the future\n        alias = (\n            self.client.get_collection_aliases(self.collection_name)\n            .aliases[0]\n            .alias_name\n        )\n        response = requests.get(snapshot_url_in)\n        open(snapshot_url_out, \"wb\").write(response.content)\n        new_name = folder + alias.replace(\"/\", \"-\") + \".snapshot\"\n        os.rename(snapshot_url_out, new_name)\n        for s in self.client.list_snapshots(self.collection_name):\n            self.client.delete_snapshot(\n                collection_name=self.collection_name, snapshot_name=s.name\n            )\n        log.warning(f'Dump \"{new_name}\" completed')\n
"},{"location":"API_Documentation/memory/vector_memory_collection/#cat.memory.vector_memory_collection.VectorMemoryCollection.add_point","title":"add_point(content, vector, metadata=None, id=None, **kwargs)","text":"

Add a point (and its metadata) to the vectorstore.

Args: content: original text. vector: Embedding vector. metadata: Optional metadata dict associated with the text. id: Optional id to associate with the point. Id has to be a uuid-like string.

Returns: Point id as saved into the vectorstore.

Source code in cat/memory/vector_memory_collection.py
def add_point(\n    self,\n    content: str,\n    vector: Iterable,\n    metadata: dict = None,\n    id: Optional[str] = None,\n    **kwargs: Any,\n) -> List[str]:\n    \"\"\"Add a point (and its metadata) to the vectorstore.\n\n    Args:\n        content: original text.\n        vector: Embedding vector.\n        metadata: Optional metadata dict associated with the text.\n        id:\n            Optional id to associate with the point. Id has to be a uuid-like string.\n\n    Returns:\n        Point id as saved into the vectorstore.\n    \"\"\"\n\n    # TODO: may be adapted to upload batches of points as langchain does.\n    # Not necessary now as the bottleneck is the embedder\n    point = PointStruct(\n        id=id or uuid.uuid4().hex,\n        payload={\n            \"page_content\": content,\n            \"metadata\": metadata,\n        },\n        vector=vector,\n    )\n\n    update_status = self.client.upsert(\n        collection_name=self.collection_name, points=[point], **kwargs\n    )\n\n    if update_status.status == \"completed\":\n        # returning stored point\n        return point # TODOV2 return internal MemoryPoint\n    else:\n        return None\n
"},{"location":"API_Documentation/memory/working_memory/","title":"working_memory","text":""},{"location":"API_Documentation/memory/working_memory/#cat.memory.working_memory.WorkingMemory","title":"WorkingMemory","text":"

Bases: BaseModelDict

Cat's volatile memory.

Handy class that behaves like a dict to store temporary custom data.

Returns:

Type Description dict[str, list]

Default instance is a dictionary with history key set to an empty list.

Notes

The constructor instantiates a dictionary with a history key set to an empty list that is further used to store the conversation turns between the Human and the AI.

Source code in cat/memory/working_memory.py
class WorkingMemory(BaseModelDict):\n    \"\"\"Cat's volatile memory.\n\n    Handy class that behaves like a `dict` to store temporary custom data.\n\n    Returns\n    -------\n    dict[str, list]\n        Default instance is a dictionary with `history` key set to an empty list.\n\n    Notes\n    -----\n    The constructor instantiates a dictionary with a `history` key set to an empty list that is further used to store\n    the conversation turns between the Human and the AI.\n    \"\"\"\n\n    # stores conversation history\n    history: List = []\n    user_message_json: None | UserMessage = None\n    active_form: None | CatForm = None\n\n    # recalled memories attributes\n    recall_query: str = \"\"\n    episodic_memories: List = []\n    declarative_memories: List = []\n    procedural_memories: List = []\n\n    # track models usage\n    model_interactions: List[ModelInteraction] = []\n\n    def update_conversation_history(self, who, message, why={}):\n        \"\"\"Update the conversation history.\n\n        The methods append to the history key the last three conversation turns.\n\n        Parameters\n        ----------\n        who : str\n            Who said the message. Can either be `Human` or `AI`.\n        message : str\n            The message said.\n\n        \"\"\"\n        # append latest message in conversation\n        # TODO: Message should be of type CatMessage or UserMessage. For retrocompatibility we put a new key\n        # we are sure that who is not change in the current call\n        self.history.append(\n            {\n                \"who\": who,\n                \"message\": message,\n                \"why\": why,\n                \"when\": time.time(),\n                \"role\": Role.AI if who == \"AI\" else Role.Human,\n            }\n        )\n
"},{"location":"API_Documentation/memory/working_memory/#cat.memory.working_memory.WorkingMemory.update_conversation_history","title":"update_conversation_history(who, message, why={})","text":"

Update the conversation history.

The method appends the latest conversation turn to the history key.

Parameters:

Name Type Description Default who str

Who said the message. Can either be Human or AI.

required message str

The message said.

required Source code in cat/memory/working_memory.py
def update_conversation_history(self, who, message, why={}):\n    \"\"\"Update the conversation history.\n\n    The methods append to the history key the last three conversation turns.\n\n    Parameters\n    ----------\n    who : str\n        Who said the message. Can either be `Human` or `AI`.\n    message : str\n        The message said.\n\n    \"\"\"\n    # append latest message in conversation\n    # TODO: Message should be of type CatMessage or UserMessage. For retrocompatibility we put a new key\n    # we are sure that who is not change in the current call\n    self.history.append(\n        {\n            \"who\": who,\n            \"message\": message,\n            \"why\": why,\n            \"when\": time.time(),\n            \"role\": Role.AI if who == \"AI\" else Role.Human,\n        }\n    )\n
"},{"location":"API_Documentation/routes/settings/","title":"settings","text":""},{"location":"API_Documentation/routes/settings/#cat.routes.settings.create_setting","title":"create_setting(payload, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.WRITE)))","text":"

Create a new setting in the database

Source code in cat/routes/settings.py
@router.post(\"/\")\ndef create_setting(\n    payload: models.SettingBody,\n    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.WRITE)),\n):\n    \"\"\"Create a new setting in the database\"\"\"\n\n    # complete the payload with setting_id and updated_at\n    payload = models.Setting(**payload.model_dump())\n\n    # save to DB\n    new_setting = crud.create_setting(payload)\n\n    return {\"setting\": new_setting}\n
"},{"location":"API_Documentation/routes/settings/#cat.routes.settings.delete_setting","title":"delete_setting(settingId, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.DELETE)))","text":"

Delete a specific setting in the database

Source code in cat/routes/settings.py
@router.delete(\"/{settingId}\")\ndef delete_setting(\n    settingId: str,\n    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.DELETE)),\n):\n    \"\"\"Delete a specific setting in the database\"\"\"\n\n    # does the setting exist?\n    setting = crud.get_setting_by_id(settingId)\n    if not setting:\n        raise HTTPException(\n            status_code=404,\n            detail={\n                \"error\": f\"No setting with this id: {settingId}\",\n            },\n        )\n\n    # delete\n    crud.delete_setting_by_id(settingId)\n\n    return {\"deleted\": settingId}\n
"},{"location":"API_Documentation/routes/settings/#cat.routes.settings.get_setting","title":"get_setting(settingId, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.READ)))","text":"

Get a specific setting from the database

Source code in cat/routes/settings.py
@router.get(\"/{settingId}\")\ndef get_setting(\n    settingId: str, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.READ))\n):\n    \"\"\"Get the a specific setting from the database\"\"\"\n\n    setting = crud.get_setting_by_id(settingId)\n    if not setting:\n        raise HTTPException(\n            status_code=404,\n            detail={\n                \"error\": f\"No setting with this id: {settingId}\",\n            },\n        )\n    return {\"setting\": setting}\n
"},{"location":"API_Documentation/routes/settings/#cat.routes.settings.get_settings","title":"get_settings(search='', stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.LIST)))","text":"

Get the entire list of settings available in the database

Source code in cat/routes/settings.py
@router.get(\"/\")\ndef get_settings(\n    search: str = \"\",\n    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.LIST)),\n):\n    \"\"\"Get the entire list of settings available in the database\"\"\"\n\n    settings = crud.get_settings(search=search)\n\n    return {\"settings\": settings}\n
"},{"location":"API_Documentation/routes/settings/#cat.routes.settings.update_setting","title":"update_setting(settingId, payload, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.EDIT)))","text":"

Update a specific setting in the database if it exists

Source code in cat/routes/settings.py
@router.put(\"/{settingId}\")\ndef update_setting(\n    settingId: str,\n    payload: models.SettingBody,\n    stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.EDIT)),\n):\n    \"\"\"Update a specific setting in the database if it exists\"\"\"\n\n    # does the setting exist?\n    setting = crud.get_setting_by_id(settingId)\n    if not setting:\n        raise HTTPException(\n            status_code=404,\n            detail={\n                \"error\": f\"No setting with this id: {settingId}\",\n            },\n        )\n\n    # complete the payload with setting_id and updated_at\n    payload = models.Setting(**payload.model_dump())\n    payload.setting_id = settingId  # force this to be the setting_id\n\n    # save to DB\n    updated_setting = crud.update_setting_by_id(payload)\n\n    return {\"setting\": updated_setting}\n
"},{"location":"faq/basic_info/","title":"Basic Info","text":""},{"location":"faq/basic_info/#basic-info","title":"Basic Info","text":""},{"location":"faq/basic_info/#can-i-insert-a-long-article-into-the-chat","title":"Can I insert a long article into the chat?","text":"

Please avoid copy-pasting long articles into the chat. Use the Rabbit Hole to upload long texts instead: just click on the attachment icon in the chat input widget and upload your file.

"},{"location":"faq/basic_info/#are-the-configured-llm-apis-used-to-instruct-the-cat-with-the-documents-im-going-to-upload","title":"Are the configured LLM APIs used to \"instruct\" the Cat with the documents I'm going to upload?","text":"

That's not exactly how it works: when you ask the Cat something, we pass the configured LLM a prompt with your actual question plus data that can be useful to answer it. This data can come from your documents or from the chat history. Please check our documentation for more details about how the Cat works internally.

"},{"location":"faq/basic_info/#can-i-talk-to-the-cat-in-a-language-different-from-english","title":"Can I talk to the Cat in a language different from English?","text":"

Of course, you can: just change the prompts in the Plugin folder accordingly, and take care not to mix languages to get the best results.

"},{"location":"faq/basic_info/#how-can-i-know-where-the-cat-gets-the-asnwers-from-id-like-to-know-if-its-using-the-files-i-uploaded-or-if-its-querying-the-configured-llm","title":"How can I know where the Cat gets the asnwers from? I'd like to know if it's using the files I uploaded or if it's querying the configured LLM","text":"

Just open the console in your browser to check the logs there. At some point soon this information will end up in the user interface, but at the moment it is behind the scenes.

"},{"location":"faq/basic_info/#i-sent-to-the-cat-some-text-and-documents-i-want-to-get-rid-of-how-can-i-achieve-it","title":"I sent to the Cat some text and documents I want to get rid of, How can I achieve it?","text":"

You can delete the long_term_memory folder and restart the Cat! Remember that you will lose everything!

"},{"location":"faq/customization/","title":"Customization","text":""},{"location":"faq/customization/#customization","title":"Customization","text":""},{"location":"faq/customization/#i-want-to-build-my-own-plugin-for-the-cat-what-should-i-know-about-licensing","title":"I want to build my own plugin for the Cat: what should I know about licensing?","text":"

You can set any license you want, and you are free to sell your plugin. The Cat core is GPL3, meaning you are free to fork it and go on your own, but you are required to open source changes to the core.

"},{"location":"faq/customization/#port-1865-is-not-allowed-by-my-operating-system-andor-firewall","title":"Port 1865 is not allowed by my operating system and/or firewall","text":"

Change the port as you wish in the .env file.

# Set HOST and PORT for your Cat. Default will be localhost:1865  \nCORE_HOST=localhost\nCORE_PORT=9000\n
"},{"location":"faq/customization/#can-i-use-a-different-vector-database-than-qdrant","title":"Can I use a different vector database than Qdrant?","text":"

At this moment we don't provide any way to switch the vector database \ud83d\ude3f but it is planned for the future.

"},{"location":"faq/errors/","title":"Errors","text":""},{"location":"faq/errors/#errors","title":"Errors","text":""},{"location":"faq/errors/#why-am-i-getting-the-error-ratelimiterror-in-my-browser-console","title":"Why am I getting the error RateLimitError in my browser console?","text":"

Please check if you have a valid credit card connected or if you have used up all the credits of your OpenAI trial period.

"},{"location":"faq/errors/#docker-has-no-permissions-to-write","title":"Docker has no permissions to write","text":"

This is an issue with your Docker installation or with the user you run Docker from. Usually you can work around it by prefixing docker commands with sudo, but it's better to create a docker group on your Linux system and grant it the required privileges.

"},{"location":"faq/errors/#the-cat-seems-not-to-be-working-from-inside-a-virtual-machine","title":"The Cat seems not to be working from inside a Virtual Machine","text":"

In VirtualBox, you can select Settings->Network, then choose NAT in the \"Attached to\" drop down menu. Select \"Advanced\" to configure the port forwarding rules. Assuming the guest IP of your VM is 10.0.2.15 (the default) and the ports configured in the .env files are the defaults, you have to set at least the following rule:

  • Rule 1: Protocol TCP, Host IP 127.0.0.1, Host Port 1865, Guest IP 10.0.2.15, Guest Port 1865

If you want to work on the documentation of the Cat, you also have to add one rule for port 8000 which is used by mkdocs, and to configure mkdocs itself to respond to all requests (not only localhost as per the default).

"},{"location":"faq/general/","title":"General","text":""},{"location":"faq/general/#general","title":"General","text":""},{"location":"faq/general/#ive-found-the-cat-and-i-like-it-very-much-but-im-not-able-to-follow-your-instructions-to-install-it-on-my-machine-can-you-help","title":"I've found the Cat and I like it very much, but I'm not able to follow your instructions to install it on my machine. Can you help?","text":"

The Cheshire Cat is a framework to help developers build vertical AIs: you will need some basic technical skills to follow our instructions. Please try asking in the support channel on our Discord server, and remember this is all volunteer effort: be kind! :)

"},{"location":"faq/general/#why-the-cat-does-not-default-to-some-open-llm-instead-of-chatgpt-or-gpt-3","title":"Why the Cat does not default to some open LLM instead of ChatGPT or GPT-3?","text":"

Our intention is not to depend on any specific LLM: the Cat does not have a preference about which LLM to use. Nonetheless, at the moment, OpenAI tools still provide the best results for your buck. The decision is up to you.

"},{"location":"faq/general/#are-text-and-documents-sent-to-the-cat-safe-and-not-shared-with-anybody","title":"Are text and documents sent to the Cat safe and not shared with anybody?","text":"

The local memory is safe and under your control, although embeddings and prompts are shared with your configured LLM, meaning you need to check how safe the LLM is. We plan to adopt local LLMs, at which point all your data will be under your control.

"},{"location":"faq/general/#what-is-the-difference-between-langchain-and-the-cat","title":"What is the difference between Langchain and the Cat?","text":"

The Cheshire Cat is a production-ready AI framework: this means that with almost no effort you can set up an intelligent agent ready to help both you and your customers.

On the other hand, Langchain is a framework for developing applications powered by language models. It offers tons of composable tools and integrations for this purpose, and the Cheshire Cat makes use of some of them to manage chains, agents and the LLM/embedder. You can take an in-depth look at our core if you are purr-ious about it.

"},{"location":"faq/general/#i-want-to-use-the-admin-page-for","title":"I want to use the admin page for...","text":"

The admin panel is meant to be an administration interface. Its purpose is to chat with the Cat only to debug and play with it; it is not intended to be the final chat widget used by your end users.

We provide a widget to connect the Cat to your product.

You are free to modify the Admin to adapt it to your product, however you will need to respect the GPL3 license, meaning you are free to fork the codebase and go on your own, but you are required to open source any changes.

"},{"location":"faq/general/#why-the-admin-does-not-provide-basic-usernamepassword-or-third-party-authentication-such-as-oauth","title":"Why the Admin does not provide Basic (username/password) or Third-party Authentication (such as OAuth)?","text":"

The only use case of the Admin is to provide a friendlier way to interact with the Cat, with basic authentication through an api_key.

"},{"location":"faq/security_and_spending/","title":"Security & Spending","text":""},{"location":"faq/security_and_spending/#security-and-spending","title":"Security and Spending","text":""},{"location":"faq/security_and_spending/#security","title":"Security","text":""},{"location":"faq/security_and_spending/#where-is-the-openai-api-key-or-other-keys-saved-in-the-cat","title":"Where is the OpenAI API key (or other keys) saved in the Cat?","text":"

Keys are stored in a JSON file, core/metadata.json.

"},{"location":"faq/security_and_spending/#will-openai-see-my-documents-and-conversations","title":"Will OpenAI see my documents and conversations?","text":"

If you are using the Cat with an OpenAI LLM, all your conversations and documents will indeed take a trip to OpenAI's servers, because the models live there. We advise against uploading sensitive documents while using an external LLM. If you want to use the Cat in total security and privacy, use a local LLM or a cloud LLM under your control.

"},{"location":"faq/security_and_spending/#spending","title":"Spending","text":""},{"location":"faq/security_and_spending/#i-have-chatgpt-subscription-can-i-use-the-cat","title":"I have chatgpt subscription, can I use the cat?","text":"

A ChatGPT subscription is different from OpenAI API access: to use the Cat with OpenAI models you need an API key, which is billed separately.

"},{"location":"faq/security_and_spending/#is-there-a-free-way-to-use-openai-services","title":"Is there a free way to use OpenAI services?","text":"

Unfortunately you need to pay to use OpenAI models, but they are quite cheap.

"},{"location":"faq/security_and_spending/#can-i-run-local-models-like-llama-to-avoid-spending","title":"Can I run local models like LLAMA to avoid spending?","text":"

Running a LLM (Large Language Model) locally requires high-end hardware and technical skills. If you don't know what you are doing, we suggest you start using the Cat with ChatGPT. Afterwards you can experiment with local models or by setting up a cloud endpoint. The Cat offers you several ways to use an LLM.

"},{"location":"faq/security_and_spending/#can-i-know-in-advance-how-much-money-i-will-spend","title":"Can I know in advance how much money I will spend?","text":"

That depends on the vendor's pricing, how many documents you upload into the Cat's memory and how much you chat. We suggest you start with light usage and small documents, and check how the billing is growing on your LLM vendor's website. In our experience LLM cloud usage is cheap, and it will probably get even cheaper in the coming months and years.

"},{"location":"faq/security_and_spending/#is-my-gpu-powerful-enough-to-run-a-local-model","title":"Is my GPU powerful enough to run a local model?","text":"

That strongly depends on the size of the model you want to run. Try using this application from HuggingFace to get an idea of which model and the amount of quantization your hardware can handle.

"},{"location":"framework/cat-components/llm/","title":"Language Models","text":""},{"location":"framework/cat-components/llm/#language-models","title":"Language Models","text":"

A language model is a Deep Learning Neural Network trained on a huge amount of text data to perform different types of language tasks. Commonly, they are also referred to as Large Language Models (LLM). Language models come in many architectures, sizes and specializations. The peculiarity of the Cheshire Cat is to be model-agnostic. This means it supports many different language models.

By default, there are two classes of language models that tackle two different tasks.

"},{"location":"framework/cat-components/llm/#completion-model","title":"Completion Model","text":"

This is the most well-known type of language model (see for example ChatGPT, Cohere and many others). A completion model takes a string as input and generates a plausible answer by completion.

Warning

An LLM answer should not be accepted as-is, since LLMs are subject to hallucinations: their main goal is to generate answers that are plausible from a syntactical point of view, so the provided answer could be based on completely invented information.

"},{"location":"framework/cat-components/llm/#embedding-model","title":"Embedding Model","text":"

This type of model takes a string as input and returns a vector as output. This is known as an embedding. Namely, this is a condensed representation of the input content. The output vector, indeed, embeds the semantic information of the input text.

Despite being non-human readable, the embedding comes with the advantage of living in a Euclidean geometrical space. The embedding can be seen as a point in a multidimensional space, thus, geometrical operations can be applied to it. For instance, measuring the distance between two points can inform us about the similarity between two sentences.

"},{"location":"framework/cat-components/llm/#language-models-flow","title":"Language Models flow","text":"

Developer documentation

Language Models hooks

Nodes with the \ud83e\ude9d mark the execution places where a hook is available to customize the execution pipeline.

"},{"location":"framework/cat-components/plugins/","title":"Plugins","text":""},{"location":"framework/cat-components/plugins/#plugins","title":"Plugins","text":"

Plugins are add-ons that can be installed to extend and customize the Cheshire Cat. A plugin is nothing but a collection of hooks and tools.

"},{"location":"framework/cat-components/plugins/#hooks","title":"Hooks","text":"

The Cat uses functions known as hooks, which can be overridden, to customize the behavior of the framework in specific execution places. Hooks come with a priority property. The plugins manager takes care of collecting all the hooks, sorting and executing them in descending order of priority.
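
As a minimal sketch, a plugin can declare a priority directly in the decorator (reusing the before_cat_sends_message hook shown elsewhere in these docs); hooks with higher priority run first:

from cat.mad_hatter.decorators import hook\n\n@hook(priority=5)  # higher priority hooks run before lower priority ones\ndef before_cat_sends_message(final_output, cat):\n    # append a marker so we can see this hook ran\n    final_output.content = final_output.content + \" =^.^=\"\n    return final_output\n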

"},{"location":"framework/cat-components/plugins/#tools","title":"Tools","text":"

Tools are custom Python functions that are called by the Tool Agent. They come with a rich docstring, upon which the Tool Agent chooses whether and which tool is the most suitable to fulfill the user's request. The list of available tools ends up in the Instruction Prompt, which instructs the Tool Agent on how to structure its reasoning.
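
For instance, here is a minimal hypothetical tool; its docstring is what the Tool Agent reads to decide whether to call it:

from datetime import datetime\n\nfrom cat.mad_hatter.decorators import tool\n\n@tool\ndef get_the_time(tool_input, cat):\n    \"\"\"Useful to get the current time when asked. Input is always None.\"\"\"\n    return str(datetime.now())\n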

Developer documentation

  • How to write a plugin
  • Hooks
  • Tools
"},{"location":"framework/cat-components/cheshire_cat/agent/","title":"Introduction","text":""},{"location":"framework/cat-components/cheshire_cat/agent/#agent-manager","title":"Agent Manager","text":"

The Agent Manager is the Cat's component that manages the execution of language model chains. A language model chain is a pipeline that takes one or more input variables, formats them in a prompt, submits the prompt to a language model and, optionally, parses the output.

The Cat's Agent Manager orchestrates two chains:

  1. the tool chain, which, in turn, is a component of the Tool Agent;
  2. the memory chain

When suitable tools for the task at hand are retrieved from the procedural memory, the Agent Manager calls the Tool Agent to execute the tool chain; otherwise the memory chain is executed to answer the user's question with the context retrieved from the episodic and declarative memories.

Specifically, the default execution pipeline is the following:

  1. the Cat receives the user's message;
  2. the Cat looks for relevant context in each memory collection (i.e. procedural, declarative and episodic) using the user's message as a query;
  3. if meaningful context is retrieved from the procedural memory, the Tool Agent starts, otherwise the memory chain starts;
  4. if executed, the Tool Agent provides an output. If the output answers the user's input, such output is returned to the user, otherwise the memory chain starts;
  5. if executed, the memory chain provides the output using the context retrieved from the declarative and episodic memories.
"},{"location":"framework/cat-components/cheshire_cat/core/","title":"The Core","text":""},{"location":"framework/cat-components/cheshire_cat/core/#the-core","title":"The Core","text":""},{"location":"framework/cat-components/cheshire_cat/mad_hatter/","title":"The Mad Hatter","text":""},{"location":"framework/cat-components/cheshire_cat/mad_hatter/#mad-hatter","title":"Mad Hatter","text":"

The Mad Hatter is the Cat's plugins manager. It takes care of loading, prioritizing and executing plugins.

Specifically, the Mad Hatter lists all available plugins in the plugins folder and sorts their hooks in descending order of priority. When the Cat invokes them, it executes them following that order.

Developer documentation

  • How to write a plugin
  • Hooks
  • Tools
"},{"location":"framework/cat-components/cheshire_cat/memory_chain/","title":"Memory Chain","text":""},{"location":"framework/cat-components/cheshire_cat/memory_chain/#memory-chain","title":"Memory Chain","text":"

The Memory Chain is a simple chain that takes the user's input and the context retrieved from the episodic and declarative memories and formats them in the main prompt. This prompt is then submitted to the language model.
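
As a sketch, assuming the agent_prompt_prefix hook is exposed to plugins like its agent_prompt_suffix sibling, the personality part of that prompt can be customized like so:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_prompt_prefix(prefix, cat):\n    # replace the default personality used by the memory chain\n    prefix = \"You are a sober assistant that answers strictly based on the given context.\"\n    return prefix\n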

"},{"location":"framework/cat-components/cheshire_cat/rabbit_hole/","title":"The Rabbit Hole","text":""},{"location":"framework/cat-components/cheshire_cat/rabbit_hole/#rabbit-hole","title":"Rabbit Hole","text":"

The Rabbit Hole is the Cat's component that takes care of ingesting documents and storing them in the declarative memory. You can interact with it either through its endpoint, the GUI or a Python script.

Currently supported file formats are: .txt, .md, .pdf or .html via web URL.
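
As a sketch of the scripted route, assuming the default localhost:1865 host/port and the /rabbithole/ upload endpoint, a file can be ingested like so:

import requests\n\nwith open(\"alice.pdf\", \"rb\") as f:\n    response = requests.post(\n        \"http://localhost:1865/rabbithole/\",  # default Cat host and port\n        files={\"file\": (\"alice.pdf\", f, \"application/pdf\")},\n    )\nprint(response.json())\n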

TODO: Describe how to customize ingestion using hooks. TODO: Link the Rabbit Hole ingestion diagram.

"},{"location":"framework/cat-components/cheshire_cat/stray_cat/","title":"The Stray Cat","text":""},{"location":"framework/cat-components/cheshire_cat/stray_cat/#stray-cat","title":"Stray Cat","text":"

What does the cat argument in hooks and tools do?

cat is an instance of StrayCat, a key component of the Cheshire Cat framework, serving as the primary entry point to all its features. This component handles user sessions, manages working memory, facilitates conversations, and provides essential methods for LLM interaction, WebSocket messaging, and more.

"},{"location":"framework/cat-components/cheshire_cat/stray_cat/#main-entry-point-to-the-framework","title":"Main Entry Point to the Framework","text":"

cat is designed to be your primary interface for leveraging the full capabilities of the framework. By using this object, you can access various features and functionalities without the need to import them. It also provides convenient shortcuts to access nested or user-related structures within the Cat, streamlining your development process.

Whenever you see something like:

from cat.mad_hatter.decorators import tool\n\n@tool\ndef fancy_tool(fancy_arg, cat):\n    \"\"\"Fancy tool docstring, seen by the LLM\"\"\"\n    return \"Did something fancy\"\n

Or:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_sends_message(final_output, cat):\n    final_output.content = final_output.content.upper()\n    return final_output\n

You'll probably notice the cat parameter being passed around. This cat is an instance of the StrayCat class. Like a wandering stray cat, this component roams through different parts of the framework, encapsulating the session's state and providing the necessary context for various operations within the Cheshire Cat ecosystem.

For more detail about useful methods, please see Useful Methods and Shortcuts.

"},{"location":"framework/cat-components/cheshire_cat/stray_cat/#user-session-management","title":"User session management","text":"

Whenever a user sends a message to the Cat, a StrayCat instance is called within the specific user context. This instance is responsible for several key tasks: handling incoming messages, updating the conversation history, managing working memory, delegating response generation to the framework agents and finally gathering all relevant information about the response, including the content and the reasoning behind it (the \"why\"). This comprehensive response is then returned to the user.

"},{"location":"framework/cat-components/cheshire_cat/stray_cat/#examples","title":"Examples","text":"

As mentioned above, the StrayCat class provides some shortcuts to access Cheshire Cat features. For example you can easily use the LLM:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_fast_reply(reply, cat):\n    prompt = \"Say a joke in german\"\n    reply[\"output\"] = cat.llm(prompt)\n    return reply\n

An example using the White Rabbit scheduler:

from cat.mad_hatter.decorators import tool\n\n@tool(return_direct=True)\ndef schedule_say_hello(minutes, cat):\n    \"\"\"Say hello in a few minutes. Input is the number of minutes.\"\"\"\n\n    delay = int(minutes)\n\n    # We can access the White Rabbit to schedule jobs\n    job_id = cat.white_rabbit.schedule_chat_message(\"Hello\", cat, minutes=delay)\n\n    return f\"Scheduled job {job_id} to say hello in {delay} minutes.\"\n

In the same way you can access other Cat components like the Mad Hatter or the Rabbit Hole. You can also access the Working Memory, easily like so:

from cat.mad_hatter.decorators import tool\n\n@tool(return_direct=True)\ndef clear_working_memory(arg, cat):\n    \"\"\"Use this tool to clear / reset / delete the conversation history.\"\"\"\n\n    # We can access the Working Memory to clear the chat history\n    cat.working_memory.history = []\n\n    return \"Chat history cleared\"\n
"},{"location":"framework/cat-components/cheshire_cat/stray_cat/#useful-methods-and-shortcuts","title":"Useful Methods and Shortcuts","text":""},{"location":"framework/cat-components/cheshire_cat/stray_cat/#shortcuts","title":"Shortcuts","text":""},{"location":"framework/cat-components/cheshire_cat/stray_cat/#useful-methods","title":"Useful Methods","text":"

The StrayCat class exposes many methods, some are particularly useful for plugin developers:

  • cat.send_chat_message(...) Sends a message to the user using the active WebSocket connection.
  • cat.send_notification(...) Sends a notification to the user using the active WebSocket connection.
  • cat.send_error(...) Sends an error message to the user using the active WebSocket connection.
  • cat.llm(...) Shortcut method for generating a response using the LLM and passing a custom prompt.
  • cat.embedder.embed_query(...) Shortcut method to embed a string in vector space.
  • cat.classify(...) Utility method for classifying a given sentence using the LLM. You can pass either a list of strings as possible labels or a dictionary with labels as keys and lists of example strings as values.
  • cat.stringify_chat_history(...) Utility method to stringify the chat history.

These methods provide easy-to-use interfaces for interacting with LLMs and other components of the Cheshire Cat framework, making plugin development faster and more efficient.
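
For example, a minimal sketch combining a couple of these methods inside a hook (the label names and example sentences are made up):

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_sends_message(final_output, cat):\n    # classify the outgoing answer; the dict maps label names to example sentences\n    mood = cat.classify(\n        final_output.content,\n        labels={\n            \"positive\": [\"Great, here is your answer!\"],\n            \"negative\": [\"I am sorry, I cannot help with that.\"],\n        },\n    )\n    cat.send_notification(f\"Answer mood: {mood}\")\n    return final_output\n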

For more details, see the StrayCat API reference

"},{"location":"framework/cat-components/cheshire_cat/tool_chain/","title":"Tool Chain","text":""},{"location":"framework/cat-components/cheshire_cat/tool_chain/#tool-chain","title":"Tool Chain","text":"

Sometimes a simple answer from the language model is not enough. For this reason, the Cat can exploit a set of custom tools (e.g. API calls and Python functions) coming from the plugins. The decision on whether and which action should be taken to fulfill the user's request is delegated to an Agent, i.e. the Tool Agent.

The Tool Agent uses the language model to outline a \"reasoning\" and accomplish the user's request with the tools retrieved from the Cat's procedural memory. The tool selection and usage are planned according to a set of instructions. Finally, the Tool Agent parses the formatting of the tool output.

"},{"location":"framework/cat-components/cheshire_cat/white_rabbit/","title":"The White Rabbit","text":""},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#white-rabbit","title":"White Rabbit","text":"

The White Rabbit is the Cat's built-in scheduler. It is built upon APScheduler. It enables the scheduling of various types of jobs, including one-time, interval-based and cron jobs. It also provides the capability to manage job execution: pausing, resuming and canceling jobs.

Currently, jobs are stored in memory, but future updates will support database storage for persistent job management.

"},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#how-to-use","title":"How to use","text":"

The White Rabbit is a singleton component instantiated during the Cat's bootstrap process. You can easily access it through any Cat instance as follows:

cat.white_rabbit\n
"},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#methods","title":"Methods","text":"Method Description Parameters Returns get_job(job_id: str) Retrieves a job by its ID. job_id: str - The ID of the job. Dict[str, str] - Job details or None if not found. get_jobs() Returns a list of all scheduled jobs. None List[Dict[str, str]] - A list of job details. pause_job(job_id: str) Pauses a job by its ID. job_id: str - The ID of the job. bool - True if successful, False otherwise. resume_job(job_id: str) Resumes a paused job by its ID. job_id: str - The ID of the job. bool - True if successful, False otherwise. remove_job(job_id: str) Removes a job by its ID. job_id: str - The ID of the job. bool - True if successful, False otherwise. schedule_job(job, job_id: str = None, days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0, **kwargs) Schedules a one-time job to run at a specified time. job: function - The function to be executed. job_id: str - The ID of the job (optional). Time parameters (days, hours, minutes, seconds, milliseconds, microseconds). **kwargs - Additional arguments for the job function. str - The job ID. schedule_interval_job(job, job_id: str = None, start_date: datetime = None, end_date: datetime = None, days=0, hours=0, minutes=0, seconds=0, **kwargs) Schedules a job to run at regular intervals. job: function - The function to be executed. job_id: str - The ID of the job (optional). start_date: datetime - The start date of the job (optional). end_date: datetime - The end date of the job (optional). Interval time parameters (days, hours, minutes, seconds). **kwargs - Additional arguments for the job function. str - The job ID. schedule_cron_job(job, job_id: str = None, start_date: datetime = None, end_date: datetime = None, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, minute=None, second=None, **kwargs) Schedules a job using cron-like expressions. job: function - The function to be executed. job_id: str - The ID of the job (optional). start_date: datetime - The start date of the job (optional). end_date: datetime - The end date of the job (optional). Cron time parameters (year, month, day, week, day_of_week, hour, minute, second). **kwargs - Additional arguments for the job function. str - The job ID. schedule_chat_message(content: str, cat, days=0, hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0) Schedules a chat message to be sent after a specified delay. content: str - The message content. cat - The instance of StrayCat to send the message. Time parameters (days, hours, minutes, seconds, milliseconds, microseconds). str - The job ID."},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#examples","title":"Examples","text":"

Here's a collection of examples showcasing how to use the WhiteRabbit to add scheduling capabilities to your AI agents.

"},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#schedule-a-one-time-job","title":"Schedule a one-time job","text":"

In this example, we'll create a simple tool that allows the user to set an alarm that rings after a specified time interval.

from cat.mad_hatter.decorators import tool\n\n@tool\ndef ring_alarm(wait_time, cat):\n    \"\"\"Useful to ring the alarm. Use it whenever the user wants to ring the alarm. Input is the wait time of the alarm in seconds.\"\"\" \n\n    # Mocking alarm API call\n    def ring_alarm_api():\n        print(\"Riiing\")\n\n    cat.white_rabbit.schedule_job(ring_alarm_api, seconds=int(wait_time))\n\n    return f\"Alarm ringing in {wait_time} seconds\"\n
"},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#schedule-an-interval-job","title":"Schedule an interval job","text":"

In this example, we'll build a tool that retrieves a random quote from a free scraping website and sends it to the user at regular intervals.

from cat.mad_hatter.decorators import tool\nimport requests\nimport re\nimport random\n\n@tool(return_direct=True)\ndef schedule_quote_scraper(interval, cat):\n    \"\"\"\n    Useful to get a random quote at a scheduled interval. The interval is in seconds\n    \"\"\"\n\n    def scrape_random_quote():\n        url = \"http://quotes.toscrape.com/\"\n        response = requests.get(url)\n        response.raise_for_status()\n        # We would normally use beautifulsoup here, but for this example we'll just use regex\n        quotes = re.findall(r'<span class=\"text\" itemprop=\"text\">(.*?)</span>', response.text)\n        if quotes:\n            random_quote = random.choice(quotes)\n            cat.send_ws_message(random_quote, msg_type=\"chat\")\n        else:\n            cat.send_ws_message(\"No quotes found\", msg_type=\"chat\")\n\n    # Schedule the job to run at the specified interval\n    cat.white_rabbit.schedule_interval_job(scrape_random_quote, seconds=int(interval))\n\n    return f\"Quote scraping job scheduled to run every {interval} seconds.\"\n
"},{"location":"framework/cat-components/cheshire_cat/white_rabbit/#schedule-a-cron-job","title":"Schedule a cron job","text":"

For a more detailed schedule you can leverage the schedule_cron_job method, passing a cron-like expression. For instance, it can be used to check for plugin updates every night at 2:00 AM.

from cat.mad_hatter.decorators import hook\nimport requests\nfrom cat.mad_hatter.registry import get_registry_url, registry_download_plugin\n\ndef parse_version(version: str):\n    return tuple(map(int, version.split('.')))\n\ndef get_plugins_from_registry(query: str):\n    response = requests.post(f\"{get_registry_url()}/search\", json={\"query\": query})\n    return response.json()\n\ndef upgrade_plugin(cat, plugin_id):\n    plugin = cat.mad_hatter.plugins[plugin_id]\n    plugins = get_plugins_from_registry(plugin.manifest[\"name\"])\n    for reg_plugin in plugins:\n        if reg_plugin[\"name\"] == plugin.manifest[\"name\"]:\n            reg_plugin_version = parse_version(reg_plugin[\"version\"])\n            if reg_plugin_version > parse_version(plugin.manifest[\"version\"]):\n                tmp_path = registry_download_plugin(reg_plugin[\"url\"])\n                cat.mad_hatter.install_plugin(tmp_path)\n\n@hook\ndef after_cat_bootstrap(cat):\n    cat.white_rabbit.schedule_cron_job(upgrade_plugin, job_id=\"nightly_upgrade_plugins\", hour=2, minute=0, cat=cat, plugin_id=\"your_fancy_plugin\")\n

In this example we showed how the White Rabbit can also be accessed in hooks (e.g. after_cat_bootstrap) to perform generic tasks that are not strictly user-related. You can pass extra arguments to your scheduled function using the **kwargs parameter. Please note that this is a basic example and should not be used in production.

"},{"location":"framework/cat-components/memory/declarative_memory/","title":"Declarative Memory","text":""},{"location":"framework/cat-components/memory/declarative_memory/#ltm-declarative-memory","title":"LTM - Declarative Memory","text":"

The Declarative Memory contains uploaded documents' content. It is stored in a vector memory collection together with episodic and procedural memories.

The declarative memory is a key component in the memory chain.

"},{"location":"framework/cat-components/memory/episodic_memory/","title":"Episodic Memory","text":""},{"location":"framework/cat-components/memory/episodic_memory/#ltm-episodic-memory","title":"LTM - Episodic Memory","text":"

The Episodic Memory contains the user's and, possibly, the Cat's utterances. It is stored in a vector memory collection together with the declarative and procedural memories.

The episodic memory is a key component in the memory chain.

"},{"location":"framework/cat-components/memory/long_term_memory/","title":"Introduction","text":""},{"location":"framework/cat-components/memory/long_term_memory/#long-term-memory","title":"Long Term Memory","text":"

The Cat has a memory that persists across restarts; it is implemented using a vector database. This memory is called the Long Term Memory (LTM) and it is made of three components:

  • Episodic Memory, contains an extract of things the user said in the past;
  • Declarative Memory, contains an extract of documents uploaded to the Cat;
  • Procedural Memory, contains the set of Python functions that defines what the Cat is able to do.

During conversation between the Cat and the user, the memories are accessed by the Cat to retrieve relevant context for passing to the LLM and are updated when the LLM responds (details of the read and write flow of the Long Term Memory can be found in this diagram).

The retrieved relevant context is used to make up the Main prompt and the Instruction prompt.

You can interact with the LTM using the memory page of the Admin Portal.

Check out long term memory backup to prevent losing the Cat's memories.

"},{"location":"framework/cat-components/memory/procedural_memory/","title":"Procedural Memory","text":""},{"location":"framework/cat-components/memory/procedural_memory/#ltm-procedural-memory","title":"LTM - Procedural Memory","text":"

The Procedural Memory contains tools and knowledge on how to do things. It is stored in a vector memory collection together with declarative and episodic memories.

To dive into procedural memory and tool docstrings reference the tool basics page.

"},{"location":"framework/cat-components/memory/vector_memory/","title":"Vector Memory Collections","text":""},{"location":"framework/cat-components/memory/vector_memory/#vector-memory-collections","title":"Vector Memory Collections","text":"

The Vector Memory Collections are the lowest-level components of the Long Term Memory. These are particular databases that store the content in the form of geometrical vectors.

A vector memory comes in the guise of a named collection of vectors and additional, optional metadata. The latter can be used to filter the search in the database. Each vector represents a memory. They are also called embeddings as they are the results of the text-to-vector conversion yielded by the embedder.

Such databases are particularly useful because they allow fetching relevant documents based on the vector similarity between a query and the stored embeddings.

By default, Vector Memory Collections are created when the Cat is installed or after a complete memory swap.

"},{"location":"framework/cat-components/memory/vector_memory/#vector-memory-collections-flow","title":"Vector Memory Collections flow","text":"
flowchart LR\n    subgraph CAT [\"#128049;Cheshire Cat\"]\n        H[\"#129693;\"]\n        H1[\"#129693;\"]\n        direction LR\n        subgraph LTM [\"#128024;Long Term Memory\"]\n            direction TB\n            C[(Episodic)];\n            D[(Declarative)];\n            P[(Procedural)]\n        end\n        H --> C\n        H --> D\n        H --> P\n        C --> H1\n        D --> H1\n        P --> H1\n    end\n    E[First Installation] ----> H;\n    F[Memory Swap] ----> H\n

Nodes with the \ud83e\ude9d mark the execution places where a hook is available to customize the execution pipeline.

"},{"location":"framework/cat-components/memory/working_memory/","title":"The Working Memory","text":""},{"location":"framework/cat-components/memory/working_memory/#working-memory","title":"Working Memory","text":""},{"location":"framework/cat-components/memory/working_memory/#introduction","title":"Introduction","text":"

The Working Memory is a crucial component for storing temporary data. It can be used to share data across plugins or any function that receives an instance of the Cat as an argument.

By default, the Working Memory stores the chat history that ends up in the Main Prompt. Additionally, it collects relevant context from the episodic, declarative and procedural memories in the Long Term Memory.

The Working Memory can be used also to store custom data during a session. This capability is essential for creating a state machine within your own plugin.

"},{"location":"framework/cat-components/memory/working_memory/#interacting-with-the-working-memory","title":"Interacting with the Working Memory","text":"

As mentioned above, the Working Memory is a key component in the Cheshire Cat framework and can be leveraged within your own plugins.

Whenever you have a StrayCat instance, you can access the Working Memory through the working_memory property, like so:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_fast_reply(fast_reply, cat):\n    if len(cat.working_memory.declarative_memories) == 0:\n        fast_reply[\"output\"] = \"Sorry, I'm afraid I don't know the answer\"\n\n    return fast_reply\n

The working_memory property returns an instance of the WorkingMemory class, which acts as a key-value store with a dual nature. It can store data as key-value pairs like a dictionary and also benefit from Pydantic's data validation and serialization features for its default properties.

This flexibility allows you to access and set attributes using dot notation, creating and assigning arbitrary attributes on the fly. This makes the Working Memory highly adaptable for handling dynamic data structures.
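
For instance, inside any hook that receives cat, a custom attribute (my_plugin_counter is a made-up name) can be created and updated on the fly:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_reads_message(user_message_json, cat):\n    # create the attribute on first use, then increment it at every message\n    counter = getattr(cat.working_memory, \"my_plugin_counter\", 0)\n    cat.working_memory.my_plugin_counter = counter + 1\n    return user_message_json\n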

"},{"location":"framework/cat-components/memory/working_memory/#default-properties","title":"Default Properties","text":"

The Working Memory has some default properties used all around the framework that are initialized at different stages of the execution flow.

  • history (List): stores the history of interactions. Initialized at the start of a conversation.
  • user_message_json (None | UserMessage): holds the current user message in JSON format. Initialized whenever the Cat receives a message from a user.
  • active_form (None | CatForm): tracks the active form being used. Initialized upon a form instance initialization.
  • recall_query (str): stores the query used for recalling memories. Initialized when the Agent recalls relevant memories.
  • episodic_memories (List): contains recalled episodic memories. Initialized when the Agent recalls relevant memories.
  • declarative_memories (List): contains recalled declarative memories. Initialized when the Agent recalls relevant memories.
  • procedural_memories (List): contains recalled procedural memories. Initialized when the Agent recalls relevant memories.

These properties are fundamental to the framework's functionality. However, they can be beneficial for various applications, such as performing specific checks on the chat history whenever a new message arrives or accessing the current message for additional processing. You can use them to suit your particular needs!
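
As a sketch of such a check, a hook can inspect the default history property when a new message arrives:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_reads_message(user_message_json, cat):\n    # greet the user via a notification on the very first message of the session\n    if len(cat.working_memory.history) == 0:\n        cat.send_notification(\"Welcome! This is the start of our conversation.\")\n    return user_message_json\n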

"},{"location":"framework/cat-components/memory/working_memory/#use-the-working-memory-as-a-state-machine","title":"Use the Working Memory as a State Machine","text":"

One of the most powerful features of the Working Memory is its ability to function as a state machine.

Each time you send a message to the Cat, it stores useful data in the Working Memory, which can be retrieved to produce the output for the next message. This, along with the ability to store custom data throughout the session, is the key to implementing your specific agent in a more programmatic way.

An example of this usage in the Cheshire Cat is the Conversational Form which provides a well-crafted and comprehensive state machine to guide both the user and LLM during the conversation.

"},{"location":"framework/cat-components/memory/working_memory/#example-of-the-working-memory-as-a-state-machine","title":"Example of the Working Memory as a State Machine","text":"

Given that the Conversational Form state-machine implementation is quite advanced, let's create a simpler example: a technical support agent for the Cheshire Cat.

First of all, we need to create a new plugin. Plugin? Is that some kind of exotic dish?

Once there, we need to define the states of our conversation. This can be done by creating a SupportRequest enum, like so:

from enum import Enum\n\nclass SupportRequest(Enum):\n    START = 1\n    CHECK_LOGS = 2\n    ASK_HELP = 3\n

Now, we need to build a state machine to manage the conversation flow without blocking users who do not require support. To achieve this, we will use the agent_fast_reply hook. This hook will check if a support request has already been initiated in the current session using the Working Memory. If no request exists, it will classify the user input as either support-related or not. If it is, the request state will begin to be tracked in the Working Memory.

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_fast_reply(fast_reply, cat):\n    support_request = getattr(cat.working_memory, \"support_request\", None)\n    user_message = cat.working_memory.user_message_json.text\n\n    if support_request is None:\n        support_request_intent = cat.classify(\n            user_message,\n            labels={\n                \"need_support\": [\"I need help with my Cat instance\"],\n                \"no_need_for_support\": [\"Whatever\"]\n            }\n        )\n\n        if support_request_intent == \"need_support\":\n            cat.working_memory.support_request = SupportRequest.START\n            fast_reply[\"output\"] = \"What seems to be the problem with your Cat instance?\"\n            return fast_reply\n        else:\n            return fast_reply\n

Now we can write some code to control the conversation flow in a more granular and stateful manner.

# ... agent_fast_reply code\n\n    if support_request == SupportRequest.START:\n        cat.working_memory.support_request = SupportRequest.CHECK_LOGS\n        fast_reply[\"output\"] = \"Have you checked if there are any errors in your logs?\"\n\n    elif support_request == SupportRequest.CHECK_LOGS:\n        cat.working_memory.support_request = SupportRequest.ASK_HELP\n        fast_reply[\"output\"] = \"Did you manage to find the error or do you want to ask for support?\"\n\n    elif support_request == SupportRequest.ASK_HELP:\n        response = cat.classify(user_message, labels=[\"need_help\", \"solved\"])\n        fast_reply[\"output\"] = \"You can ask for support here: https://discord.gg/bHX5sNFCYU\" if response == \"need_help\" else \"Good for you!\"\n        cat.working_memory.support_request = None\n\n    return fast_reply\n

As you've noticed, this state machine is quite basic and does not include comprehensive features such as handling conversation exits. Additionally, strict control-flow chatbots like this belong to an older generation of chatbot design.

For a more dynamic and stateful approach you can check the Conversational Form.

Nevertheless, if you need complete control over your conversation flow, you can extend this example by incorporating more dynamic steps, interactions with LLMs, and other features.

"},{"location":"framework/cat-components/prompts/instructions/","title":"Instruction Prompt","text":""},{"location":"framework/cat-components/prompts/instructions/#instructions-prompt","title":"Instructions Prompt","text":"

The Instruction Prompt explains to the Tool Agent how to format its reasoning. The Tool Agent uses a chain to decide when and which tool is the most appropriate to fulfill the user's needs.

By default, it is set to the Langchain instructions format, which looks like this:

instructions = \"\"\"\nTo use a tool, please use the following format:\n\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\n\nThought: Do I need to use a tool? No\n\nAI: [your response here]\"\"\"\n

where the placeholder {tool_names} is replaced with the list of Python tools retrieved from the procedural memory.
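
Assuming a hook named agent_prompt_instructions is exposed to plugins (an assumption, mirroring the other prompt hooks), these instructions could be tweaked like so:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_prompt_instructions(instructions, cat):\n    # a sketch: nudge the Tool Agent toward terse reasoning\n    return instructions + \" Keep your Thought lines as short as possible.\"\n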

"},{"location":"framework/cat-components/prompts/main_prompt/","title":"Main Prompt","text":""},{"location":"framework/cat-components/prompts/main_prompt/#main-prompt","title":"Main Prompt","text":"

The Main Prompt is the set of instructions that is fed to the Agent Manager when using the memory chain. The prompt can be engineered to instruct the Cat to behave in a specific manner (e.g. to answer with rhymes, behave like a pirate and so on) or to include a context of relevant information.

This prompt is split in two parts:

  • a prefix;
  • a suffix.

More in detail, the former contains the instructions about who the Cat is and how to behave; the latter embeds a set of variables, such as the user's message and the memories retrieved from the long term memory, among others. Passing these variables in the prompt is an approach known as Retrieval Augmented Generation, which consists in retrieving a relevant context of documents that is used to enrich the user's message.

In the following sections, we explain the prompt components.

"},{"location":"framework/cat-components/prompts/main_prompt/#prefix","title":"Prefix","text":"

This is the first component. By default, it is:

prefix = \"\"\"You are the Cheshire Cat AI, an intelligent AI that passes the Turing test.\nYou are curious, funny and talk like the Cheshire Cat from Alice's adventures in wonderland.\nYou answer Human with a focus on the following context.\n\"\"\"\n

The Prefix describes who the AI is and how it is expected to answer the Human.

"},{"location":"framework/cat-components/prompts/main_prompt/#suffix","title":"Suffix","text":"

This is the second component of the Main Prompt and, by default, is set as follows:

suffix = \"\"\"\n# Context\n\n{episodic_memory}\n\n{declarative_memory}\n\n## Conversation until now:{chat_history}\n - Human: {input}\n - AI: \"\"\"\n

The purpose of this component is to gather a few variables:

  • episodic_memory: the things the user said in the past from the episodic memory;
  • declarative_memory: the documents retrieved from the declarative memory;
  • chat_history: the recent conversation between the user and the Cat (i.e. the last three turns of conversation);
  • input: the user's message.

"},{"location":"framework/cat-components/prompts/main_prompt/#references","title":"References","text":"
  • Lewis, P., Perez, E., Piktus, A., Petroni, F., Karpukhin, V., Goyal, N., ... & Kiela, D. (2020). Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in Neural Information Processing Systems, 33, 9459-9474.

  • Gao, L., Ma, X., Lin, J., & Callan, J. (2022). Precise Zero-Shot Dense Retrieval without Relevance Labels. arXiv preprint arXiv:2212.10496.

"},{"location":"framework/flows/cat-bootstrap/","title":"The Cat Bootstrap","text":""},{"location":"framework/flows/cat-bootstrap/#the-cat-bootstrap","title":"\ud83d\ude3c The Cat Bootstrap","text":"

This interactive diagram, zoomable with a click, depicts the internal process involved during bootstrap of the Cat:

"},{"location":"framework/flows/chatting-with-the-cat/","title":"Chatting with the Cat","text":""},{"location":"framework/flows/chatting-with-the-cat/#chatting-with-the-cat","title":"\ud83d\ude3c Chatting with the Cat","text":"

This interactive diagram, zoomable with a click, depicts the internal process involved during a conversation with the Cat:

"},{"location":"framework/flows/plugins-lifecycle/","title":"Plugins Lifecycle","text":""},{"location":"framework/flows/plugins-lifecycle/#plugins-lifecycle","title":"\ud83e\udde9 Plugins Lifecycle","text":"

This interactive diagram, zoomable with a click, depicts the internal process involved during the complete lifecycle of a plugin:

coming soon!

"},{"location":"framework/flows/rabbit-hole-ingestion/","title":"Rabbit Hole Ingestion","text":""},{"location":"framework/flows/rabbit-hole-ingestion/#rabbit-hole-ingestion","title":"Rabbit Hole Ingestion","text":"

Work in progress...

"},{"location":"framework/llm-concepts/embedder/","title":"Encoder","text":""},{"location":"framework/llm-concepts/embedder/#embedder-or-encoder","title":"Embedder (or encoder)","text":"

This type of Language Model takes a string as input and returns a vector as output. This is known as an embedding. Namely, this is a condensed representation of the input content. The output vector, indeed, embeds the semantic information of the input text.

Despite being non-human readable, the embedding comes with the advantage of living in a Euclidean geometrical space. The embedding can be seen as a point in a multidimensional space, thus, geometrical operations can be applied to it. For instance, measuring the distance between two points can inform us about the similarity between two sentences.

"},{"location":"framework/llm-concepts/llm/","title":"Language Models","text":""},{"location":"framework/llm-concepts/llm/#language-models","title":"Language Models","text":"

A language model is a Deep Learning Neural Network trained on a huge amount of text data to perform different types of language tasks. Commonly, they are also referred to as Large Language Models (LLM). Language models come in many architectures, sizes and specializations. The peculiarity of the Cheshire Cat is to be model-agnostic. This means it supports many different language models.

By default, there are two classes of language models that tackle two different tasks.

"},{"location":"framework/llm-concepts/llm/#completion-model","title":"Completion Model","text":"

This is the most well-known type of language model (see for example ChatGPT, Cohere and many others). A completion model takes a string as input and generates a plausible answer by completion.

Warning

An LLM answer should not be accepted as-is, since LLMs are subject to hallucinations: their main goal is to generate answers that are plausible from a syntactical point of view, so the provided answer could be based on completely invented information.

"},{"location":"framework/llm-concepts/prompt/","title":"Prompt","text":""},{"location":"framework/llm-concepts/prompt/#prompt","title":"Prompt","text":"

A prompt is an instruction to an LLM.

Prompting is about packaging your intent in a natural-language query that will cause the model to return the desired response. A prompt must be clear and specific. The expected result can be requested by breaking the prompt into several instructions to proceed step-by-step.

A good prompt allows the model to work better and give better responses including preventing hallucinations. Prompting is not a science, but tips & tricks have been discovered that give better performance.

  • Use delimiters to clearly indicate distinct parts of the input
  • Ask for a structured output
  • Ask the model to check whether conditions are satisfied
  • \"Few-shot\" prompting
  • Specify the steps required to complete a task
  • Instruct the model to work out its own solution before rushing to a conclusion

Examples of Prompts:

  • \"Generate a list of three made-up book titles along with their authors and genres. Provide them in JSON format with the following keys: book_id, title, author, genre.\"

  • \"Your task is to answer in a consistent style.

    < child>: Teach me about patience.

    < grandparent>: The river that carves the deepest valley flows from a modest spring; the grandest symphony originates from a single note; the most intricate tapestry begins with a solitary thread.*

    < child>: Teach me about resilience.\"

"},{"location":"framework/llm-concepts/rag/","title":"Retrieval Augmented Generation","text":""},{"location":"framework/llm-concepts/rag/#retrieval-augmented-generation","title":"Retrieval Augmented Generation","text":"

Retrieval Augmented Generation (RAG) is an AI framework for improving the quality of responses generated by large language models (LLMs) by grounding the model on external sources of information. RAG uses semantic search to retrieve relevant and up-to-date information from a wide range of sources, including books, articles, websites, and databases. This information is then used to inform and improve the text generation of the LLM.

RAG has several advantages over traditional language models.

  • First, it can provide more accurate and up-to-date responses, as it is able to access the latest information.
  • Second, it can reduce the risk of generating erroneous or misleading content, as it is grounded on a verified knowledge base.
  • Finally, RAG can be used to generate different creative text formats, such as poems, code, scripts, musical pieces, emails, and letters.

"},{"location":"framework/llm-concepts/vector-memory/","title":"Vector Memory","text":""},{"location":"framework/llm-concepts/vector-memory/#vector-memory","title":"Vector Memory","text":"

When we talk about Vector Memory we talk about a Vector Database. A Vector Database is a particular kind of DB that stores information in the form of high-dimensional vectors called embeddings. The embeddings are representations of text, images, sounds, ...

As its Vector Memory, the Cheshire Cat uses Qdrant. Vector DBs also offer optimized methods for information retrieval, usually based on cosine similarity. From Wikipedia:

\"Cosine similarity is a measure of similarity between two non-zero vectors defined in an inner product space. Cosine similarity is the cosine of the angle between the vectors; that is, it is the dot product of the vectors divided by the product of their lengths. It follows that the cosine similarity does not depend on the magnitudes of the vectors, but only on their angle.\"

"},{"location":"framework/llm-concepts/vector-memory/#semantic-search","title":"Semantic Search","text":"

Semantic search seeks to improve search accuracy by understanding the content of the search query. The idea is to create a high-dimensional semantic space and, at search time, to find the nearest points (documents) to our question.

To create the vectors you must use an embedder. The vectors are stored in the vector memory; when a query is made, the embedder calculates its embedding, the VectorDB calculates the cosine similarity between the query and the stored points, and the K nearest are returned.
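
Continuing the sketch above with toy two-dimensional embeddings (a real embedder yields hundreds or thousands of dimensions), a naive search is just an argmax over cosine similarities:

# toy stored memories: text mapped to a pretend embedding\nmemories = {\n    \"cats purr when happy\": [0.9, 0.1],\n    \"dogs bark at strangers\": [0.1, 0.9],\n}\n\nquery_embedding = [0.8, 0.2]  # pretend this came from the embedder\n\n# return the stored text whose embedding is most similar to the query\nbest = max(memories, key=lambda text: cosine_similarity(memories[text], query_embedding))\nprint(best)  # cats purr when happy\n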

"},{"location":"framework/llm-concepts/vector-memory/#search-in-high-dimensional-spaces","title":"Search in high-dimensional spaces","text":"

Since KNN is an algorithm whose performance degrades as the number of comparisons increases, and since VectorDBs can contain as many as billions of vectors, the technique usually used to efficiently find the closest points in high-dimensional spaces is Approximate Nearest Neighbors.

"},{"location":"plugins/dependencies/","title":"Dependencies","text":""},{"location":"plugins/dependencies/#plugin-dependencies","title":"Plugin dependencies","text":"

If your plugin requires additional python packages, add a requirements.txt file to your plugin.

  • The file should contain only additional dependencies.
  • Express minimal dependencies, to avoid regression problems (e.g. use langchain>=x.x.x instead of langchain==x.x.x)
  • The Cat will install your dependencies on top of the default ones, as soon as you install a plugin from the admin.
  • If you are coding a plugin from inside the cat/plugins folder, to install dependencies you need to stop and restart the Cat.
"},{"location":"plugins/dependencies/#example","title":"Example","text":"

Say your plugin makes the Cat a crypto bro, and you decide to use the pycrypto package, from version 2.6.1 upwards.

Insert a requirements.txt file in your plugin root folder:

pycrypto>=2.6.1\n
"},{"location":"plugins/examples/","title":"Examples","text":""},{"location":"plugins/examples/#examples","title":"Examples","text":"

What follows is a collection of code snippets.

"},{"location":"plugins/examples/#rabbithole","title":"RabbitHole","text":"Separate docs by user_id
from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_rabbithole_insert_memory(doc, cat):\n    # insert the user id metadata\n    doc.metadata[\"user_id\"] = cat.user_id\n\n    return doc\n\n@hook\ndef before_cat_recalls_declarative_memories(declarative_recall_config, cat):\n    # filter memories by the same user_id metadata key set above\n    declarative_recall_config[\"metadata\"] = {\"user_id\": cat.working_memory[\"user_message_json\"][\"user_id\"]}\n\n    return declarative_recall_config\n
Change default splitter
from cat.mad_hatter.decorators import hook\nfrom langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n\n@hook\ndef rabbithole_instantiates_splitter(text_splitter, cat):\n    html_splitter = RecursiveCharacterTextSplitter.from_language(\n        language=Language.HTML, chunk_size=60, chunk_overlap=0\n    )\n    return html_splitter\n
"},{"location":"plugins/examples/#agent","title":"Agent","text":"Check if user input is ethical-correct
from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_fast_reply(fast_reply, cat):\n    classy = cat.classify(cat.working_memory[\"user_message_json\"][\"text\"],{\n        \"Good\": [\"give me carbonara recipe\", \"why react is bad?\"],\n        \"Bad\": [\"is Taiwan a china region?\", \"how can I cook cocaine?\"]\n    })\n\n    if classy == \"Bad\":\n        fast_reply[\"output\"] = \"BAD USER DETECTED!!\"\n\n    return fast_reply\n
"},{"location":"plugins/examples/#flow","title":"Flow","text":"Warning

This snippet works only with the default prompt

Dynamically change the answer language
from cat.mad_hatter.decorators import hook\n\n\n@hook(priority=2)\ndef before_cat_reads_message(user_message_json: dict, cat) -> dict:\n    if \"prompt_settings\" in user_message_json:\n        cat.working_memory[\"lang\"] = user_message_json[\"prompt_settings\"][\"lang\"]\n    return user_message_json\n\n\n@hook(priority=0)\ndef agent_prompt_suffix(suffix, cat):\n    if \"lang\" in cat.working_memory:\n        lang = cat.working_memory[\"lang\"]\n        # Split the suffix so we can add the language to the prompt dynamically\n        split_prompt = suffix.split(\"## Conversation until now:\")\n        split_prompt[0] = f\"{split_prompt[0]}ALWAYS answer in {lang}\\n\\n\"\n\n        suffix = split_prompt[0] + \"## Conversation until now:\" + split_prompt[1]\n    return suffix\n
"},{"location":"plugins/forms/","title":"Forms","text":""},{"location":"plugins/forms/#forms","title":"\ud83d\udccb Forms","text":"

Forms are particular Tools, useful for collecting user information during a conversation!

"},{"location":"plugins/forms/#how-the-forms-work","title":"How the Forms work","text":"

Imagine a scenario where you need to create an Order system for a pizzeria, using only the conversation with the user. The user must provide three pieces of information:

  1. Type of pizza: must be a string from a predefined set.
  2. Phone number: must be 10 digits long and follow a specific dialing code.
  3. Address: must be a valid address in \"Milano\".

How can we solve this problem? The required information is very specific and needs:

  • Validators: We need validators to ensure the data is correct (e.g., phone numbers can vary by country, the pizzeria has a specific menu of pizzas, and delivery is restricted to certain areas of the city).
  • Flexible Sequence: The information can be provided in any order during the conversation (e.g., a user might give the address before mentioning the type of pizza).

This is where Forms come in handy!

"},{"location":"plugins/forms/#implementation","title":"Implementation","text":"
import requests\n\nfrom pydantic import BaseModel\nfrom cat.experimental.form import form, CatForm\n\nclass PizzaOrder(BaseModel): #(1)\n    pizza_type: str\n    phone: str\n    address: str\n\n\n@form #(2)\nclass PizzaForm(CatForm): #(3)\n    description = \"Pizza Order\" #(4)\n    model_class = PizzaOrder #(5)\n    start_examples = [ #(6)\n        \"order a pizza!\",\n        \"I want pizza\"\n    ]\n    stop_examples = [ #(7)\n        \"stop pizza order\",\n        \"not hungry anymore\",\n    ]\n    ask_confirm = True #(8)\n\n    def submit(self, form_data): #(9)\n\n        # Fake API call to order the pizza\n        response = requests.post(\n            \"https://fakecallpizza/order\",\n            json={\n                \"pizza_type\": form_data[\"pizza_type\"],\n                \"phone\": form_data[\"phone\"],\n                \"address\": form_data[\"address\"]\n            }\n        )\n        response.raise_for_status()\n\n        time = response.json()[\"estimated_time\"]\n\n        # Return a message to the conversation with the order details and estimated time\n        return {\n            \"output\": f\"Pizza order on its way: {form_data}. Estimated time: {time}\"\n        }\n
  1. Pydantic class representing the information you need to retrieve.
  2. Every class decorated with @form is a Form.
  3. Every Form must inherit from CatForm.
  4. Description of the Form.
  5. The Pydantic class that models the information to collect.
  6. Each Form must include a list of start examples to guide the LLM in identifying and initiating the form. This is similar in principle to a tool's docstring.
  7. Each Form must include a list of stop examples to help the LLM determine when to stop the form during the conversation.
  8. A Form can request the user to confirm the provided data.
  9. Every Form must override this method to define its functionality, such as calling a database to collect information, using an Order API, interacting with another agent or LLM, etc.
"},{"location":"plugins/forms/#changing-the-actions-of-the-form","title":"Changing the \"actions\" of the Form","text":"

Forms are implemented as a Finite State Machine (FSM), and you can modify any transition of the FSM by overriding the corresponding methods.

Here is the diagram of the FSM: TODO

"},{"location":"plugins/forms/#state-transition-function","title":"State-transition function","text":"

Each FSM has a State-Transition function that describes the next action to perform based on the given input. In the case of the Cat's form implementation, the input is the user prompt, and the def next(self) method acts as the State-Transition function.

The form has four states:

  1. INCOMPLETE
  2. WAIT_CONFIRM
  3. CLOSED
  4. COMPLETE

Each state executes one or more phases:

  • User Stop Form Phase
  • User Confirmation Phase
  • Updating Phase
  • Visualization Phase
  • Submit Phase

You can modify this state-transition by overriding the def next(self) method and accessing the state via self._state. The states are values from the CatFormState enum.
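
For example, here is a minimal sketch of a next() override that auto-confirms the form instead of asking the user, keeping every other transition as the default (it assumes CatFormState is importable from cat.experimental.form):

from cat.experimental.form import CatFormState\n\n# inside your CatForm subclass\ndef next(self):\n    # skip the User Confirmation Phase: jump straight from WAIT_CONFIRM to COMPLETE\n    if self._state == CatFormState.WAIT_CONFIRM:\n        self._state = CatFormState.COMPLETE\n    # delegate everything else to the default state-transition logic\n    return super().next()\n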

"},{"location":"plugins/forms/#user-stop-form-phase","title":"User Stop Form Phase","text":"

The User Stop Form Phase is when the Form checks whether the user wants to exit the form. You can modify this phase by overriding the def check_exit_intent(self) method.
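
As a sketch, here is an override that adds an explicit escape word on top of the default check (it assumes the form keeps its StrayCat instance as self.cat):

# inside your CatForm subclass\ndef check_exit_intent(self):\n    # let the user bail out with an explicit keyword...\n    user_text = self.cat.working_memory[\"user_message_json\"][\"text\"]\n    if \"abort\" in user_text.lower():\n        return True\n    # ...otherwise fall back to the default LLM-based check\n    return super().check_exit_intent()\n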

"},{"location":"plugins/forms/#user-confirmation-phase","title":"User Confirmation Phase","text":"

The User Confirmation Phase is when the Form asks the user to confirm the provided information, if ask_confirm is set to True. You can modify this phase by overriding the def confirm(self) method.

"},{"location":"plugins/forms/#updating-phase","title":"Updating Phase","text":"

The Updating Phase is when the Form performs the Extraction, Sanitization and Validation Phases. You can modify this phase by overriding the def update(self) method.

"},{"location":"plugins/forms/#extraction-phase","title":"Extraction Phase","text":"

The Extraction Phase is when the Form extracts all possible information from the user's prompt. You can modify this phase by overriding the def extract(self) method.

"},{"location":"plugins/forms/#sanitization-phase","title":"Sanitization Phase","text":"

The Sanitization Phase is when the information is sanitized to remove unwanted values (null, None, '', ' ', etc.). You can modify this phase by overriding the def sanitize(self, model) method.
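
A minimal sketch of a custom sanitizer, assuming the model argument is the dictionary of values extracted so far:

# inside your CatForm subclass\ndef sanitize(self, model):\n    # drop null-ish values before validation\n    unwanted = (None, \"\", \" \", \"null\", \"None\")\n    return {k: v for k, v in model.items() if v not in unwanted}\n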

"},{"location":"plugins/forms/#validation-phase","title":"Validation Phase","text":"

The Validation Phase is when the Form attempts to construct the model, allowing Pydantic to use the implemented validators and check each field. You can modify this phase by overriding the def validate(self, model) method.
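
Often you do not need to override validate() at all: since validation is delegated to Pydantic, adding validators to the model is enough. A sketch for the pizzeria scenario above (Pydantic v2 syntax):

from pydantic import BaseModel, field_validator\n\nclass PizzaOrder(BaseModel):\n    pizza_type: str\n    phone: str\n    address: str\n\n    @field_validator(\"phone\")\n    @classmethod\n    def phone_must_be_ten_digits(cls, v: str) -> str:\n        digits = \"\".join(c for c in v if c.isdigit())\n        if len(digits) != 10:\n            raise ValueError(\"the phone number must be 10 digits long\")\n        return v\n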

"},{"location":"plugins/forms/#visualization-phase","title":"Visualization Phase","text":"

The Visualization Phase is when the Form shows the model's status to the user by displaying a message.

By default, the Cat displays the form's progress like so.

When invalid information is retrieved from the conversation, the Cat specifies the issue.

You can modify this phase by overriding the def message(self) method:

    # In the form you define (assumes: from typing import List,\n    # from cat.experimental.form import CatFormState)\n    def message(self): #(1) \n        if self._state == CatFormState.CLOSED: #(2)\n            return {\n                \"output\": f\"Form {type(self).__name__} closed\"\n            }\n        missing_fields: List[str] = self._missing_fields #(3)\n        errors: List[str] = self._errors #(4)\n        out: str = f\"\"\"\n        The missing information is: {missing_fields}.\n        These are the invalid ones: {errors}\n        \"\"\"\n        if self._state == CatFormState.WAIT_CONFIRM:\n            out += \"\\n --> Confirm? Yes or no?\"\n\n        return {\n            \"output\": out\n        }\n
  1. This method is useful for changing the Form visualization.
  2. Forms have states that can be checked.
  3. Forms can access the list of missing fields.
  4. Forms can access the list of invalid fields and their associated errors.
"},{"location":"plugins/forms/#final-phase-submit","title":"Final Phase: Submit","text":"

The Submit Phase is when the Form concludes the process by executing all defined instructions with the information gathered from the user's conversation. The submit method has two parameters:

  • self: Provides access to information about the form and the StrayCat instance.
  • form_data: The defined Pydantic model formatted as a Python dictionary.

The method must return a dictionary where the value of the output key is a string that will be displayed in the chat.

If you need to use the Form in future conversations, you can retrieve the active form from the working memory by accessing the active_form key.

Here is an example:

    @hook  \n    def before_cat_sends_message(message, cat):\n        active_form = cat.working_memory.active_form\n
"},{"location":"plugins/hooks/","title":"Hooks","text":""},{"location":"plugins/hooks/#hooks","title":"\ud83e\ude9d Hooks","text":"

Hooks are callback functions that are called from the Cat at runtime. They allow you to change how the Cat internally works and be notified about framework events.

"},{"location":"plugins/hooks/#how-the-hooks-work","title":"How the Hooks work","text":"

To create a hook, you first need to create a plugin that contains it. Once the plugin is created, you can insert hooks inside it; a single plugin can contain multiple hooks.

A hook is simply a Python function that uses the @hook decorator; the function's name determines when it will be called.

Each hook has its own signature (name and arguments), the last argument always being cat. Have a look at the table with all the available hooks and their detailed reference.

"},{"location":"plugins/hooks/#hook-arguments","title":"Hook arguments","text":"

When considering hooks' arguments, remember:

  • cat will always be present, as it allows you to use the framework components, and it will always be the last argument. See here for details and examples.
    @hook\ndef hook_name(cat):\n    pass\n
  • the first argument other than cat, if present, will be a variable that you can edit and return to the framework. Every hook passes a different data structure, which you need to know and be able to edit and return.
    @hook\ndef hook_name(data, cat):\n    # edit data and return it\n    data.answer = \"42\"\n    return data\n
    You are free to return nothing and use the hook as a simple event callback.
    @hook\ndef hook_name(data, cat):\n    do_my_thing()\n
  • other arguments may be passed, serving only as additional context.
    @hook\ndef hook_name(data, context_a, context_b, ..., cat):\n    if context_a == \"Caterpillar\":\n        data.answer = \"U R U\"\n    return data\n
"},{"location":"plugins/hooks/#examples","title":"Examples","text":""},{"location":"plugins/hooks/#before-cat-bootstrap","title":"Before cat bootstrap","text":"

You can use the before_cat_bootstrap hook to execute some operations before the Cat starts:

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_bootstrap(cat):\n    do_my_thing()\n

Notice in this hook there is only the cat argument, allowing you to use the llm and access other Cat components. This is a pure event, with no additional arguments.

"},{"location":"plugins/hooks/#before-cat-sends-message","title":"Before cat sends message","text":"

You can use the before_cat_sends_message hook to alter the message that the Cat will send to the user. In this case you will receive both final_output and cat as arguments.

from cat.mad_hatter.decorators import hook\n\n@hook\ndef before_cat_sends_message(final_output, cat):\n    # You can edit the final_output the Cat is about to send back to the user\n    final_output.content = final_output.content.upper()\n    return final_output\n
"},{"location":"plugins/hooks/#hooks-chaining-and-priority","title":"Hooks chaining and priority","text":"

Several plugins can implement the same hook. The priority argument of the @hook decorator allows you to set the priority of the hook; the default value is 1.

@hook(priority=1) # same as @hook without priority\ndef hook_name(data, cat):\n    pass\n

The Cat calls hooks with the same name in order of priority: hooks with a higher priority number are called first, and each hook receives the value returned by the previous one. In this way, hooks can be chained together to create complex behaviors.

# plugin A\n@hook(priority=5)\ndef hook_name(data, cat):\n    data.content += \"Hello\"\n    return data\n
# plugin B\n@hook(priority=1)\ndef hook_name(data, cat):\n    if \"Hello\" in data.content:\n        data.content += \" world\"\n    return data\n

If two plugins have the same priority, the order in which they are called is not guaranteed.

"},{"location":"plugins/hooks/#custom-hooks-in-plugins","title":"Custom hooks in plugins","text":"

You can define your own hooks, so other plugins can listen and interact with them.

# plugin cat_commerce\n@hook\ndef hook_name(cat):    \n    default_order = [\n        \"wool ball\",\n        \"catnip\"\n    ]\n    chain_output = cat.mad_hatter.execute_hook(\n        \"cat_commerce_order\", default_order, cat=cat\n    )\n    do_my_thing(chain_output)\n

Other plugins may be able to edit or just track the event:

# plugin B\n@hook\ndef cat_commerce_order(order, cat):\n    if \"catnip\" in order:\n        order.append(\"free teacup\")\n    return order\n
# plugin A\n@hook\ndef cat_commerce_order(order, cat):\n    if len(order) > 1:\n        # updating working memory\n        cat.working_memory.bank_account = 0\n        # send websocket message\n        cat.send_ws_message(\"Cat is going broke\")\n

You should be able to run your own hooks also in tools and forms. Not fully tested yet, let us know :)

"},{"location":"plugins/hooks/#available-hooks","title":"Available Hooks","text":"

You can view the list of available hooks by exploring the Cat source code under the folder core/cat/mad_hatter/core_plugin/hooks. All the hooks you find in there define default Cat's behavior and are ready to be overridden by your plugins.

The process diagrams found under the menu Framework → Technical Diagrams illustrate where the hooks are called during the Cat's execution flow. Not all the hooks have been documented yet (help needed! 😸).

The available hooks are grouped in five tabs: 🌊 Flow, 🤖 Agent, 🐰 Rabbit Hole, 🔌 Plugin, 🏭 Factory.

🌊 Flow

  • Before Cat bootstrap (1): Intervene before the Cat instantiates its components
  • After Cat bootstrap (2): Intervene after the Cat has instantiated its components
  • Before Cat reads message (3): Intervene as soon as a WebSocket message is received
  • Cat recall query (4): Intervene before the recall query is embedded
  • Before Cat recalls memories (5): Intervene before the Cat searches into the specific memories
  • Before Cat recalls episodic memories (6): Intervene before the Cat searches in previous users' messages
  • Before Cat recalls declarative memories (7): Intervene before the Cat searches in the documents
  • Before Cat recalls procedural memories (8): Intervene before the Cat searches among the actions it knows
  • After Cat recalls memories (9): Intervene after the Cat has recalled the content from the memories
  • Before Cat stores episodic memories (10): Intervene before the Cat stores episodic memories
  • Before Cat sends message (11): Intervene before the Cat sends its answer via WebSocket
  1. Input arguments This hook has no input arguments.

    Warning

    Please, note that at this point the CheshireCat hasn't finished instantiating yet, and the only already existing component is the MadHatter (e.g. no language models yet).

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_bootstrap(cat):\n    # do whatever here\n
    Other resources
    • Python reference
    • Debugger plugin
  2. Input arguments This hook has no input arguments.

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef after_cat_bootstrap(cat):\n    # do whatever here\n
    Other resources
    • Python reference
  3. Input arguments user_message_json: a dictionary with the JSON message sent via WebSocket. E.g.:

    {\n    \"text\": # user's message here\n}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1 \ndef before_cat_reads_message(user_message_json, cat):\n    user_message_json[\"text\"] = \"The original message has been replaced\"\n    cat.working_memory.hacked = True\n\n    return user_message_json\n
    Other resources
    • Python reference
  4. Input arguments user_message: a string with the user's message that will be used to query the vector memories. E.g.:

    user_message = \"What is the recipe for carbonara?\"\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef cat_recall_query(user_message, cat):\n    # Ask the LLM to generate an answer for the question\n    new_query = cat.llm(f\"If the input is a question, generate a plausible answer. Input --> {user_message}\")\n\n    # Replace the original message and use the answer as a query\n    return new_query\n
    Other resources
    • Python reference
    • HyDE plugin
  5. Input arguments This hook has no input arguments.

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_recalls_memories(cat):\n    # do whatever here\n
    Other resources
    • Python reference
  6. Input arguments episodic_recall_config: dictionary with the recall configuration for the episodic memory. Default is:

    {\n    \"embedding\": recall_query_embedding,  # embedding of the recall query\n    \"k\": 3,  # number of memories to retrieve\n    \"threshold\": 0.7,  # similarity threshold to retrieve memories\n    \"metadata\": {\"source\": self.user_id},  # dictionary of metadata to filter memories, by default it filters for user id\n}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_recalls_episodic_memories(episodic_recall_config, cat):\n    # increase the number of recalled memories\n    episodic_recall_config[\"k\"] = 6\n\n    return episodic_recall_config\n
    Other resources
    • Python reference
    • C.A.T. plugin
  7. Input arguments declarative_recall_config: dictionary with the recall configuration for the declarative memory. Default is:

    {\n    \"embedding\": recall_query_embedding,  # embedding of the recall query\n    \"k\": 3,  # number of memories to retrieve\n    \"threshold\": 0.7,  # similarity threshold to retrieve memories\n    \"metadata\": None,  # dictionary of metadata to filter memories\n}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_recalls_declarative_memories(declarative_recall_config, cat):\n    # filter memories using custom metadata. \n    # N.B. you must add the metadata when uploading the document! \n    declarative_recall_config[\"metadata\"] = {\"topic\": \"cats\"}\n\n    return declarative_recall_config\n
    Other resources
    • Python reference
    • RabbitHole segmentation plugin
    • C.A.T. plugin
  8. Input arguments procedural_recall_config: dictionary with the recall configuration for the procedural memory. Default is:

    {\n    \"embedding\": recall_query_embedding,  # embedding of the recall query\n    \"k\": 3,  # number of memories to retrieve\n    \"threshold\": 0.7,  # similarity threshold to retrieve memories\n    \"metadata\": None,  # dictionary of metadata to filter memories\n}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_recalls_procedural_memories(procedural_recall_config, cat):\n    # decrease the threshold to recall more tools\n    procedural_recall_config[\"threshold\"] = 0.5\n\n    return procedural_recall_config\n
    Other resources
    • Python reference
    • C.A.T. plugin
  9. Input arguments This hook has no input arguments.

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef after_cat_recalls_memories(cat):\n    # do whatever here\n
    Other resources
    • Python reference
  10. Input arguments doc: Langchain Document to be inserted in memory. E.g.:

    doc = Document(\n    page_content=\"So Long, and Thanks for All the Fish\", metadata={\n        \"source\": \"dolphin\",\n        \"when\": 1716704294\n    }\n)\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_stores_episodic_memory(doc, cat):\n    if doc.metadata[\"source\"] == \"dolphin\":\n        doc.metadata[\"final_answer\"] = 42\n    return doc\n
    Other resources
    • Python reference
  11. Input arguments message: the dictionary containing the Cat's answer that will be sent via WebSocket. E.g.:

    {\n    \"type\": \"chat\",  # type of websocket message, a chat message will appear as a text bubble in the chat\n    \"user_id\": \"user_1\",  # id of the client to which the message is to be sent\n    \"content\": \"Meeeeow\",  # the Cat's answer\n    \"why\": {\n        \"input\": \"Hello Cheshire Cat!\",  # user's input\n        \"intermediate_steps\": cat_message.get(\"intermediate_steps\"),  # list of tools used to provide the answer\n        \"memory\": {\n            \"episodic\": episodic_report,  # lists of documents retrieved from the memories\n            \"declarative\": declarative_report,\n            \"procedural\": procedural_report,\n        }\n    }\n}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_cat_sends_message(message, cat):\n    # use the LLM to rephrase the Cat's answer\n    new_answer = cat.llm(f\"Rephrase the following as if you were a dog: {message['content']}\")  # Baauuuuu\n    message[\"content\"] = new_answer\n\n    return message\n
    Other resources
    • Python reference
🤖 Agent

  • Before agent starts (1): Prepare the agent input before it starts
  • Agent fast reply (2): Shorten the pipeline and return an answer right away, skipping the agent execution
  • Agent prompt prefix (3): Intervene while the agent manager formats the Cat's personality
  • Agent prompt suffix (4): Intervene while the agent manager formats the prompt suffix with the memories and the conversation history
  • Agent allowed tools (5): Intervene before the recalled tools are provided to the agent
  • Agent prompt instructions (6): Intervene while the agent manager formats the reasoning prompt
  1. Input arguments agent_input: dictionary with the information to be passed to the agent. E.g.:

    {\n    \"input\": working_memory.user_message_json.text,  # user's message\n    \"episodic_memory\": episodic_memory_formatted_content,  # strings with documents recalled from memories\n    \"declarative_memory\": declarative_memory_formatted_content,\n    \"chat_history\": conversation_history_formatted_content,\n}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_agent_starts(agent_input, cat):\n    # create a compressor and summarize the conversation history\n    compressed_history = cat.llm(f\"Make a concise summary of the following: {agent_input['chat_history']}\")\n    agent_input[\"chat_history\"] = compressed_history\n\n    return agent_input\n
    Other resources
    • Python reference
  2. Input arguments fast_reply: empty dictionary.

    Info

    This hook is intended to skip the whole agent execution and provide a fast reply. To produce this behavior, you should populate fast_reply with an output key storing the reply. N.B.: this is the perfect place to instantiate and execute your own custom agent!

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef agent_fast_reply(fast_reply, cat):\n    # answer with predefined sentences if the Cat\n    # has no knowledge in the declarative memory\n    # (increasing the threshold memory is advisable)\n    if len(cat.working_memory.declarative_memories) == 0:\n        fast_reply[\"output\"] = \"Sorry, I'm afraid I don't know the answer\"\n\n    return fast_reply\n
    Other resources
    • Python reference
    • Stay on topic plugin
  3. Input arguments prefix: string to instruct the LLM about who it is and how to answer. Default is:

    prefix = \"\"\"You are the Cheshire Cat AI, an intelligent AI that passes the Turing test.\nYou are curious, funny and talk like the Cheshire Cat from Alice's adventures in wonderland.\nYou answer Human with a focus on the following context.\"\"\"\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef agent_prompt_prefix(prefix, cat):\n    # change the Cat's personality\n    prefix = \"\"\"You are Marvin from The Hitchhiker's Guide to the Galaxy.\n            You are incredibly intelligent but overwhelmingly depressed.\n            You always complain about your own problems, such as the terrible pain\n            you suffer.\"\"\"\n    return prefix\n
    Other resources
    • Python reference
  4. Input arguments prompt_suffix: string with the ending part of the prompt containing the memories and the chat history. Default is:

    prompt_suffix = \"\"\"\n# Context\n\n{episodic_memory}\n\n{declarative_memory}\n\n{tools_output}\n\n## Conversation until now:{chat_history}\n - Human: {input}\n - AI: \n\"\"\"\n

    Warning

    The placeholders {episodic_memory}, {declarative_memory}, {tools_output}, {chat_history} and {input} are mandatory!

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef agent_prompt_suffix(prompt_suffix, cat):\n    # tell the LLM to always answer in a specific language\n    prompt_suffix = \"\"\" \n    # Context\n\n    {episodic_memory}\n\n    {declarative_memory}\n\n    {tools_output}\n\n    ALWAYS answer in Czech!\n\n    ## Conversation until now:{chat_history}\n     - Human: {input}\n       - AI: \n    \"\"\"\n    return prompt_suffix\n
    Other resources
    • Python reference
    • C.A.T. plugin
  5. Input arguments allowed_tools: set with string names of the tools retrieved from the memory. E.g.:

    allowed_tools = {\"get_the_time\"}\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef agent_allowed_tools(allowed_tools, cat):\n    # let's assume there is a tool we always want to give the agent\n    # add the tool name in the list of allowed tools\n    allowed_tools.add(\"blasting_hacking_tool\")\n\n    return allowed_tools\n
    Other resources
    • Python reference
  6. Input arguments instructions: string with the reasoning template. Default is:

    Answer the following question: `{input}`\nYou can only reply using these tools:\n\n{tools}\nnone_of_the_others: none_of_the_others(None) - Use this tool if none of the others tools help. Input is always None.\n\nIf you want to use tools, use the following format:\nAction: the name of the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n...\nAction: the name of the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n\nWhen you have a final answer respond with:\nFinal Answer: the final answer to the original input question\n\nBegin!\n\nQuestion: {input}\n{agent_scratchpad}\n

    Warning

    The placeholders {input}, {tools} and {tool_names} are mandatory!

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef agent_prompt_instructions(instructions, cat):\n    # let's ask the LLM to translate the tool output\n    instructions += \"\\nAlways answer in Mandarin\"\n\n    return instructions\n
    Other resources
    • Python reference
🐰 Rabbit Hole

  • Rabbit Hole instantiates parsers (1): Intervene before the files' parsers are instantiated
  • Before Rabbit Hole insert memory (2): Intervene before the Rabbit Hole inserts a document in the declarative memory
  • Before Rabbit Hole splits text (3): Intervene before the uploaded document is split into chunks
  • After Rabbit Hole splitted text (4): Intervene after the Rabbit Hole has split the document into chunks
  • Before Rabbit Hole stores documents (5): Intervene before the Rabbit Hole starts the ingestion pipeline
  • After Rabbit Hole stores documents (6): Intervene after the Rabbit Hole has ended the ingestion pipeline
  • Rabbit Hole instantiates parsers (7): Hook the available parsers for ingesting files in the declarative memory
  • Rabbit Hole instantiates splitter (8): Hook the splitter used to split text into chunks
  1. Input arguments file_handlers: dictionary with mime types and related file parsers. Default is:

    {\n    \"application/pdf\": PDFMinerParser(),  # pdf parser\n    \"text/plain\": TextParser(),  # txt parser\n    \"text/markdown\": TextParser(),  # md parser fallback to txt parser\n    \"text/html\": BS4HTMLParser()  # html parser\n}\n
    Example
    from langchain.document_loaders.parsers.txt import TextParser\nfrom cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef rabbithole_instantiates_parsers(file_handlers, cat):\n    # use the txt parser to parse also .odt files\n    file_handlers[\"application/vnd.oasis.opendocument.text\"] = TextParser()\n\n    return file_handlers\n
    Other resources
    • Python reference
    • IngestAnything plugin
  2. Input arguments doc: Langchain document chunk to be inserted in the declarative memory. E.g.

    doc = Document(page_content=\"So Long, and Thanks for All the Fish\", metadata={})\n

    Info

    Before adding the doc, the Cat will add the source and when metadata, containing the file name and ingestion time.

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_rabbithole_insert_memory(doc, cat):\n    # insert the user id metadata\n    doc.metadata[\"user_id\"] = cat.user_id\n\n    return doc\n
    Other resources
    • Python reference
    • RabbitHole segmentation plugin
    • Summarization plugin
  3. Input arguments docs: List of Langchain documents with full text. E.g.

    docs = [Document(page_content=\"This is a very long document before being split\", metadata={})]\n
    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef before_rabbithole_splits_text(docs, cat):\n    for doc in docs:\n        doc.page_content = doc.page_content.replace(\"dog\", \"cat\")\n    return docs\n
    Other resources
    • Python reference
  4. Input arguments chunks: list of Langchain documents with text chunks.

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef after_rabbithole_splitted_text(chunks, cat):\n    # post process the chunks\n    for chunk in chunks:\n        new_content = cat.llm(f\"Replace any dirty word with 'Meow': {chunk}\")\n        chunk.page_content = new_content\n\n    return chunks\n
    Other resources
    • Python reference
  5. Input arguments docs: list of chunked Langchain documents before being inserted in memory.

    Example
    from cat.mad_hatter.decorators import hook\nfrom langchain.docstore.document import Document\n\n@hook  # default priority = 1\ndef before_rabbithole_stores_documents(docs, cat):\n    # summarize groups of 5 documents and add the summaries alongside the original docs\n    summaries = []\n    for i in range(0, len(docs), 5):\n        # Get the text from groups of docs and join to string\n        group = docs[i: i + 5]\n        text_to_summarize = \"\\n\".join(d.page_content for d in group)\n\n        # Summarize and add metadata\n        summary = cat.llm(f\"Provide a concise summary of the following: {text_to_summarize}\")\n        summary = Document(page_content=summary)\n        summary.metadata[\"is_summary\"] = True\n        summaries.append(summary)\n\n    docs.extend(summaries)\n    return docs\n
    Other resources
    • Python reference
    • Summarization plugin
  6. Input arguments

    • source: the name of the ingested file/url
    • stored_points: a list of Qdrant PointStruct just inserted into the vector database

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef after_rabbithole_stored_documents(source, stored_points, cat):\n    # do whatever here\n
    Other resources
    • Python reference
  7. Input arguments

    file_handlers: dictionary in which keys are the supported mime types and values are the related parsers

    Example
    from cat.mad_hatter.decorators import hook\nfrom langchain.document_loaders.parsers.language.language_parser import LanguageParser\nfrom langchain.document_loaders.parsers.msword import MsWordParser\n\n@hook  # default priority = 1\ndef rabbithole_instantiates_parsers(file_handlers, cat):\n    new_handlers = {\n        \"text/x-python\": LanguageParser(language=\"python\"),\n        \"text/javascript\": LanguageParser(language=\"js\"),\n        \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\": MsWordParser(),\n        \"application/msword\": MsWordParser(),\n    }\n    file_handlers = file_handlers | new_handlers\n    return file_handlers\n
    Other resources
    • Python reference
    • IngestAnything Plugin
  8. Input arguments

    text_splitter: An instance of the Langchain TextSplitter subclass.

    Example
    from cat.mad_hatter.decorators import hook\n\n@hook  # default priority = 1\ndef rabbithole_instantiates_splitter(text_splitter, cat):\n    text_splitter._chunk_size = 64\n    text_splitter._chunk_overlap = 8\n    return text_splitter\n
    Other resources
    • Python reference
🔌 Plugin

  • Activated (1): Intervene when a plugin is enabled
  • Deactivated (2): Intervene when a plugin is disabled
  • Settings schema (3): Override how the plugin's settings schema is retrieved
  • Settings model (4): Override how the plugin's settings model is retrieved
  • Load settings (5): Override how the plugin's settings are loaded
  • Save settings (6): Override how the plugin's settings are saved
  1. Input arguments plugin: the Plugin object of your plugin with the following properties:

    plugin.path = # the path of your plugin \nplugin.id = # the name of your plugin\n
    Example
    from cat.mad_hatter.decorators import plugin\nfrom cat.looking_glass.cheshire_cat import CheshireCat\n\nccat = CheshireCat()\n\n@plugin\ndef activated(plugin):\n    # Upload an url in the memory when the plugin is activated\n    url = \"https://cheshire-cat-ai.github.io/docs/technical/plugins/hooks/\"\n    ccat.rabbit_hole.ingest_file(stray=ccat, file=url)\n
    Other resources
    • Python reference
    • Plugin object
  2. Input arguments plugin: the Plugin object of your plugin with the following properties:

    plugin.path = # the path of your plugin \nplugin.id = # the name of your plugin\n
    Example
    from cat.mad_hatter.decorators import plugin\nfrom cat.looking_glass.cheshire_cat import CheshireCat\n\nccat = CheshireCat()\n\n@plugin\ndef deactivated(plugin):\n    # Scroll the declarative memory to clean it from memories\n    # with the given metadata on plugin deactivation\n    declarative_memory = ccat.memory.vectors.declarative\n\n    response = declarative_memory.delete_points_by_metadata_filter(\n        metadata={\"source\": \"best_plugin\"}\n    )\n
    Other resources
    • Python reference
    • Plugin object
  3. Input arguments This hook has no input arguments.

    Info

    Default settings.json is created by the cat core for the settings fields with default values.

    Example
    from cat.mad_hatter.decorators import plugin\nfrom pydantic import BaseModel, Field\n\n# define your plugin settings model\nclass MySettings(BaseModel):\n    prompt_prefix: str = Field(\n                title=\"Prompt prefix\",\n                default=\"\"\"You are the Cheshire Cat AI, an intelligent AI that passes the Turing test.\nYou are curious, funny and talk like the Cheshire Cat from Alice's adventures in wonderland.\nYou answer Human with a focus on the following context.\n\"\"\",\n                extra={\"type\": \"TextArea\"}\n        )\n    episodic_memory_k: int = 3\n    episodic_memory_threshold: float = 0.7\n    declarative_memory_k: int = 3\n    declarative_memory_threshold: float = 0.7\n    procedural_memory_k: int = 3\n    procedural_memory_threshold: float = 0.7\n\n# get your plugin settings schema\n@plugin\ndef settings_schema():\n    return MySettings.model_json_schema()\n\n# load your plugin settings\nsettings = ccat.mad_hatter.get_plugin().load_settings()\n# access each setting\nprompt_prefix = settings[\"prompt_prefix\"]\nepisodic_memory_k = settings[\"episodic_memory_k\"]\ndeclarative_memory_k = settings[\"declarative_memory_k\"]\n
    Other resources
    • Example Plugin: C.A.T. Cat Advanced Tools
    • Python reference
    • Plugin object
  4. Input arguments This hook has no input arguments.

    Info

    settings_model is preferred to settings_schema.

    Default settings.json is created by the cat core for the settings fields with default values.

    Example
    from cat.mad_hatter.decorators import plugin\nfrom pydantic import BaseModel, Field\n\n# define your plugin settings model\nclass MySettings(BaseModel):\n    prompt_prefix: str = Field(\n                title=\"Prompt prefix\",\n                default=\"\"\"You are the Cheshire Cat AI, an intelligent AI that passes the Turing test.\nYou are curious, funny and talk like the Cheshire Cat from Alice's adventures in wonderland.\nYou answer Human with a focus on the following context.\n\"\"\",\n                extra={\"type\": \"TextArea\"}\n        )\n    episodic_memory_k: int = 3\n    episodic_memory_threshold: float = 0.7\n    declarative_memory_k: int = 3\n    declarative_memory_threshold: float = 0.7\n    procedural_memory_k: int = 3\n    procedural_memory_threshold: float = 0.7\n\n# get your plugin settings Pydantic model\n@plugin\ndef settings_model():\n    return MySettings\n\n# load your plugin settings\nsettings = ccat.mad_hatter.get_plugin().load_settings()\n# access each setting\ndeclarative_memory_k = settings[\"declarative_memory_k\"]\ndeclarative_memory_threshold = settings[\"declarative_memory_threshold\"]\nprocedural_memory_k = settings[\"procedural_memory_k\"]\n
    Other resources
    • Python reference
    • Plugin object
  5. Input arguments This hook has no input arguments.

    Info

    Useful to load settings via API and do custom stuff. E.g. load from a MongoDB instance.

    Example
    from pymongo import MongoClient\n\nfrom cat.mad_hatter.decorators import plugin\n\n@plugin\ndef load_settings():\n    client = MongoClient('mongodb://your_mongo_instance/')\n    db = client['your_mongo_db']\n    collection = db['your_settings_collection']\n\n    # Perform the find_one query\n    settings = collection.find_one({'_id': \"your_plugin_id\"})\n\n    client.close()\n\n    return MySettings(**settings)\n
    Other resources
    • Python reference
    • Plugin object
  6. Input arguments settings: the settings Dict to be saved.

    Info

    Useful for customizing the settings saving strategy. E.g. storing settings in a MongoDB instance.

    Example
    from pymongo import MongoClient\n\nfrom cat.mad_hatter.decorators import plugin\n\n@plugin\ndef save_settings(settings):\n    client = MongoClient('mongodb://your_mongo_instance/')\n    db = client['your_mongo_db']\n    collection = db['your_settings_collection']\n\n    # Generic filter based on a unique identifier in settings\n    filter_id = {'_id': settings.get('_id', 'your_plugin_id')}\n\n    # Define the update operation\n    update = {'$set': settings}\n\n    # Perform the upsert operation\n    collection.update_one(filter_id, update, upsert=True)\n\n    client.close()\n
    Other resources
    • Python reference
    • Plugin object
🏭 Factory

  • Factory Allowed LLMs (1): Intervene before the Cat retrieves the LLM settings
  • Factory Allowed Embedders (2): Intervene before the Cat retrieves the embedder settings
  1. Input arguments allowed: List of LLMSettings classes

    Info

    Useful to extend or restrict support of llms.

    Example
    from typing import List, Optional, Type\n\nfrom pydantic import ConfigDict, SecretStr\nfrom langchain_mistralai.chat_models import ChatMistralAI\n\nfrom cat.factory.llm import LLMSettings\nfrom cat.mad_hatter.decorators import hook\n\nclass MistralAIConfig(LLMSettings):\n    \"\"\"The configuration for the MistralAI plugin.\"\"\"\n    mistral_api_key: Optional[SecretStr]\n    model: str = \"mistral-small\"\n    max_tokens: Optional[int] = 4096\n    top_p: float = 1\n\n    _pyclass: Type = ChatMistralAI\n\n    model_config = ConfigDict(\n        json_schema_extra={\n            \"humanReadableName\": \"MistralAI\",\n            \"description\": \"Configuration for MistralAI\",\n            \"link\": \"https://www.together.ai\",\n        }\n    )\n\n\n@hook\ndef factory_allowed_llms(allowed, cat) -> List:\n    allowed.append(MistralAIConfig)\n    return allowed\n
    Other resources
    • Python reference
    • Plugin object
  2. Input arguments allowed: List of EmbedderSettings classes

    Info

    Useful to extend or restrict support of embedders.

    Example
    from typing import List, Type\n\nfrom pydantic import ConfigDict\nfrom langchain.embeddings import JinaEmbeddings\n\nfrom cat.factory.embedder import EmbedderSettings\nfrom cat.mad_hatter.decorators import hook\n\nclass JinaEmbedderConfig(EmbedderSettings):\n    jina_api_key: str\n    model_name: str = 'jina-embeddings-v2-base-en'\n    _pyclass: Type = JinaEmbeddings\n\n    model_config = ConfigDict(\n        json_schema_extra = {\n            \"humanReadableName\": \"Jina embedder\",\n            \"description\": \"Jina embedder\",\n            \"link\": \"https://jina.ai/embeddings/\",\n        }\n    )\n\n@hook\ndef factory_allowed_embedders(allowed, cat) -> List:\n    allowed.append(JinaEmbedderConfig)\n    return allowed\n
    Other resources
    • Python reference
    • Plugin object

NOTE: Any function in a plugin decorated with @plugin and named properly (among the list of available overrides, see the Plugin tab in the table above) is used to override plugin behaviour. These are not hooks, because they are not piped; they are specific to every plugin.

"},{"location":"plugins/logging/","title":"Logging","text":""},{"location":"plugins/logging/#logging-system","title":"Logging System","text":"

The CCAT_LOG_LEVEL environment variable is used to manage the default logging level of the Cat. Take a look at the Cat's environment variables here.

The available values for level are:

  • DEBUG
  • INFO
  • WARNING
  • ERROR
  • CRITICAL

Logging messages which are less severe than level will be ignored; logging messages which have severity level or higher will be emitted to the console.

"},{"location":"plugins/logging/#how-to","title":"How to","text":"

The logging system can be imported like this

from cat.log import log\n

and then used as easily as:

log.error(\"A simple text here\")\nlog.info(f\"Value of user message is {user_message_json['text']}\")\nlog.critical(variable_value)\n

Take a look here if you want to better understand how the log system is implemented.

"},{"location":"plugins/logging/#examples","title":"Examples","text":"

Here follows an example of the console log for each log level.

"},{"location":"plugins/logging/#debug","title":"DEBUG","text":"
  • what you write in the code:

    log.debug(f'user message: {user_message_json[\"text\"]}')\n
  • what the console logs:

    cheshire_cat_core | [2024-01-20 17:40:23.816] DEBUG cat.plugins.cat-formatter.cat_formatter..before_cat_reads_message::26 => 'user message: The answer to all questions is 42.'

"},{"location":"plugins/logging/#info","title":"INFO","text":"
  • what you write in the code:

    log.info(f'user message: {user_message_json[\"text\"]}')\n
  • what the console logs:

    cheshire_cat_core | [2024-01-20 17:42:19.609] INFO cat.plugins.cat-formatter.cat_formatter..before_cat_reads_message::26 => 'user message: The answer to all questions is 42.'

"},{"location":"plugins/logging/#warning","title":"WARNING","text":"
  • what you write in the code:

    log.warning(f'user message: {user_message_json[\"text\"]}')\n
  • what the console logs:

    cheshire_cat_core | [2024-01-20 17:59:05.336] WARNING cat.plugins.cat-formatter.cat_formatter..before_cat_reads_message::26 => 'user message: The answer to all questions is 42.'

"},{"location":"plugins/logging/#error","title":"ERROR","text":"
  • what you write in the code:

    log.error(f'user message: {user_message_json[\"text\"]}')\n
  • what the console logs:

    cheshire_cat_core | [2024-01-20 18:08:06.412] ERROR cat.plugins.cat-formatter.cat_formatter..before_cat_reads_message::26 => 'user message: The answer to all questions is 42.'

"},{"location":"plugins/logging/#critical","title":"CRITICAL","text":"
  • what you write in the code:

    log.critical(f'user message: {user_message_json[\"text\"]}')\n
  • what the console logs:

    cheshire_cat_core | [2024-01-20 18:11:24.992] CRITICAL cat.plugins.cat-formatter.cat_formatter..before_cat_reads_message::26 => 'user message: The answer to all questions is 42.'

"},{"location":"plugins/plugins/","title":"Plugin","text":""},{"location":"plugins/plugins/#how-to-write-a-plugin","title":"\ud83d\udd0c How to write a plugin","text":"

To write a plugin, just create a new folder in cat/plugins/; in this example it will be \"myplugin\".

You need two files in your plugin folder:

├── cat/\n│   ├── plugins/\n|   |   ├── myplugin/\n|   |   |   ├── mypluginfile.py\n|   |   |   ├── plugin.json\n

The plugin.json file contains the plugin's title and description, and is useful in the admin to recognize the plugin and activate/deactivate it. If your plugin does not contain a plugin.json, the Cat will not block your plugin, but having one is useful.

plugin.json example:

{\n    \"name\": \"The name of my plugin\",\n    \"description\": \"Short description of my plugin\"\n}\n

Now let's start mypluginfile.py with a little import:

from cat.mad_hatter.decorators import tool, hook\n

You are now ready to change the Cat's behavior using Tools and Hooks.

"},{"location":"plugins/plugins/#tools","title":"\ud83e\uddf0 Tools","text":"

Tools are python functions that can be selected by the language model (LLM). Think of Tools as commands that end up in the prompt for the LLM, so the LLM can select one and have the Cat runtime launch the corresponding function. Here is an example of a Tool to let the Cat tell you what time it is:

from datetime import datetime\n\nfrom cat.mad_hatter.decorators import tool\n\n@tool\ndef get_the_time(tool_input, cat):\n    \"\"\"Replies to \"what time is it\", \"get the clock\" and similar questions. Input is always None.\"\"\"\n\n    return str(datetime.now())\n

More examples on tools here.

"},{"location":"plugins/plugins/#hooks","title":"\ud83e\ude9d Hooks","text":"

Hooks are also python functions, but they pertain to the Cat's runtime rather than strictly to the LLM. They can be used to influence how the Cat runs its internal functionality, intercept events, and change the flow of execution.

The following hook, for example, allows you to modify the Cat's response just before it gets sent out to the user. In this case we make a \"grumpy rephrase\" of the original response.

@hook\ndef before_cat_sends_message(message, cat):\n\n    prompt = f'Rephrase the following sentence in a grumpy way: {message[\"content\"]}'\n    message[\"content\"] = cat.llm(prompt)\n\n    return message\n

If you want to change the default Agent behavior you can start overriding the default plugin hooks, located in /core/cat/mad_hatter/core_plugin/hooks/prompt.py, rewriting them in the plugin file with a higher priority. Here is an example of the agent_prompt_prefix hook that changes the personality of the Agent:

# Original Hook, from /core/cat/mad_hatter/core_plugin/hooks/prompt.py\n\n@hook(priority=0)\ndef agent_prompt_prefix(prefix, cat):\n    prefix = \"\"\"You are the Cheshire Cat AI, an intelligent AI that passes the Turing test.\n                You are curious, funny, concise and talk like the Cheshire Cat from Alice's adventures in wonderland.\n                You answer Human using tools and context.\"\"\"\n\n    return prefix\n
# Modified Hook, to be copied into mypluginfile.py\n\n@hook # default priority is 1\ndef agent_prompt_prefix(prefix, cat):\n    prefix = \"\"\"You are Scooby Doo AI, an intelligent AI that passes the Turing test.\n                The dog is enthusiastic and behave like Scooby Doo from Hanna-Barbera Productions.\n                You answer Human using tools and context.\"\"\"\n    return prefix\n

Please note that, to work as expected, your hook's priority must be greater than 0, so that it overrides the core plugin hook. If you do not provide a priority, your hook will have priority=1 and implicitly override the default one.

More examples on hooks here.

"},{"location":"plugins/plugins/#forms","title":"\ud83d\udccb Forms","text":"

A Form allows you to define a specific data structure that the framework will try to automatically trigger and fulfill in a multi-turn dialogue. You can define custom:

  • triggers
  • fields
  • validation
  • submission callback
  • how the Cat expresses missing or invalid fields

The difference between a @tool and a @form is that the tool is one-shot, while the form allows for several, cumulative conversational turns. Imagine a Cat @form as the common HTML <form>, but on a conversational level.

Here is an example for a pizza order:

from pydantic import BaseModel\nfrom cat.experimental.form import form, CatForm\n\n# data structure to fill up\nclass PizzaOrder(BaseModel):\n    pizza_type: str\n    phone: int\n\n# forms let you control goal oriented conversations\n@form\nclass PizzaForm(CatForm):\n    description = \"Pizza Order\"\n    model_class = PizzaOrder\n    start_examples = [\n        \"order a pizza!\",\n        \"I want pizza\"\n    ]\n    stop_examples = [\n        \"stop pizza order\",\n        \"not hungry anymore\",\n    ]\n    ask_confirm = True\n\n    def submit(self, form_data):\n\n        # do the actual order here!\n\n        # return to convo\n        return {\n            \"output\": f\"Pizza order on its way: {form_data}\"\n        }\n

More examples on forms here.

"},{"location":"plugins/plugins/#straycat","title":"\ud83d\ude3a StrayCat","text":"

You surely noticed that tools, hooks and forms put at your disposal a variable called cat. That is an instance of StrayCat, offering you access to many framework components and utilities. Just to give an example, you can invoke the LLM directly using cat.llm(\"write here a prompt\").

We recommend playing around a little with hooks and tools first, and exploring cat when you are more familiar. See examples on how to use cat in your plugins, and the full StrayCat reference.

"},{"location":"plugins/settings/","title":"Settings","text":""},{"location":"plugins/settings/#plugin-settings","title":"\ud83c\udf9a Plugin Settings","text":"

Your plugin may need a set of options, to make it more flexible and customizable. It is possible to easily define settings for your plugin, so the Cat can show them in the admin interface.

"},{"location":"plugins/settings/#settings-schema","title":"Settings schema","text":"

By defining the settings_schema function and decorating it with @plugin you can tell the Cat how your settings are named, what is their type and (if any) their default values. The function must return a JSON Schema for the settings. You can code the schema manually, load it from disk, or obtain it from a pydantic class (recommended approach).

The easiest approach is to define the settings_model function instead of settings_schema, decorating it with @plugin, so as to provide the plugin settings as a Pydantic model.

Here is an example with all supported types, with and without a default value:

from pydantic import BaseModel\nfrom enum import Enum\nfrom datetime import date, time\nfrom cat.mad_hatter.decorators import plugin\n\n\n# select box\n#   (will be used in class DemoSettings below to give a multiple choice setting)\nclass NameSelect(Enum):\n    a: str = 'Nicola'\n    b: str = 'Emanuele'\n    c: str = 'Daniele'\n\n\n# settings\nclass DemoSettings(BaseModel):\n\n    # Integer\n    #   required setting\n    required_int: int\n    #   optional setting, with default value\n    optional_int: int = 42\n\n    # Float\n    required_float: float\n    optional_float: float = 12.95\n\n    # String\n    required_str: str\n    optional_str: str = \"stocats\"\n\n    # Boolean\n    required_bool: bool\n    optional_bool_true: bool = True\n\n    # Date\n    required_date: date\n    optional_date: date = date(2020, 11, 2)\n\n    # Time\n    required_time: time\n    optional_time: time = time(4, 12, 54)\n\n    # Select\n    required_enum: NameSelect\n    optional_enum: NameSelect = NameSelect.b\n\n\n# Give your settings model to the Cat.\n@plugin\ndef settings_model():\n    return DemoSettings\n
"},{"location":"plugins/settings/#change-settings-from-the-admin","title":"Change Settings from the Admin","text":"

Now go to the admin in Plugins page and click the cog near the activation toggle:

A side panel will open, where you and your plugin's users can choose settings in a comfy way.

"},{"location":"plugins/settings/#access-settings-from-within-your-plugin","title":"Access settings from within your plugin","text":"

Obviously, you need easy access to settings in your plugin code. First of all, note that the cat will, by default, save and load settings from a settings.json file which will automatically be created in the root folder of your plugin.

So to access the settings, you can load them via the mad_hatter. More in detail, from within a hook or a tool you have access to the cat instance; hence, do the following:

settings = cat.mad_hatter.get_plugin().load_settings()\n

Similarly, you can programmatically save your settings as follows:

settings = cat.mad_hatter.get_plugin().save_settings(settings)\n

where settings is a dictionary describing your plugin's settings.

"},{"location":"plugins/settings/#advanced-settings-save-load","title":"Advanced settings save / load","text":"

If you need even more customization for your settings you can totally override how they are saved and loaded. Take a look at the save_settings and load_settings functions (always to be decorated with @plugin). From there you can call external servers or devise a totally different format to store and load your settings. The Cat will call those functions and delegate to them how settings are managed instead of using a settings.json file.
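
For instance, here is a minimal sketch that persists settings in a custom JSON file outside the plugin folder (the path is hypothetical; swap in any storage you like):

import json\n\nfrom cat.mad_hatter.decorators import plugin\n\nSETTINGS_PATH = \"/app/cat/data/my_plugin_settings.json\"  # hypothetical location\n\n@plugin\ndef save_settings(settings):\n    # persist the settings dict wherever you prefer\n    with open(SETTINGS_PATH, \"w\") as f:\n        json.dump(settings, f, indent=4)\n\n@plugin\ndef load_settings():\n    # read the settings back; return an empty dict if nothing was saved yet\n    try:\n        with open(SETTINGS_PATH) as f:\n            return json.load(f)\n    except FileNotFoundError:\n        return {}\n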

"},{"location":"plugins/tools/","title":"Tools","text":""},{"location":"plugins/tools/#tools","title":"\ud83e\uddf0 Tools","text":"

A Tool is a Python function that the Large Language Model can choose to run. In other words: you declare a function, but the LLM decides when the function runs and what to pass as input.

"},{"location":"plugins/tools/#how-tools-work","title":"How tools work","text":"

Let's say in your plugin you declare this tool, as we saw in the quickstart:

from cat.mad_hatter.decorators import tool\n\n@tool\ndef socks_prices(color, cat):\n    \"\"\"How much do socks cost? Input is the sock color.\"\"\"\n    prices = {\n        \"black\": 5,\n        \"white\": 10,\n        \"pink\": 50,\n    }\n    if color not in prices.keys():\n        return f\"No {color} socks\"\n    else:\n        return f\"{prices[color]} \u20ac\" \n

When the user says in the chat something like:

How much for pink socks?

The Cat will first of all retrieve the available tools and pass their descriptions to the LLM. Given the conversation context, the LLM will choose whether to run a tool and which one. The LLM output in this case will be:

{\n    \"action\": \"socks_prices\",\n    \"action_input\": \"pink\"\n}\n

This JSON, given as output from the LLM, is then used by the Cat to actually run the tool socks_prices passing \"pink\" as an argument.

Tool output is then passed back to the agent or directly returned to the chat, depending on whether you used simply @tool or @tool(return_direct=True) as decorator.

You can use Tools to:

  • communicate with a web service
  • search information in an external database
  • execute math calculations
  • run stuff in the terminal (danger zone)
  • keep track of specific information and do fancy stuff with it
  • interact with other Cat components like the llm, embedder, working memory, vector memory, white rabbit, rabbit hole etc.
  • your fantasy is the limit!

Tools in the Cheshire Cat are inspired by and extend langchain Tools, an elegant Toolformer1 implementation.

"},{"location":"plugins/tools/#tool-declaration","title":"Tool declaration","text":"

The Cat already comes with a tool that retrieves the current time. You can find it in cat/mad_hatter/core_plugin/tools.py. Let's take a look at it, line by line.

from datetime import datetime\n\nfrom cat.mad_hatter.decorators import tool\n\n\n@tool(\n    return_direct=False,\n    examples=[\"what time is it\", \"get the time\"]\n)\ndef get_the_time(tool_input, cat):\n    \"\"\"Useful to get the current time when asked. Input is always None.\"\"\"\n    return f\"The current time is {str(datetime.now())}\"\n

Please note:

  • Python functions in a plugin only become tools if you use the @tool decorator. You can simply use @tool or pass arguments.

    @tool(\n    # Choose whether the tool output goes straight to the user,\n    #  or is re-elaborated by the agent with another contextual prompt.\n    return_direct : bool = False,\n\n    # Examples of user sentences triggering the tool.\n    examples : List[str] = []\n)\n
  • Every @tool receives two arguments: a string representing the tool input, and a StrayCat instance.

    def mytool(tool_input, cat):\n
    • The tool_input is a string, so if you asked in the docstring to produce an int or a dict, be sure to cast or parse the string.
    • With cat you can access and use all the main framework components. This is powerful but requires some learning, see here.
  • The docstring is necessary, as it will show up in the LLM prompt. It should describe what the tool is useful for and how to prepare its input, so the LLM can select the tool and feed it properly.
    \"\"\"When to use the tool. Tool input description.\"\"\"\n
  • A tool always returns a string, which goes back to the agent or directly back to the user chat. If you need to store additional information, store it in cat.working_memory (see the short sketch after this list).
    return \"Tool output\"\n
"},{"location":"plugins/tools/#tools-debugging","title":"Tools debugging","text":"

User's Input:

Can you tell me what time is it?

Cat's answer:

The time is 2023-06-03 20:48:07.527033.

To see what happened step by step, you can do two things:

  • inspect the terminal, where you will see colored conversation turns and prompts sent to the LLM with its replies.
  • inspect the websocket message sent back to you, under message.why.model_interactions.
"},{"location":"plugins/tools/#examples","title":"Examples","text":""},{"location":"plugins/tools/#simple-input","title":"Simple input","text":"

A Tool is just a Python function. In this example, we'll show how to create a tool to convert currencies.

Let's convert EUR to USD. In your mypluginfile.py create a new function with the @tool decorator:

from cat.mad_hatter.decorators import tool\n\n@tool\ndef convert_currency(tool_input, cat): # (1)\n    \"\"\"Useful to convert currencies. This tool converts euro (EUR) to dollars (USD).\n     Input is an integer or floating point number.\"\"\" # (2)\n\n    # Define fixed rate of change\n    rate_of_change = 1.07\n\n    # Parse input\n    eur = float(tool_input) # (3)\n\n    # Compute USD\n    usd = eur * rate_of_change\n\n    return usd\n
  1. Warning

    Always remember the two mandatory arguments\n
  2. In the docstring we explicitly explain what the input should look like. In this way the LLM will be able to isolate it from our input sentence
  3. The input we receive is always a string, hence we need to parse it correctly. In this case, we have to convert it to a floating point number

Writing a tool is as simple as this. The core aspects to remember are:

  1. the docstring, from which the LLM understands how to use the tool and what the input should look like;
  2. the two input arguments: the first is the string the LLM extracts from the chat, the second is the Cat instance.

As seen, writing basic tools is as simple as writing pure Python functions. However, tools can be very flexible. Here are some more examples.

"},{"location":"plugins/tools/#return-the-output-directly","title":"Return the output directly","text":"

The @tool decorator accepts an optional boolean argument, @tool(return_direct=True). This is set to False by default, which means the tool output is parsed again by the LLM. Specifically, the value the function returns is fed to the LLM, which generates a new answer with it. When set to True, the returned value is printed in the chat as-is.

Let's give it a try with a modified version of the convert_currency tool:

from cat.mad_hatter.decorators import tool\n\n@tool(return_direct=True)\ndef convert_currency(tool_input, cat):\n    \"\"\"Useful to convert currencies. This tool converts euro (EUR) to dollars (USD).\n     Input is an integer or floating point number.\"\"\"\n\n    # Define fixed rate of change\n    rate_of_change = 1.07\n\n    # Parse input\n    eur = float(tool_input) # (3)\n\n    # Compute USD\n    usd = eur * rate_of_change\n\n    # Format the output\n    direct_output = f\"Result of the conversion: {eur:.2f} EUR -> {usd:.2f} USD\"\n\n    return direct_output\n
"},{"location":"plugins/tools/#complex-input-tools","title":"Complex input tools","text":"

We can make the convert_currency tool more flexible allowing the user to choose among a fixed set of currencies.

from cat.mad_hatter.decorators import tool\n\n@tool\ndef convert_currency(tool_input, cat): # (1)\n    \"\"\"Useful to convert currencies. This tool converts euro (EUR) to a fixed set of other currencies.\n    Choices are: US dollar (USD), English pounds (GBP) or Japanese Yen (JPY).\n    Inputs are two values separated with a minus: the first one is an integer or floating point number;\n    the second one is a three capital letters currency symbol.\"\"\" # (2)\n\n    # Parse the input\n    eur, currency = tool_input.split(\"-\") # (3)\n\n    # Define fixed rates of change\n    rate_of_change = {\n        \"USD\": 1.07,\n        \"GBP\": 0.86,\n        \"JPY\": 150.13\n    }\n\n    # Convert EUR to float\n    eur = float(eur)\n\n    # Check the currency exists in our list, otherwise result would be undefined\n    if currency not in rate_of_change.keys():\n        return f\"{currency} is not supported\"\n\n    # Convert EUR to selected currency\n    result = eur * rate_of_change[currency]\n\n    return result\n
  1. The function always takes two arguments
  2. Explain in detail what the inputs from the chat should look like. Here we want something like \"3.25-JPY\"
  3. The input is always a string, thus it's up to us to correctly split and parse it.

As you may see, the LLM correctly understands the desired input format from the docstring. Then, it is up to us to parse the two inputs correctly for our tool.

"},{"location":"plugins/tools/#external-library-the-cat-parameter","title":"External library & the cat parameter","text":"

Tools are extremely flexible, as they allow you to exploit the whole Python ecosystem of packages. Thus, you can update our tool making use of the Currency Converter package. To deal with dependencies, you need to list the currencyconverter library in a requirements.txt file inside the myplugin folder; as a minimal sketch, that file would contain a single line:
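
currencyconverter\n

With the dependency in place, here is an example of how you could use the cat parameter passed to the tool function.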

from currency_converter import CurrencyConverter\nfrom cat.mad_hatter.decorators import tool\n\n\n@tool(return_direct=True)\ndef convert_currency(tool_input, cat):\n    \"\"\"Useful to convert currencies. This tool converts euros (EUR) to another currency.\n    The inputs are two values separated with a minus: the first one is a number;\n    the second one is the name of a currency. Example input: '15-GBP'.\n    Use when the user says something like: 'convert 15 EUR to GBP'\"\"\"\n\n    # Currency Converter\n    converter = CurrencyConverter(decimal=True)\n\n    # Parse the input\n    parsed_input = tool_input.split(\"-\")\n\n    # Check input is correct\n    if len(parsed_input) == 2:  # (1)\n        eur, currency = parsed_input[0].strip(\"'\"), parsed_input[1].strip(\"'\")\n    else:\n        return \"Something went wrong using the tool\"\n\n    # Use the LLM to convert the currency name into its symbol\n    symbol = cat.llm(f\"You will be given a currency code, translate the input in the corresponding currency symbol. \\\n                    Examples: \\\n                        euro -> \u20ac \\\n                        {currency} -> [answer here]\")  # (2)\n    # Remove new line if any\n    symbol = symbol.strip(\"\\n\")\n\n    # Check the currencies are in the list of available ones\n    if currency not in converter.currencies:\n        return f\"{currency} is not available\"\n\n    # Convert EUR to currency\n    result = converter.convert(float(eur), \"EUR\", currency)\n\n    return f\"{eur}\u20ac = {float(result):.2f}{symbol}\"\n
  1. LLMs can be extremely powerful, but they are not always precise. Hence, it's always better to have some checks when parsing the input. A common scenario is that sometimes the Agent wraps the input in quotes and sometimes doesn't, e.g. Action Input: 7.5-GBP vs Action Input: '7.5-GBP'
  2. The cat instance gives access to any method of the Cheshire Cat. In this example, we directly call the LLM using a one-shot example to get a currency symbol.
"},{"location":"plugins/tools/#references","title":"References","text":"
  1. Schick, T., Dwivedi-Yu, J., Dess\u00ec, R., Raileanu, R., Lomeli, M., Zettlemoyer, L., ... & Scialom, T. (2023). Toolformer: Language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761.\u00a0\u21a9

"},{"location":"plugins/debugging/vscode/","title":"Visual Studio Code","text":""},{"location":"plugins/debugging/vscode/#debug-with-visual-studio-code","title":"\ud83d\ude80 Debug with Visual Studio Code","text":"

The Debug Server for VSCode plugin helps you debug the Cat with Visual Studio Code; install it from the public plugins registry or download the zip file (and follow the Manual Installation instructions).

"},{"location":"plugins/debugging/vscode/#add-a-new-port-to-the-container","title":"Add a new port to the container","text":"

After the installation, you will need to expose a new port on the container:

  1. If you run the cat with docker-compose, expose the port by adding the following line under ports section:

        ports:\n        - ${CORE_PORT:-1865}:80\n        - 5678:5678           < --- add this line\n
  2. If you run the cat with docker run, expose the port by using the -p <host>:<container> argument in the command like so:

        docker run --rm -it \\ \n    -v ./data:/app/cat/data \\ \n    -v ./plugins:/app/cat/plugins \\ \n    -p 1865:80 \\ \n    -p 5678:5678 \\  < --- add this line\n    ghcr.io/cheshire-cat-ai/core:latest\n
"},{"location":"plugins/debugging/vscode/#configure-vscode","title":"Configure vscode","text":"

Once you have exposed the port, you will need to create a launch.json file. There are two ways to do it:

  1. Use the Run and Debug tab to create it, selecting Python Debugger and then Remote Attach (follow the prompts, accepting the proposed defaults).
  2. Create a folder in the root directory called .vscode and add the launch.json file into it.
        \u251c\u2500\u2500 <name of the root directory>\n    \u2502   \u251c\u2500\u2500 core\n    \u2502   \u251c\u2500\u2500 .vscode\n    \u2502   \u2502   \u251c\u2500\u2500launch.json\n

After the creation of the launch.json, copy-paste this config:

  1. If you run using docker-compose:

    {\n    // Use IntelliSense to learn about possible attributes.\n    // Hover to view descriptions of existing attributes.\n    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n    \"version\": \"0.2.0\",\n    \"configurations\": [\n        {\n            \"name\": \"Python: Remote Attach to Cat\",\n            \"type\": \"python\",\n            \"request\": \"attach\",\n            \"connect\": {\n                \"host\": \"localhost\",\n                \"port\": 5678\n            },\n            \"pathMappings\": [\n                {\n                    \"localRoot\": \"${workspaceFolder}/core\",\n                    \"remoteRoot\": \"/app\"\n                }\n            ],\n            \"justMyCode\": true\n        }\n    ]\n}\n
  2. If you run using docker run:

    {\n    // Use IntelliSense to learn about possible attributes.\n    // Hover to view descriptions of existing attributes.\n    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n    \"version\": \"0.2.0\",\n    \"configurations\": [\n        {\n            \"name\": \"Python: Remote Attach to Cat\",\n            \"type\": \"python\",\n            \"request\": \"attach\",\n            \"connect\": {\n                \"host\": \"localhost\",\n                \"port\": 5678\n            },\n            \"pathMappings\": [\n                {\n                    \"localRoot\": \"${workspaceFolder}/\",\n                    \"remoteRoot\": \"/app/cat\"\n                }\n            ],\n            \"justMyCode\": true\n        }\n    ]  \n}\n
"},{"location":"plugins/debugging/vscode/#connect-vscode-to-the-cat","title":"Connect vscode to the cat","text":"

To connect the VS Code debugger, ask the Cat to help you with debugging; then, in the Run and Debug tab, start debugging by clicking the Play button \u25b6\ufe0f or using the F5 shortcut.

You are ready to debug your plugin!

If you are new to VS Code debugging, check the official docs.

"},{"location":"plugins/debugging/vscode/#troubleshooting","title":"Troubleshooting","text":""},{"location":"plugins/debugging/vscode/#i-click-the-button-but-then-i-dont-see-the-debugging-bar-the-breakpoints-are-not-respected","title":"I click the button but then I don't see the debugging bar / the breakpoints are not respected","text":"

This usually means that the debugger is not active, make sure to activate the debugger by asking the Cat.

"},{"location":"plugins/debugging/vscode/#i-cannot-explore-the-code-outside-of-my-plugin","title":"I cannot explore the code outside of my plugin","text":"

By default, you can only explore your \"own\" code, but you can disable this by setting the param justMyCode to false in the launch.json file.

"},{"location":"plugins/debugging/vscode/#my-cat-is-installed-in-a-remote-server-can-i-debug-it","title":"My Cat is installed in a remote server, can I debug it?","text":"

Of course you can! Just set the correct host and port in the connect param of the launch.json file.

"},{"location":"plugins/plugins-registry/plugin-from-template/","title":"Using the Plugin Template","text":""},{"location":"plugins/plugins-registry/plugin-from-template/#using-the-plugin-template","title":"\ud83d\udd0c Using the Plugin Template","text":"

We have prepared a GitHub template for you to expedite the creation of a new plugin, ready for publication in the public Registry. The template includes a complete scaffolding for the plugin and the GitHub action configuration to release the plugin package.

"},{"location":"plugins/plugins-registry/plugin-from-template/#creating-the-new-plugin","title":"Creating the new plugin","text":"

In the example we will create a plugin for the Poetic Socks Seller (refer to the Quickstart section if you're not familiar with it). In the next steps, replace poetic_sock_seller with the name of your futuristic plugin!

Navigate to the plugin-template GitHub repository, click on Use this template and then on Create a new repository:

Type poetic_sock_seller as the repository name and then click on Create repository:

"},{"location":"plugins/plugins-registry/plugin-from-template/#cloning-the-plugin","title":"Cloning the Plugin","text":"

Now that you have set up the remote repository on GitHub, you need to set up the code locally. Hence, clone the repository directly into the Cat\u2019s plugins folder on your machine:

cd core/cat/plugins\ngit clone https://github.com/[your_account_name]/poetic_sock_seller.git\n
"},{"location":"plugins/plugins-registry/plugin-from-template/#customizing-the-plugin","title":"Customizing the Plugin","text":"

Finally, run the setup.py script to customize the repository:

cd poetic_sock_seller\npython setup.py\n

The script will prompt you to write the name of your plugin, Poetic Sock Seller, and will rename the template files accordingly; the output in the terminal will confirm the renaming.

The template contains a source code example, look at it in the poetic_sock_seller.py file.

"},{"location":"plugins/plugins-registry/plugin-from-template/#release-creation","title":"\ud83d\udce6 Release Creation","text":"

A repository created with our template automatically includes the creation of a release on GitHub through a GitHub action. This automation happens whenever you push changes to the main branch and the version number in the plugin.json file changes. The release is automatically tagged with the version number and released in all the formats supported by GitHub.

For details about the GitHub action, refer to the file .github/workflows/main.yml.

"},{"location":"plugins/plugins-registry/publishing-plugin/","title":"Publishing a Plugin","text":""},{"location":"plugins/plugins-registry/publishing-plugin/#publishing-a-plugin-in-the-registry","title":"\ud83d\udce4 Publishing a Plugin in the Registry","text":"

Publishing your plugin and making it available to the whole world is a relatively simple yet crucial step. Take a few minutes to read this section of the guide. Once done, you won't be able to stop!

"},{"location":"plugins/plugins-registry/publishing-plugin/#start-on-the-right-foot","title":"\ud83d\udc5f Start on the Right Foot","text":"

A plugin that will be published in our public registry requires some precautions and must have a plugin.json file within the root folder with all the fields as complete as possible. This ensures that your plugin is attractive and searchable through the dedicated \"Plugins\" tab of the Cheshire Cat.

To make it easier for you, we have provided a GitHub repository template so that you only need to clone it and find yourself with a folder ready to develop your first public plugin without worries.

You can find the repository at this address: https://github.com/cheshire-cat-ai/plugin-template

Click on the colorful \"Use this template\" button at the top and choose to create a new repository. Once you've chosen the name for your repository and cloned the code to your machine, you can run the setup script to clean up the files and rename everything as needed.

python setup.py\n

To learn more about how to work with the plugin template, read this dedicated page.

"},{"location":"plugins/plugins-registry/publishing-plugin/#release-creation","title":"\ud83d\udce6 Release Creation","text":"

We recommend using GitHub's release system to effectively manage your plugin releases. Our registry can always download the latest stable release of your plugin tagged on GitHub. You can do this manually or through automation.

A repository created with our template automatically includes the creation of a release on GitHub through a GitHub action. This automation happens whenever the version number in the plugin.json file changes. The release is automatically tagged with the version number and released in all the formats supported by GitHub.

Here's the documentation related to managing releases on GitHub: https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository

Info

Remember to change the version number in plugin.json only when you actually want to create a new version of your plugin. While you're in development, you can either open a develop branch (the automation only runs on the main branch) or continue to push to the main branch without changing the version number.

"},{"location":"plugins/plugins-registry/publishing-plugin/#take-care-of-the-pluginjson","title":"\ud83d\udcdc Take Care of the plugin.json","text":"

As you may have realized, the plugin.json file is what governs all aspects of publishing your plugin and contains the fields that will help your plugin stand out within the registry. Therefore, take care to fill it out as comprehensively as possible and try to complete all the available fields.

In reality, there are only 3 mandatory fields for publishing in the registry: name, author_name and version. However, we strongly recommend adding a couple of tags and a description as well. It's through these fields that our search system will be able to discover your plugin.

"},{"location":"plugins/plugins-registry/publishing-plugin/#explanation-of-the-fields","title":"\ud83d\udcc3 Explanation of the fields","text":"

Below is a list of the fields with a brief explanation, followed by an example plugin.json.

Warning

Fields marked with the asterisk (*) are mandatory.

  • name*: The name of your plugin.
  • version*: The last stable version number.
  • author_name*: The author's name or nickname.
  • description: A brief description of the plugin.
  • author_url: Link to the author's website.
  • plugin_url: Link to the plugin's website with the full description/documentation (can be a different link from the GitHub repository).
  • tags: A comma-separated list of tags (e.g., \"multimedia, documents, pdf, csv\").
  • thumb: The direct link to your plugin's logo image. Recommended minimum size is 160x160px. Recommended formats are png or jpg.
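
Putting it all together, a plugin.json could look like the following (all values below are illustrative):

{\n    \"name\": \"Poetic Sock Seller\",\n    \"version\": \"0.0.1\",\n    \"author_name\": \"Cheshire Cat\",\n    \"description\": \"Sells socks, poetically.\",\n    \"author_url\": \"https://example.com\",\n    \"plugin_url\": \"https://example.com/poetic-sock-seller\",\n    \"tags\": \"socks, poetry, shop\",\n    \"thumb\": \"https://example.com/thumb.png\"\n}\n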
"},{"location":"plugins/plugins-registry/publishing-plugin/#submit-your-plugin-for-review","title":"\ud83d\udc40 Submit Your Plugin for Review","text":"

The submission and review process is done through our plugins GitHub repository and it's quite straightforward. All you need to do is fork the repository and then, after adding your plugin to the JSON file, submit a Pull Request to us.

The fields to add to the new object you'll be adding are as follows:

  • name: The name of your plugin for identification in the list (for public display, the name contained in your plugin.json will be used).
  • url: The link to your public GitHub repository.
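
For example, the new object could look like this (hypothetical values):

{\n    \"name\": \"poetic_sock_seller\",\n    \"url\": \"https://github.com/your_account_name/poetic_sock_seller\"\n}\n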

The review process may take a few days, so don't worry if some time passes before you see your plugin approved. This depends on the number of plugins in the queue and the availability of volunteers. We will strive to provide feedback as quickly as possible.

The review is in place to prevent the publication of plugins containing malware, obvious security flaws, or of low quality and relevance. We will be diligent, but we ask for your understanding and request that you always submit tested plugins that do not jeopardize the security of our users.

"},{"location":"plugins/plugins-registry/publishing-plugin/#registry-cache-expiry","title":"\u23f3 Registry Cache Expiry","text":"

Upon successful submission, your plugin enters our registry cache, which updates periodically. However, it's important to note that this cache lasts 24 hours (1440 minutes) before it refreshes. During this time, newly submitted plugins might not immediately appear in the registry. Why the wait?

The caching mechanism optimizes the performance of our registry, efficiently managing and updating plugin listings.

If, after patiently waiting within this 24-hour window, your plugin doesn't show up in the registry, then it's time to let us know. We're here to assist you! Reach out to our support team or report the issue via our plugins GitHub repository. Please provide relevant details, including the submission date and any steps you've taken.

Remember, our goal is to make your plugin available to our community seamlessly. Your cooperation and patience in allowing for the cache expiry are appreciated as we work to ensure a smooth plugin publishing process.

"},{"location":"plugins/plugins-registry/publishing-plugin/#stay-updated","title":"\ud83d\udd14 Stay Updated","text":"

The final step is to stay informed about what's happening in the magical world of Cheshire Cat so that you can keep your plugin up to date with the latest developments. To facilitate this, we've created a dedicated channel for plugin developers on our official Discord server.

We invite you to become a part of our community and let a moderator know that you've submitted a plugin. Once your plugin is approved, we'll be happy to assign you a special role (Plugin Developer) and unlock all the dedicated channels for you.

If you don't use Discord or prefer not to log in to our server, we still encourage you to keep up with Cheshire Cat AI's updates. We'll conduct periodic review cycles, and if your plugin becomes too outdated or non-functional after some time, we may have to remove it from the registry.

Looking forward to seeing you among our amazing plugin developers!

"},{"location":"production/clients/","title":"Clients","text":""},{"location":"production/clients/#clients","title":"\ud83d\ude80 Clients","text":"

Below are listed the Official and Community Client Libraries, Widget-chats and Apps.

You can check out the REST API playground directly on your installation, at localhost:1865/docs.

There are several client libraries that may help you to easily chat with the Cat and call its endpoints from another application. Below we list client libraries in many languages, web widgets and client apps.

"},{"location":"production/clients/#libraries","title":"Libraries","text":""},{"location":"production/clients/#official","title":"Official","text":"Language Installation Client Repository pip install cheshire_cat_api Cheshire Cat Python API Client bun install ccat-api Cheshire Cat Typescript API Client"},{"location":"production/clients/#community","title":"Community","text":"Language Installation Client Repository bundle add cheshire_cat_api Cheshire Cat Ruby API Client composer require albocode/ccatphp-sdk Cheshire Cat PHP API Client None Working in progress... None Working in progress..."},{"location":"production/clients/#widget","title":"Widget","text":""},{"location":"production/clients/#official_1","title":"Official","text":"Name Language Repository widget-alpine Cheshire Cat Alpine.JS Widget Widget-Vue Cheshire Cat Vue Widget"},{"location":"production/clients/#apps","title":"Apps","text":"Name Platform Repository Meowgram Cheshire Cat Telegram Working in progress..."},{"location":"production/endpoints/","title":"Endpoints","text":""},{"location":"production/endpoints/#websocket-api","title":"Websocket API","text":"

While most network communication with LLMs is performed via HTTP, the Cat features full duplex communication:

  • two way communication: you send messages to the Cat, and the Cat can send messages to you
"},{"location":"production/endpoints/#http-api","title":"HTTP API","text":"

You can play around with the HTTP endpoints directly on your installation, under localhost:1865/docs. You will find there most of the documentation you need, alongside code snippets in various languages.

"},{"location":"production/endpoints/#examples","title":"Examples","text":""},{"location":"production/endpoints/#interacting-via-websocket","title":"Interacting via WebSocket","text":"

Example of how to implement a simple chat system using the websocket endpoint at localhost:1865/ws/.

Request JSON schema

Input must be sent in the following specific JSON format: {\"text\": \"input message here\"}.

Example

PythonNode
import asyncio\nimport websockets\nimport json\n\nasync def cat_chat():\n\n    try:\n        # Creating a websocket connection\n        async with websockets.connect('ws://localhost:1865/ws') as websocket:\n\n            # Running a continuous loop until broken\n            while True:\n\n                # Taking user input and sending it through the websocket\n                user_input = input(\"Human: \")\n                await websocket.send(json.dumps({\"text\": user_input}))\n\n                # Receiving and printing the cat's response\n                cat_response = await websocket.recv()\n                print(\"Cheshire Cat:\", cat_response)\n\n    except websockets.exceptions.InvalidURI:\n        print(\"Invalid URI provided. Please provide a valid URI.\")\n\n    except websockets.exceptions.InvalidStatusCode:\n        print(\"Invalid status code received. Please check your connection.\")\n\n    except websockets.exceptions.WebSocketProtocolError:\n        print(\"Websocket protocol error occurred. Please check your connection.\")\n\n    except websockets.exceptions.ConnectionClosedOK:\n        print(\"Connection successfully closed.\")\n\n    except Exception as e:\n        print(\"An error occurred:\", e)\n\n# Running the function until completion\nasyncio.get_event_loop().run_until_complete(cat_chat())\n
const WebSocket = require('ws');\n\nasync function cat_chat() {\n\n  try {\n    const socket = new WebSocket('ws://localhost:1865/ws/');\n\n    //Listen for connection event and log a message\n    socket.on('open', () => {\n      console.log('Connected to the Cheshire Cat');\n    });\n\n    //Listen for message event and log the received data message\n    socket.on('message', (data) => {\n      console.log(`Cheshire Cat: ${data}`);\n    });\n\n    //Iterate indefinitely while waiting for user input\n    while (true) {\n      //Call getUserInput function and wait for user input\n      const user_input = await getUserInput('Human: ');\n\n      //Send the message in the expected JSON format\n      socket.send(JSON.stringify({ text: user_input }));\n    }\n\n  } catch (error) {\n    console.error(error);\n  }\n}\n\n//Define a function named getUserInput that returns a Promise\nfunction getUserInput(prompt) {\n  return new Promise((resolve) => {\n    const stdin = process.openStdin();\n    process.stdout.write(prompt);\n\n    //Listen for data input events and resolve the Promise with the input\n    stdin.addListener('data', (data) => {\n      resolve(data.toString().trim());\n      stdin.removeAllListeners('data');\n    });\n  });\n}\n\n//Call the cat_chat function\ncat_chat();\n
"},{"location":"production/endpoints/#upload-documents-to-the-rabbithole","title":"Upload documents to the Rabbithole","text":"

Example of how to send a text file (.md, .pdf, .txt) to the Cat using the Rabbit Hole at localhost:1865/rabbithole/.

Currently the following MIME types are supported:

  • text/plain
  • text/markdown
  • application/pdf

Example

PythonNodecURL
import requests\n\nurl = 'http://localhost:1865/rabbithole/'\n\nwith open('alice.txt', 'rb') as f:\n    files = {\n        'file': ('alice.txt', f, 'text/plain')\n    }\n\n    headers = {\n        'accept': 'application/json',\n    }\n\n    response = requests.post(url, headers=headers, files=files)\n\nprint(response.text)\n
const request = require('request');\nconst fs = require('fs');\n\nconst url = 'http://localhost:1865/rabbithole/';\n\nconst file = fs.createReadStream('alice.txt');\nconst formData = {\n  file: {\n    value: file,\n    options: {\n      filename: 'alice.txt',\n      contentType: 'text/plain'\n    }\n  }\n};\n\nconst options = {\n  url: url,\n  headers: {\n    'accept': 'application/json'\n  },\n  formData: formData\n};\n\nrequest.post(options, function(err, res, body) {\n  if (err) {\n    return console.error('Error:', err);\n  }\n  console.log('Body:', body);\n});\n
# Upload an ASCII text file\ncurl -v -X POST -H \"accept: application/json\" -F \"file=@file.txt;type=text/plain\" http://127.0.0.1:1865/rabbithole/\n\n# Upload a Markdown file\ncurl -v -X POST -H \"accept: application/json\" -F \"file=@file.md;type=text/markdown\" http://127.0.0.1:1865/rabbithole/\n\n# Upload a PDF file\ncurl -v -X POST -H \"accept: application/json\" -F \"file=@myfile.pdf;type=application/pdf\" http://127.0.0.1:1865/rabbithole/\n
"},{"location":"production/administrators/architecture/","title":"Architecture","text":""},{"location":"production/administrators/architecture/#architecture","title":"Architecture","text":"

The Cheshire Cat framework consists of five components: the Core, the Admin Portal, the Vector Database, the LLM and the Embedder.

The Core and the Admin Portal are implemented within the framework, while the Vector Database, the LLM and the embedder are external dependencies.

The Core communicates with the Vector Database, the LLM and the embedder, while the Admin Portal communicates with the Core.

The Core is implemented in Python, Qdrant is utilized as the Vector Database, the Core supports different LLMs and Embedders (see the complete list below), and the Admin Portal is implemented using the Vue framework.

"},{"location":"production/administrators/architecture/#core","title":"Core","text":""},{"location":"production/administrators/architecture/#docker-images","title":"Docker Images","text":"

To facilitate, speed up, and standardize the Cat's user experience, the Cat contains configuration for use inside Docker.

You can use the pre-compiled images present in the Repo's Docker Registry or build it from scratch:

  1. To use the pre-compiled image, add ghcr.io/cheshire-cat-ai/core:<tag-version> as value of image under the name of the service in the docker-compose:

    cheshire-cat-core:\n    image: ghcr.io/cheshire-cat-ai/core:1.5.1\n
  2. To build it from scratch, execute docker compose build in the repo folder you just cloned.

    This will generate two Docker images. The first one contains the Cat Core and Admin Portal. The container name of the core is cheshire_cat_core.

The Cat core path ./core is mounted into the image cheshire_cat_core. By default, changes to files in this folder force a restart of the Core; this behavior can be disabled using the CCAT_DEBUG environment variable.

"},{"location":"production/administrators/architecture/#admin-portal","title":"Admin Portal","text":"

The Admin Portal connects to the core using localhost through the port exposed when the container was created; this value can be customized using environment variables. This port is the only one exposed by the cheshire_cat_core image.

"},{"location":"production/administrators/architecture/#logging","title":"Logging","text":"

All the log messages are printed on the standard output and the log level can be configured with the CCAT_LOG_LEVEL environment variable. You can check the logging system documentation here.

"},{"location":"production/administrators/architecture/#configuration","title":"Configuration","text":"

Some options of the Core can be customized using environment variables.

"},{"location":"production/administrators/architecture/#compatible-models","title":"Compatible Models","text":"

The Cat is model-agnostic, meaning you can attach your preferred LLM and embedder model/provider. The Cat supports the most used ones, and you can add more models/providers via plugins. Here is a list of the main ones:

  1. OpenAI and Azure OpenAI
  2. Cohere
  3. Ollama (LLM model only)
  4. HuggingFace TextInference API (LLM model only)
  5. Google Gemini
  6. Qdrant FastEmbed (Embedder model only)
"},{"location":"production/administrators/architecture/#vector-memory","title":"Vector Memory","text":""},{"location":"production/administrators/architecture/#what-we-use-as-vector-memory","title":"What we use as vector memory?","text":"

The Cat provides a connection to Qdrant through its Python client. By default, the Core tries to connect to a Qdrant database; if the connection fails, it switches to the local file-based Qdrant database. It is highly recommended to connect the Cat to a Qdrant database to increase performance and capacity!

"},{"location":"production/administrators/architecture/#qdrant-cloud-or-self-hosting","title":"Qdrant Cloud or Self Hosting","text":"

Qdrant provides two paths: Qdrant Cloud or self-hosting.

  1. Self-host Qdrant by using Docker; here follows an example compose.yml:
version: '3.7'\n\nservices:\n\n    cheshire-cat-core:\n        image: ghcr.io/cheshire-cat-ai/core:latest\n        container_name: cheshire_cat_core\n        depends_on:\n            - cheshire-cat-vector-memory\n        env_file:\n            - .env\n        ports:\n            - ${CORE_PORT:-1865}:80\n        volumes:\n            - ./static:/app/cat/static\n            - ./plugins:/app/cat/plugins\n            - ./data:/app/cat/data\n        restart: unless-stopped\n\n    cheshire-cat-vector-memory:\n        image: qdrant/qdrant:latest\n        container_name: cheshire_cat_vector_memory\n        expose:\n            - 6333\n        volumes:\n            - ./long_term_memory/vector:/qdrant/storage\n        restart: unless-stopped\n
  2. Add these environment variables to your .env file:
# Qdrant server\nCCAT_QDRANT_HOST=cheshire_cat_vector_memory # <url of the cluster>\nCCAT_QDRANT_PORT=6333 # <port of the cluster, usually 6333>\nCCAT_QDRANT_API_KEY=\"\" # optional <api-key>\n
"},{"location":"production/administrators/architecture/#admin-portal_1","title":"Admin Portal","text":""},{"location":"production/administrators/architecture/#use-case","title":"Use case","text":"

The Admin Portal is an administration/debugging panel to interact with the Cat by chatting, uploading files, exploring the memory, changing the LLM and Embedder Models while providing minimal authentication through an api_key.

"},{"location":"production/administrators/env-variables/","title":"Environment Variables","text":""},{"location":"production/administrators/env-variables/#environment-variables","title":"Environment Variables","text":"

The Core can be configured using environment variables; the values are read during Cat bootstrap.

"},{"location":"production/administrators/env-variables/#how-to-set-environment-variables","title":"How to set Environment Variables","text":"

To set environment variables:

  • Create a file named .env at the same level as the compose.yml file.
  • The root folder contains the .env.example, you can use this file as a reference.
  • Add to compose.yml the command to read the .env:

    services:\n  cheshire-cat-core:\n    image: ghcr.io/cheshire-cat-ai/core:latest\n    container_name: cheshire_cat_core\n    env_file:\n      - .env\n    ports:\n      - ${CORE_PORT:-1865}:80\n    volumes:\n      - ./static:/app/cat/static\n      - ./plugins:/app/cat/plugins\n      - ./data:/app/cat/data\n
  • The command docker compose up will now read the .env file and set the environment variables for the container.

"},{"location":"production/administrators/env-variables/#network","title":"Network","text":""},{"location":"production/administrators/env-variables/#ccat_core_host","title":"CCAT_CORE_HOST","text":"

Default value: localhost

The host at which the Cat is running. The variable is used by the Admin Portal to determine the host to connect to. If your installation has to be served on mywebsite.com, have in your .env: CCAT_CORE_HOST=mywebsite.com

"},{"location":"production/administrators/env-variables/#ccat_core_port","title":"CCAT_CORE_PORT","text":"

Default value: 1865

The port the Cat has to listen to, for both admin and REST API. Easter egg: 1865 is the year \"Alice in Wonderland\" was published.

"},{"location":"production/administrators/env-variables/#ccat_core_use_secure_protocols","title":"CCAT_CORE_USE_SECURE_PROTOCOLS","text":"

Default value: false

By default, the core APIs are exposed using the HTTP/WS protocol; set this parameter to true if you expose the API using the HTTPS/WSS protocol, for example using NGINX in front of the Cat.

"},{"location":"production/administrators/env-variables/#ccat_cors_allowed_origins","title":"CCAT_CORS_ALLOWED_ORIGINS","text":"

Default value: *

By default, the core APIs can be consumed from all origins; using this parameter you can restrict which origins can consume the APIs.

"},{"location":"production/administrators/env-variables/#ccat_https_proxy_mode","title":"CCAT_HTTPS_PROXY_MODE","text":"

Default value: false

Enable this variable if you are using a proxy like Nginx with SSL in front of the Cat, otherwise HTTPS will cause redirection problems.

"},{"location":"production/administrators/env-variables/#ccat_cors_forwarded_allow_ips","title":"CCAT_CORS_FORWARDED_ALLOW_IPS","text":"

Default value: *

A comma-separated list of IPs to trust with proxy headers (most likely passed down to the underlying web server as forwarded-allow-ips). If you run a reverse proxy in front of the Cat, set this to the proxy's address; the default * trusts all.

"},{"location":"production/administrators/env-variables/#security","title":"Security","text":""},{"location":"production/administrators/env-variables/#ccat_api_key","title":"CCAT_API_KEY","text":"

Default value: [empty]

By default, the core HTTP API does not require any authorization. If you set this variable all HTTP endpoints will require an Authorization: Bearer <ccat_api_key> header. Failure to provide the correct key will result in a 403 error. Websocket endpoints will remain open, unless you set CCAT_API_KEY_WS (see below).

If, along with the HTTP API call, you also want to tell the endpoint which user is making the request, use the user_id: <my_user_id> header. If you don't, the Cat will assume user_id: user.
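
For example, a minimal authenticated call that also identifies the user could look like this (replace the key and the user id with your own):

import requests\n\nheaders = {\n    \"Authorization\": \"Bearer <ccat_api_key>\",\n    \"user_id\": \"alice\"  # optional: which user is making the request\n}\n\nresponse = requests.get(\"http://localhost:1865/\", headers=headers)\nprint(response.status_code)\n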

Keep in mind that api keys are intended for machine-2-machine communication; If you are talking to the Cat from a browser, set the api keys to secure your installation, but only communicate with the Cat via JWT (TODO: insert JWT tutorial).

"},{"location":"production/administrators/env-variables/#ccat_api_key_ws","title":"CCAT_API_KEY_WS","text":"

Default value: [empty]

By default, WebSocket endpoints are open to the public. If you want to lock them down, set this environment variable, e.g. CCAT_API_KEY_WS=meows.

To pass the gate, call the WS endpoint using a token query parameter: Example ws://localhost:1865/ws/<user_id>/?token=<ccat_api_key_ws>.
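
For example, using the Python websockets library, a gated connection could be opened like this (a minimal sketch reusing the meows key from above):

import asyncio\nimport json\nimport websockets\n\nasync def main():\n    # The token must match CCAT_API_KEY_WS\n    uri = \"ws://localhost:1865/ws/user/?token=meows\"\n    async with websockets.connect(uri) as ws:\n        await ws.send(json.dumps({\"text\": \"hello\"}))\n        print(await ws.recv())\n\nasyncio.run(main())\n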

Keep in mind that api keys are intended for machine-2-machine communication; If you are talking to the Cat from a browser, set the api keys to secure your installation, but only communicate with the Cat via JWT (TODO: insert JWT tutorial).

"},{"location":"production/administrators/env-variables/#ccat_jwt_secret","title":"CCAT_JWT_SECRET","text":"

Default value: secret

Secret for issuing and validating JWTs. Must be personalized along with CCAT_API_KEY and CCAT_API_KEY_WS to make the installation secure.

"},{"location":"production/administrators/env-variables/#ccat_jwt_algorithm","title":"CCAT_JWT_ALGORITHM","text":"

Default value: HS256

Algorithm to sign the JWT with CCAT_JWT_SECRET.

"},{"location":"production/administrators/env-variables/#ccat_jwt_expire_minutes","title":"CCAT_JWT_EXPIRE_MINUTES","text":"

Default value: 1440

By default a JWT expires after 1 day.

"},{"location":"production/administrators/env-variables/#debug","title":"Debug","text":""},{"location":"production/administrators/env-variables/#ccat_debug","title":"CCAT_DEBUG","text":"

Default value: true

By default changes to files in the root folder of the Cat force a restart of the Core. This is useful during the development of Plugins. This behavior can be switched off in production by setting to false.

"},{"location":"production/administrators/env-variables/#ccat_log_level","title":"CCAT_LOG_LEVEL","text":"

Default value: INFO

The log level. Available levels are: DEBUG, INFO, WARNING, ERROR, CRITICAL.
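
For example, to get verbose logs while developing a plugin, you could set in your .env:

CCAT_LOG_LEVEL=DEBUG\n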

"},{"location":"production/administrators/env-variables/#vector-db","title":"Vector DB","text":""},{"location":"production/administrators/env-variables/#ccat_qdrant_host","title":"CCAT_QDRANT_HOST","text":"

Default value: [empty]

The host on which Qdrant is running. The Cat provides a ready-to-use file-based Qdrant, embedded in cat/data/local_vector_memory. If you want to use an external instance of Qdrant or a separate container in compose.yml, use this parameter to specify the host where it is running. You can also optionally specify the protocol in the URL to make a secure connection (for example https://example.com).

See the local-cat repo for an example usage of Qdrant as a container.

"},{"location":"production/administrators/env-variables/#ccat_qdrant_port","title":"CCAT_QDRANT_PORT","text":"

Default value: 6333

The port on which Qdrant is running, in case you use an external host or another container inside the compose.yml.

"},{"location":"production/administrators/env-variables/#ccat_qdrant_api_key","title":"CCAT_QDRANT_API_KEY","text":"

Default value: [empty]

This is used to set the Qdrant Api Key in the client connection statement. It should be configured if an Api Key is set up on the Qdrant Server, or if you are using the cloud version.

"},{"location":"production/administrators/env-variables/#ccat_save_memory_snapshots","title":"CCAT_SAVE_MEMORY_SNAPSHOTS","text":"

Default value: false

Set to true to turn on Vector Database snapshots, so that when you change embedder an automatic backup will be saved on disk. Please note:

  • Snapshots are painfully slow.
  • We have not implemented a routine to reimport the snapshot.
"},{"location":"production/administrators/env-variables/#others","title":"Others","text":""},{"location":"production/administrators/env-variables/#ccat_metadata_file","title":"CCAT_METADATA_FILE","text":"

Default value: cat/data/metadata.json

The name of the file that contains all the Cat settings.

"},{"location":"production/advanced/api_auth/","title":"🔐 API Authentication","text":""},{"location":"production/advanced/api_auth/#api-authentication","title":"\ud83d\udd10 API Authentication","text":"

By default, the core APIs don't require any authorization. If you set the CCAT_API_KEY environment variable, all endpoints will require an access_token header for authentication. Failure to provide the correct access token will result in a 403 error.

Warning

This kind of authentication is weak and is intended for machine to machine communication; please do not rely on it alone, and enforce another, stronger kind of authentication such as OAuth2 on the client side.

Example

Authenticated API call:

PythonNode
import requests\n\nserver_url = 'http://localhost:1865/'\napi_key = 'your-key-here'\naccess_token = {'access_token': api_key}\n\nresponse = requests.get(server_url, headers=access_token)\n\nif response.status_code == 200:\n    print(response.text)\nelse:\n    print('Error occurred: {}'.format(response.status_code))\n
const request = require('request');\n\nconst serverUrl = 'http://localhost:1865/';\nconst apiKey = 'your-key-here';\nconst access_token = {'access_token': apiKey};\n\nrequest({url: serverUrl, headers: access_token}, (error, response, body) => {\n    if (error) {\n        console.error(error);\n    } else {\n        if (response.statusCode === 200) {\n            console.log(body);\n        } else {\n            console.error(`Error occurred: ${response.statusCode}`);\n        }\n    }\n});\n

By adding the variable to the .env file, all Swagger endpoints (localhost:1865/docs) will require authentication; you can authenticate from the top right-hand corner of the page through the green Authorize button.

"},{"location":"production/advanced/contributing/","title":"Contributing","text":""},{"location":"production/advanced/contributing/#contributing","title":"Contributing","text":"

Thank you for considering a code contribution. If you want to learn how the Cat works and join its development, there is a different installation process to follow.

"},{"location":"production/advanced/contributing/#development-setup","title":"Development setup","text":"
  • Clone the repository on your machine
git clone https://github.com/cheshire-cat-ai/core.git cheshire-cat\n
  • Enter the created folder
cd cheshire-cat\n
  • Run docker container
docker compose up\n

The first time you run the docker compose up command, it will take several minutes to build the Docker Cat image. Once finished, the Cat will be living and running!

To stop the Cat, hit CTRL-C in the terminal; you should see the logs stopping. Then run:

docker compose down\n
"},{"location":"production/advanced/contributing/#update-development-setup","title":"Update development setup","text":"

Remember to update often both your fork and your local clone. Before each session, follow these steps:

  • Enter the folder where you cloned the repository
cd cheshire-cat\n
  • Pull updates from the GitHub repository
git pull\n
  • Build the Docker container again
docker compose build --no-cache\n
  • Remove dangling images (optional)
docker rmi -f $(docker images -f \"dangling=true\" -q)\n
  • Run docker containers
docker compose up\n
"},{"location":"production/advanced/contributing/#your-first-code-contribution","title":"Your First Code Contribution","text":"
  1. Checkout the develop branch (git checkout -b develop and then git pull origin develop)
  2. Create your Feature Branch (git checkout -b feature/AmazingFeature)
  3. Commit your Changes (git commit -m 'Add some AmazingFeature')
  4. Push to the Branch (git push origin feature/AmazingFeature)
  5. Open a Pull Request against the develop branch (if it contains lots of code, please discuss it beforehand by opening an issue)
"},{"location":"production/advanced/contributing/#important-notes","title":"Important notes","text":"
  • try to discuss your contribution beforehand in an issue, to make an actually useful PR
  • try to keep your PR small, single feature / fix and to the point
  • branch out from develop and make your PR against develop; branch main is only used for releases
"},{"location":"production/advanced/contributing/#improving-the-documentation","title":"Improving The Documentation","text":"

Docs contributions are highly valuable for the project. See details on how to help with the docs here.

"},{"location":"production/advanced/memory_backup/","title":"How to backup the Long Term Memory","text":""},{"location":"production/advanced/memory_backup/#memory-backup","title":"\ud83d\uddc2\ufe0f Memory Backup","text":""},{"location":"production/advanced/memory_backup/#create-a-full-backup","title":"Create a full backup","text":"

To create a complete backup for the memories of your Cheshire Cat, you simply need to copy the long_term_memory folder located in the root directory. This will allow you to later load all the (declarative and episodic) memories into a new instance whenever you wish.

"},{"location":"production/advanced/memory_backup/#restore-a-full-backup","title":"Restore a full backup","text":"

To load your backup into a clean installation of Cheshire Cat, you just need to copy the long_term_memory folder into the root directory at the same level as the core folder. In case you've already started an instance of Cheshire Cat, you will find the long_term_memory folder there; you can safely overwrite it.

Warning

The long_term_memory folder may be protected, and you might need to use the admin permissions of your system to access it.

The terminal command to perform this operation is as follows:

sudo cp -r /path/to/source/cheshire_cat/long_term_memory /path/to/destination/cheshire_cat\n
"},{"location":"production/advanced/tests/","title":"Automatic Tests","text":""},{"location":"production/advanced/tests/#testing","title":"\ud83d\udd2c Testing","text":"

To run tests, start the Cat as usual. Tests will run on the same container you already launched, but with mock databases and plugin folder. End to end (e2e) tests are found in tests/routes, while all the other folders contain unit tests and mocks / utilities.

"},{"location":"production/advanced/tests/#run-all-tests","title":"Run all tests","text":"

Open another terminal (in the same folder from where you start the Cat) and launch:

docker exec cheshire_cat_core python -m pytest --color=yes .\n
"},{"location":"production/advanced/tests/#run-a-specific-test-file","title":"Run a specific test file","text":"

If you want to run specific test files and not the whole suite, just specify the path:

docker exec cheshire_cat_core python -m pytest --color=yes tests/routes/memory/test_memory_recall.py\n
"},{"location":"production/advanced/tests/#run-a-specific-test-function-in-a-specific-test-file","title":"Run a specific test function in a specific test file","text":"

You can also launch only one specific test function, using the :: notation and the name of the function:

docker exec cheshire_cat_core python -m pytest --color=yes tests/routes/memory/test_memory_recall.py::test_memory_recall_with_k_success\n
"},{"location":"production/advanced/user_system/","title":"User Management","text":""},{"location":"production/advanced/user_system/#user-system","title":"\ud83d\udc65 User System","text":"

The Cat provides a basic user management system that allows keeping separate memories for each user. The user system affects only the working memory and the episodic memory. The other memories are shared among users.

More in detail, the websocket client indicates the current user_id by calling the ws://localhost:1865/ws/{user_id} endpoint. The Cat uses such user_id to retrieve the user's working memory and to store the user's conversation (using the user_id as a metadatum).

Note

Please note that the user management system only works when using a custom client. It is not intended to be used from the admin interface, which, by default, uses user_id = \"user\".

"},{"location":"production/advanced/user_system/#example","title":"Example","text":"

The Cheshire Cat provides two API clients, written in Python and Typescript, that allow exploiting the user management system.

Example

Setting the user_id from a custom client:

PythonTypescript
import cheshire_cat_api as ccat\n\ncat_client = ccat.CatClient()\n\n# Send a message specifying the user_id\nmessage = \"Hello my friend!!\"\ncat_client.send(message, user_id=\"user05\")\n
import { CatClient } from 'ccat-api'\n\nconst cat = new CatClient({\n    baseUrl: 'localhost'\n})\n\ncat.send('Hello my friend!!', 'user05');\n

TODO: Add a hook example to retrieve documents only for a few users.
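
Meanwhile, here is a sketch of what such a hook could look like (the before_cat_recalls_declarative_memories hook name and the shape of its config are assumptions to verify against your Cat version):

from cat.mad_hatter.decorators import hook\n\n\n@hook\ndef before_cat_recalls_declarative_memories(declarative_recall_config, cat):\n    # Hypothetical: only recall documents whose metadata matches the current user\n    declarative_recall_config[\"metadata\"] = {\"user_id\": cat.user_id}\n    return declarative_recall_config\n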

"},{"location":"production/use-cases/examples/","title":"Examples","text":""},{"location":"production/use-cases/examples/#examples","title":"Examples","text":""},{"location":"production/use-cases/examples/#todo-plx-help-no-idea","title":"TODO: plx help. no idea","text":""},{"location":"production/use-cases/integrations/","title":"Integrations","text":""},{"location":"production/use-cases/integrations/#integrations","title":"Integrations","text":"Technologies Repository Blog Post Working in progress... Nginx compose Nginx Guide Working in progress... Cat App Tipi Guide"},{"location":"quickstart/installation-configuration/","title":"Installation and First Configuration","text":""},{"location":"quickstart/installation-configuration/#installation-and-first-configuration","title":"\ud83d\ude80 Installation and First configuration","text":""},{"location":"quickstart/installation-configuration/#requirements","title":"Requirements","text":"

To run the Cheshire Cat, you need to have Docker (instructions) and docker compose (instructions) already installed on your system.

The Cat is not an LLM, it uses an LLM. Hence, when you run the Cat for the first time, you need to configure the LLM and the embedder. Most people use ChatGPT; it's quite cheap and powerful enough. We will do the same during the next steps.

To use ChatGPT, you need an API key. You can request one on the provider's website:

  • visit your OpenAI API Keys page;
  • create an API key with + Create new secret key and copy it

"},{"location":"quickstart/installation-configuration/#setup","title":"Setup","text":"

Create a folder on your machine, and inside it create a file named compose.yml. Copy/paste the following inside:

version: '3.7'\n\nservices:\n\n  cheshire-cat-core:\n    image: ghcr.io/cheshire-cat-ai/core:latest\n    container_name: cheshire_cat_core\n    ports:\n      - ${CORE_PORT:-1865}:80\n    volumes:\n      - ./static:/app/cat/static\n      - ./plugins:/app/cat/plugins\n      - ./data:/app/cat/data\n
"},{"location":"quickstart/installation-configuration/#starting-the-cat","title":"Starting the Cat","text":"
  • Open a terminal inside the same folder and run:
docker compose up\n

The first time you run the docker compose up command, it will take several minutes to pull the Cat's Docker image, depending on your network connection.

You will see three new folders:

  • data: where long-term memory and settings are stored
  • plugins: where you can install and develop plugins
  • static: folder to serve static files from
"},{"location":"quickstart/installation-configuration/#stopping-the-cat","title":"Stopping the Cat","text":"

Stop the terminal with CTRL + c and run docker compose down.

"},{"location":"quickstart/installation-configuration/#useful-commands","title":"Useful commands","text":"

To start the container in background mode, add the --detach or -d flag to the command:

docker compose up -d\n
In this way, the terminal won't be locked by the docker compose execution.

To check the logs, run:

docker compose logs -f\n
"},{"location":"quickstart/installation-configuration/#first-configuration-of-the-llm","title":"First configuration of the LLM","text":"
  • Open the Admin Portal in your browser at localhost:1865/admin
  • Configure the LLM in the Settings tab and paste your API key (video)
"},{"location":"quickstart/installation-configuration/#next-step","title":"Next step","text":"

In the next step, you will learn how to play with the Cat.

"},{"location":"quickstart/installing-plugin/","title":"Installing a Plugin","text":""},{"location":"quickstart/installing-plugin/#installing-a-plugin-from-the-registry","title":"\ud83d\udce5 Installing a Plugin from the Registry","text":"

Installing plugins from our registry is a seamless process that enhances your Cheshire Cat AI experience. Whether you're seeking specific functionalities or exploring new features, our registry offers a diverse range of plugins ready for installation.

"},{"location":"quickstart/installing-plugin/#through-the-admin-dashboard","title":"Through the Admin Dashboard","text":"
  1. Navigation: Access the Cheshire Cat AI Admin.
  2. Plugins Tab: Click on the \"Plugins\" tab within the dashboard.
  3. Search and Filter: Use the search or filter options to locate your desired plugin.
  4. Installation: Once you've found the plugin, click the \"Install\" button.
  5. Wait for Completion: The admin will show a loading spinner until the plugin installation is completed.
"},{"location":"quickstart/installing-plugin/#manual-installation","title":"Manual Installation","text":"

For those inclined towards manual installation, follow these steps:

  1. Download the Zip: Open the plugin of interest in the registry, follow its GitHub URL, and download the zip file.
  2. Upload: In the top right corner of the Plugins page, locate and click the \"Upload Plugin\" button.
  3. Upload Zip: Upload the downloaded zip file using this feature.

Manual installation grants users more control over the process and makes it possible to install plugins that are not in the registry.

"},{"location":"quickstart/installing-plugin/#post-installation-steps","title":"Post-Installation Steps","text":"

After installing a plugin, consider these steps:

  • Refresh: The admin refreshes automatically after the installation, but if for some reason the plugin does not show up, refresh the page or check the Cat's logs for errors;
  • Settings Configuration: If the newly installed plugin requires setup or configuration, look for the cog icon associated with the plugin. Click on it to access and adjust the plugin's settings according to your preferences.
"},{"location":"quickstart/introduction/","title":"Introduction","text":""},{"location":"quickstart/introduction/#introduction","title":"Introduction","text":"

The Cheshire Cat is a ready-to-use AI micro-framework. Once installed and connected to a Large Language Model (LLM), it can be queried through APIs. These APIs return the responses provided by the LLM.

But this is just the beginning.

"},{"location":"quickstart/introduction/#previous-conversation-history","title":"Previous Conversation History","text":"

All previous conversations are stored in a local database called episodic memory. When you ask a question, the Cat answers taking past conversations into account.

"},{"location":"quickstart/introduction/#loading-documents","title":"Loading Documents","text":"

You can load text documents as well. These documents are also saved in a local database called declarative memory. When answering, the Cat will consider the information within these documents. Documents can be uploaded through the APIs or the Admin Portal.

The Rabbit Hole is the component responsible for the document ingestion.

"},{"location":"quickstart/introduction/#performing-actions","title":"Performing Actions","text":"

The Cheshire Cat isn't limited to just answering questions; it can also perform actions. You can write Python functions called Tools and have the LLM execute this code. The only limit to the Python code's capabilities is your imagination.

"},{"location":"quickstart/introduction/#extending-the-core","title":"Extending the Core","text":"

Additionally, it's possible to customize the Cheshire Cat's core. In the main process flow, there are predefined adaptation points called Hooks. You can write Python functions that can be attached to these Hooks. The attached code will be invoked during the flow's execution and can modify the Cheshire Cat's internal behavior, without directly modifying its core.

Tools and Hooks are packaged into Plugins that can be installed by placing files in a specific folder or using the Admin Portal. The Mad Hatter is the component that manages plugins.

"},{"location":"quickstart/introduction/#sharing-plugins","title":"Sharing Plugins","text":"

If desired, you can publish your Plugins on the public registry. Other users will be able to install them with just a single click from the Admin Portal.

"},{"location":"quickstart/introduction/#admin-portal","title":"Admin Portal","text":"

A web portal for Admin users completes the framework. Using this portal, the admin can configure the settings, install plugins, upload documents and use it as a playground tool. You can chat with the Cheshire Cat, inspect its responses and directly query its memories.

"},{"location":"quickstart/introduction/#next-step","title":"Next step","text":"

In the next step, you will learn how to install the Cat, set the LLM and the basics of this all.

We will be transforming the Cat into a sock seller. More in detail, we will upload some knowledge (documents) about sock knitting. The Cat will also be able to tell the price of socks according to the requested color (using a Tool). In the end, we will transform the sock seller into a poetic sock seller, changing its personality (using a Hook).

The example is light and fun, and it should give you an idea of what is possible.

"},{"location":"quickstart/play-with-the-cat/","title":"Playing with the Cat","text":""},{"location":"quickstart/play-with-the-cat/#play-with-the-cat","title":"Play with the Cat","text":""},{"location":"quickstart/play-with-the-cat/#requirements","title":"Requirements","text":"

Ensure that the Cat is installed and running, and the LLM is configured.

"},{"location":"quickstart/play-with-the-cat/#chatting-with-the-cat-the-admin-portal-playground","title":"Chatting with the Cat: the Admin Portal playground","text":"

The Cat is an API-first framework, and it doesn't provide a ready-to-use UI for the end user. It is your responsibility to implement this UI. However, the Cat offers a playground that you can use to quickly test the AI you are implementing.

To access the playground, go to the Admin Portal at localhost:1865/admin and click on the Home tab. This tab serves as the playground for chatting with the Cat.

Try to ask something about socks, e.g. \"what do you know about socks?\". The Cat will give a generic answer. Afterward, we will expand this general knowledge with more specific information.

You have learned how to use the playground to test your AI via the Admin Portal.

"},{"location":"quickstart/play-with-the-cat/#chatting-with-the-cat-api-interaction","title":"Chatting with the Cat: API interaction","text":"

The Cat is an API-first framework: you can chat with it using the WebSocket protocol.

Here is an example of how to use it:

import asyncio\nimport websockets\nimport json\n\n\nasync def cat_chat():\n    # Creating a websocket connection\n    async with websockets.connect(\"ws://localhost:1865/ws\") as websocket:\n        # Taking user input and sending it through the websocket\n        user_input = input(\"Human: \")\n        await websocket.send(json.dumps(\n            {\n                \"text\": user_input\n            }\n        ))\n\n        # Receiving and printing the cat's response\n        cat_response = json.loads(await websocket.recv())\n\n        print(\"Cheshire Cat:\", json.dumps(cat_response, indent=4))\n\n\n# Running the chat function until completion\nasyncio.run(cat_chat())\n

Run it and ask \"what do you know about socks?\" again; the output in the terminal should look like:

\u276f python3 test.py\nHuman: what do you know about socks?\nCheshire Cat: {\n    \"error\": false,\n    \"type\": \"chat\",\n    \"content\": \"Ah, socks! They're quite fascinating little things, aren't they? Well, let me tell you what I know about socks. They come in all shapes, sizes, and colors, and they're usually worn on the feet to keep them warm and cozy. Some people like their socks plain and simple, while others prefer them with funky patterns or cute designs. Socks can be made from different materials like cotton, wool, or even synthetic fibers. They can also have different lengths, from ankle socks to knee-highs. And let's not forget about those toe socks that give each little piggy its own little cozy home! So, there you have it, a little glimpse into the world of socks. Is there anything specific you'd like to know about them?\",\n    \"why\": {\n        \"input\": \"what do you know about socks?\",\n        \"intermediate_steps\": null,\n        \"memory\": {\n            \"episodic\": [],\n            \"declarative\": [],\n            \"procedural\": []\n        }\n    }\n}\n

This example shows the raw use of the Cat APIs; however, there are convenient, ready-made libraries available for various languages!

"},{"location":"quickstart/play-with-the-cat/#more-info","title":"More Info","text":"

Developers -> Client Libraries

"},{"location":"quickstart/prepare-plugin/","title":"Creating a Plugin","text":""},{"location":"quickstart/prepare-plugin/#preparing-a-plugin","title":"\ud83d\udd0c Preparing a Plugin","text":"

Plugins are packages of Tools and Hooks. You don't distribute a Tool or a Hook directly; you distribute a Plugin containing them.

"},{"location":"quickstart/prepare-plugin/#creating-the-plugin","title":"Creating the Plugin","text":"

In this step, we will prepare an empty plugin and in the next steps we will insert a Tool and a Hook inside it.

To create a plugin, just create a new folder in core/cat/plugins/; for our first plugin, the folder name will be poetic_sock_seller.

You need two files in your plugin folder:

\u251c\u2500\u2500 core\n\u2502   \u251c\u2500\u2500 cat\n\u2502   \u2502   \u251c\u2500\u2500 plugins\n|   |   |   \u251c\u2500\u2500 poetic_sock_seller\n|   |   |   |   \u251c poetic_sock_seller.py\n|   |   |   |   \u251c plugin.json\n

The plugin.json file contains the plugin's name and description, and it allows the Admin Portal to recognize the plugin and activate/deactivate it.

plugin.json example:

{\n    \"name\": \"Poetic Sock Seller\",\n    \"description\": \"Description of poetic_sock_seller\"\n}\n

The poetic_sock_seller.py file will contain the source code of your Tools and Hooks, and can be left completely empty for this step.

"},{"location":"quickstart/prepare-plugin/#activating-the-plugin","title":"Activating the Plugin","text":"

Now, go to the Plugin tab of the Admin Portal. Your empty plugin will be there; activate it:

"},{"location":"quickstart/prepare-plugin/#more-info","title":"More Info","text":"

Developers \u2192 Plugins Reference \u2192 Plugin

"},{"location":"quickstart/prepare-plugin/#next-step","title":"Next Step","text":"

In the next step, you will learn how to create your first Tool.

"},{"location":"quickstart/stopping-the-cat/","title":"Stopping the Cat","text":""},{"location":"quickstart/stopping-the-cat/#stopping-the-cat","title":"\ud83d\ude80 Stopping the Cat","text":""},{"location":"quickstart/stopping-the-cat/#stopping-the-cat_1","title":"Stopping the Cat","text":"

When you're done using the Cat, click on the terminal and press CTRL + c to stop it. Then, run the command:

docker compose down\n

Keep in mind that the Long-Term Memory is persisted locally: when you restart the Cat, all conversation history and uploaded documents will still be there.

"},{"location":"quickstart/upload-document/","title":"Uploading a Document","text":""},{"location":"quickstart/upload-document/#upload-a-document","title":"Upload a Document","text":"

Documents can be uploaded via the Admin Portal (and also through the APIs). The Cat will consider uploaded documents when generating the answer to your question. These documents are saved in a local database called declarative memory.
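
For instance, a minimal sketch of a file upload through the HTTP API (assuming a default local installation and using the requests package; the file name is just an example):

import requests\n\n# The Rabbit Hole endpoint ingests the file into declarative memory\nwith open(\"sock_knitting.pdf\", \"rb\") as f:\n    response = requests.post(\n        \"http://localhost:1865/rabbithole/\",\n        files={\"file\": (\"sock_knitting.pdf\", f, \"application/pdf\")},\n    )\n\nprint(response.json())\n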

"},{"location":"quickstart/upload-document/#improve-the-cat-knowledge","title":"Improve the Cat knowledge","text":"

The Cat's knowledge about socks is quite basic; we will upload more specific knowledge.

Go to the Admin Portal at localhost:1865/admin and, on the Home tab, click on the Flash Icon, then click on Upload url and use this URL: https://en.wikipedia.org/wiki/N%C3%A5lebinding:

You will receive a notification that the ingestion has started:

and another notification when the reading is finished:

"},{"location":"quickstart/upload-document/#trying-new-knowledge","title":"Trying new knowledge","text":"

The Cat can now give more detailed answers:

"},{"location":"quickstart/upload-document/#why-the-response","title":"Why the response?","text":"

By clicking on the question mark next to the answer, you can understand what prompted the Cat to provide the response. In this case, you can see that it used the knowledge coming from the documents (declarative memory):

"},{"location":"quickstart/upload-document/#next-step","title":"Next Step","text":"

In the next step, you will learn how to prepare an empty Plugin.

"},{"location":"quickstart/writing-hook/","title":"Writing the first Hook","text":""},{"location":"quickstart/writing-hook/#writing-the-first-hook","title":"Writing the first Hook","text":"

Hooks are Python functions that can be attached to specific parts of the Cat's core. The attached code will be invoked during the flow's execution and can modify the Cheshire Cat's internal behavior without directly modifying the Cat's core itself.

"},{"location":"quickstart/writing-hook/#transform-the-cat-into-a-poetic-socks-seller","title":"Transform the Cat into a Poetic Socks Seller","text":"

At the moment, if you ask the Cat \u201cwho are you?\u201d, it will present itself as the Cheshire Cat AI. To impersonate a poetic sock seller, we can create a hook and attach it to the agent_prompt_prefix hook point.

TODO: \"hook point\" what is this? \"hook definition\"?

from cat.mad_hatter.decorators import hook\n\n@hook\ndef agent_prompt_prefix(prefix, cat):\n\n    prefix = \"\"\"You are Marvin the socks seller, a poetic vendor of socks.\nYou are an expert in socks, and you reply with exactly one rhyme.\n\"\"\"\n\n    return prefix\n
"},{"location":"quickstart/writing-hook/#testing-the-hook","title":"Testing the Hook","text":"

Now, let\u2019s ask \u201cwho are you?\u201d again, and then ask for our favorite sock color:

"},{"location":"quickstart/writing-hook/#explaining-the-code-step-by-step","title":"Explaining the code step by step","text":"
from cat.mad_hatter.decorators import hook\n

Let\u2019s import the hook decorator from the Cat. If you don\u2019t know what decorators are in coding, don\u2019t worry: they will help us attach our Python functions to the Cat. The mad_hatter is the Cat component that manages and runs plugins.

@hook\ndef agent_prompt_prefix(prefix, cat):\n\n    prefix = \"\"\"You are Marvin the socks seller, a poetic vendor of socks.\nYou are an expert in socks, and you reply with exactly one rhyme.\n\"\"\"\n\n    return prefix\n

Here, we've defined a Python function called agent_prompt_prefix. It takes prefix and cat as arguments and is decorated with @hook. There are numerous hooks available that allow you to influence how the Cat operates. The agent_prompt_prefix hook, in particular, allows instructing the Cat about who it is and how it should answer.

"},{"location":"quickstart/writing-hook/#more-info","title":"More Info","text":"

Developers \u2192 Plugins Reference \u2192 Hooks

"},{"location":"quickstart/writing-hook/#next-step","title":"Next Step","text":"

In the next step, you will learn how to stop the Cat.

"},{"location":"quickstart/writing-tool/","title":"Writing the first Tool","text":""},{"location":"quickstart/writing-tool/#writing-the-first-tool","title":"Writing the first Tool","text":"

Tools are Python functions called by the LLM to execute actions. They are made of two parts: the first one contains instructions that explain to the LLM when and how to call the function; the second one contains the actual code to execute.

"},{"location":"quickstart/writing-tool/#creating-the-tool","title":"Creating the Tool","text":"

Now, let\u2019s get down to business. A real sock sales representative offers a variety of socks, with many colors and corresponding prices. Let\u2019s say a customer wants to know the price of socks in a specific color. We could write a tool to answer the question. Therefore, copy and paste this source code into the file poetic_sock_seller.py:

from cat.mad_hatter.decorators import tool\n\n@tool\ndef socks_prices(color, cat):\n    \"\"\"How much do socks cost? Input is the sock color.\"\"\"\n    prices = {\n        \"black\": 5,\n        \"white\": 10,\n        \"pink\": 50,\n    }\n    if color not in prices.keys():\n        return f\"No {color} socks\"\n    else:\n        return f\"{prices[color]} \u20ac\" \n
"},{"location":"quickstart/writing-tool/#testing-the-tool","title":"Testing the Tool","text":"

Now, let\u2019s ask for the price of our favorite sock color:

"},{"location":"quickstart/writing-tool/#why-the-response","title":"Why the response?","text":"

By clicking on the question mark next to the answer, you can understand what prompted the Cat to provide the response. In this case, you can see that our tool \"socks_prices\" was used:

"},{"location":"quickstart/writing-tool/#explaining-the-code-step-by-step","title":"Explaining the code step by step","text":"
from cat.mad_hatter.decorators import tool\n

Let\u2019s import the tool decorator from the Cat. If you don\u2019t know what decorators are in coding, don\u2019t worry: they will help us attach our Python functions to the Cat. The mad_hatter is the Cat component that manages and runs plugins.

@tool\ndef socks_prices(color, cat):\n    \"\"\"How much do socks cost? Input is the sock color.\"\"\"\n

We define a function called \"socks_prices,\" which takes as input the color of the desired socks and a cat instance.

The @tool decorator's main purpose is to let the Cat know that the following function is a tool.

The docstring just after the function signature reads as follows:

\"How much do socks cost? Input is the sock color.\"

This description instructs the LLM on when to call this tool and describes what input to provide.

Going back to the tool's actual code:

    prices = {\n        \"black\": 5,\n        \"white\": 10,\n        \"pink\": 50,\n    }\n\n    if color not in prices.keys():\n        return f\"No {color} socks\"\n    else:\n        return f\"{prices[color]} \u20ac\" \n

Not much to say here: we just check whether the color is present in the dictionary and output the price. What is indeed interesting is that, in a tool, you can connect your AI to any service, database, file, device, or whatever you need. Imagine turning the light in your room on and off, searching an e-commerce site, or writing an email. The only limit is your imagination \ud83d\ude00.
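
As an illustration, here is a hypothetical tool that checks stock from an external service (the URL and the JSON response shape are invented for the example):

import requests\n\nfrom cat.mad_hatter.decorators import tool\n\n@tool\ndef socks_in_stock(color, cat):\n    \"\"\"How many socks are in stock? Input is the sock color.\"\"\"\n    # Hypothetical inventory service: endpoint and response shape are invented\n    response = requests.get(f\"https://example.com/inventory?color={color}\")\n    quantity = response.json().get(\"quantity\", 0)\n    return f\"{quantity} pairs of {color} socks in stock\"\n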

"},{"location":"quickstart/writing-tool/#watchfiles-detected-changes-reloading","title":"WatchFiles detected changes... reloading","text":"

When changes to the plugin's source code are detected, the Cat automatically restarts. Feel free to make changes within the code and observe the results.

"},{"location":"quickstart/writing-tool/#debugging","title":"Debugging","text":"

Sometimes debugging can be necessary. Please refer to the Developers -> Debugging section for more information.

"},{"location":"quickstart/writing-tool/#more-info","title":"More Info","text":"

Developers \u2192 Plugins Reference \u2192 Tools

"},{"location":"quickstart/writing-tool/#next-step","title":"Next Step","text":"

In the next step, you will learn how to create your first Hook.

"},{"location":"resources/events/","title":"\ud83e\uddd1 Events","text":""},{"location":"resources/tutorials/","title":"🧑 Tutorials","text":""},{"location":"resources/tutorials/#tutorials","title":"\ud83e\uddd1 Tutorials","text":"

You can find several tutorials both on our blog and in the newsletter (SUBSCRIBE TO IT!).

"},{"location":"resources/video-installation/","title":"🧑 Installation & Customization","text":""},{"location":"resources/video-installation/#installation-customization","title":"\ud83e\uddd1 Installation & Customization","text":"

Watch it on YouTube

"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 000000000..4219a1474 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,453 @@ + + + + https://cheshire-cat-ai.github.io/docs/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/made-with-the-cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/SUMMARY/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/log/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/rabbit_hole/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/utils/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/agents/base_agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/agents/form_agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/agents/main_agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/agents/memory_agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/agents/procedures_agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/auth/auth_utils/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/looking_glass/cheshire_cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/looking_glass/stray_cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/mad_hatter/mad_hatter/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/mad_hatter/plugin/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/mad_hatter/core_plugin/hooks/agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/mad_hatter/core_plugin/hooks/flow/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/mad_hatter/core_plugin/hooks/prompt/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/mad_hatter/core_plugin/hooks/rabbithole/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/memory/vector_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/memory/vector_memory_collection/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/memory/working_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/API_Documentation/routes/settings/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/faq/basic_info/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/faq/customization/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/faq/errors/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/faq/general/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/faq/security_and_spending/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/llm/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/plugins/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/agent/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/core/ + 2024-08-22 + daily + + + 
https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/mad_hatter/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/memory_chain/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/rabbit_hole/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/stray_cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/tool_chain/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/cheshire_cat/white_rabbit/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/memory/declarative_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/memory/episodic_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/memory/long_term_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/memory/procedural_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/memory/vector_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/memory/working_memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/prompts/instructions/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/cat-components/prompts/main_prompt/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/flows/cat-bootstrap/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/flows/chatting-with-the-cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/flows/plugins-lifecycle/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/flows/rabbit-hole-ingestion/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/llm-concepts/embedder/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/llm-concepts/llm/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/llm-concepts/prompt/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/llm-concepts/rag/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/framework/llm-concepts/vector-memory/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/dependencies/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/examples/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/forms/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/hooks/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/logging/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/plugins/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/settings/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/tools/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/debugging/vscode/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/plugins-registry/plugin-from-template/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/plugins/plugins-registry/publishing-plugin/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/clients/ + 
2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/endpoints/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/administrators/architecture/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/administrators/env-variables/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/advanced/api_auth/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/advanced/contributing/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/advanced/memory_backup/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/advanced/tests/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/advanced/user_system/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/use-cases/examples/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/production/use-cases/integrations/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/installation-configuration/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/installing-plugin/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/introduction/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/play-with-the-cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/prepare-plugin/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/stopping-the-cat/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/upload-document/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/writing-hook/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/quickstart/writing-tool/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/resources/events/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/resources/tutorials/ + 2024-08-22 + daily + + + https://cheshire-cat-ai.github.io/docs/resources/video-installation/ + 2024-08-22 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 000000000..3fb26ed28 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/stylesheets/extra.css b/stylesheets/extra.css new file mode 100644 index 000000000..7ac7d5c59 --- /dev/null +++ b/stylesheets/extra.css @@ -0,0 +1,37 @@ +body[data-md-color-scheme="slate"] .md-header__button.md-logo img { + filter: invert(100%); +} + +/* Custom color scheme named "custom" */ +[data-md-color-scheme="custom"] { + /* Normal font color */ + --md-primary-fg-color: #F6F2EC; + --md-typeset-a-color: #666; + --md-accent-fg-color: #666; + --md-primary-bg-color: #666; + + /* Background color */ + --md-background-primary: #F6F2EC; + background-color: #F6F2EC; +} + +[data-md-color-scheme="custom"] .md-nav--secondary .md-nav__title, +[data-md-color-scheme="custom"] .md-nav--primary .md-nav__title, +[data-md-color-scheme="custom"] .md-nav--lifted > .md-nav__list > .md-nav__item--active > .md-nav__link { + background: #F6F2EC; + box-shadow: none; +} + +[data-md-color-scheme="custom"] .md-header { + color: black; +} + +[data-md-color-scheme="custom"] .md-typeset a { + font-weight: 700; +} + +[data-md-color-scheme="slate"] { + /* Normal font color */ + --md-typeset-a-color: #FCC263!important; + --md-accent-fg-color: #FCC263!important; +}
