MainAgent

Bases: BaseAgent

Main Agent.
This class manages the sub agents that, in turn, use the LLM.

Source: cat/agents/main_agent.py
agent_prompt_declarative_memories(memory_docs)

Formats the declarative memories for the prompt context.
Such context is placed in the agent_prompt_prefix in the place held by {declarative_memory}.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| memory_docs | List[Document] | List of Langchain `Document` retrieved from the declarative memory. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| memory_content | str | String of retrieved context from the declarative memory. |

Source: cat/agents/main_agent.py
agent_prompt_episodic_memories(memory_docs)

Formats episodic memories to be inserted into the prompt.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| memory_docs | List[Document] | List of Langchain `Document` retrieved from the episodic memory. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| memory_content | str | String of retrieved context from the episodic memory. |

Source: cat/agents/main_agent.py
execute(stray) (async)

Execute the agents.

Returns:

| Name | Type | Description |
|---|---|---|
| agent_output | AgentOutput | Reply of the agent, instance of AgentOutput. |

Source: cat/agents/main_agent.py
format_agent_input(stray)

Format the input for the Agent.
The method formats the strings of recalled memories and chat history that will be provided to the Langchain Agent and inserted in the prompt.

Returns:

| Type | Description |
|---|---|
| BaseModelDict | Formatted output to be parsed by the Agent executor. Works both as a dict and as an object. |

The context of memories and conversation history is properly formatted before being parsed by the Agent and, hence, the information is inserted in the main prompt.
The whole formatting pipeline is hookable and memories can be edited.

See also: agent_prompt_episodic_memories, agent_prompt_declarative_memories, agent_prompt_chat_history

Source: cat/agents/main_agent.py
The log engine.

CatLogEngine

The log engine.
Engine to filter the logs in the terminal according to the level of severity.

Attributes:

| Name | Type | Description |
|---|---|---|
| LOG_LEVEL | str | Level of logging set in the `.env` file. |

The logging level set in the `.env` file will print all the logs from that level upwards.
Available levels are:

- `DEBUG`
- `INFO`
- `WARNING`
- `ERROR`
- `CRITICAL`

Defaults to `INFO`.

Source: cat/log.py
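A minimal usage sketch. It assumes the engine instance is exported as `log` from `cat.log` (import path is an assumption, not stated above); the method names match the ones documented below.

```python
from cat.log import log  # assumed import path for the CatLogEngine instance

# Messages below the LOG_LEVEL set in the .env file are filtered out.
log.debug("Verbose details, shown only when LOG_LEVEL is DEBUG")
log.info("The Cat is up and running")
log.warning("Something looks odd")
log.error("Something went wrong")

# The engine is also callable, with an explicit severity level.
log("A message with an explicit level", "WARNING")
```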
__call__(msg, level='DEBUG')

critical(msg)

debug(msg)

default_log()

Set the same debug level for all the project dependencies.

Source: cat/log.py
error(msg)

get_caller_info(skip=3)

Get the name of a caller in the format module.class.method.
Copied from: https://gist.github.com/techtonik/2151727

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| skip | int | Specifies how many levels of stack to skip while getting the caller name. | 3 |

Returns:

| Name | Type | Description |
|---|---|---|
| package | str | Caller package. |
| module | str | Caller module. |
| klass | str | Caller class name if one exists, otherwise None. |
| caller | str | Caller function or method (if a class exists). |
| line | int | The line of the call. |

skip=1 means "who calls me", skip=2 "who calls my caller", etc.
An empty string is returned if skipped levels exceed stack height.

Source: cat/log.py
info(msg)

log(msg, level='DEBUG')

Log a message.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| msg |  | Message to be logged. | required |
| level | str | Logging level. | 'DEBUG' |

Source: cat/log.py
show_log_level(record)

Allows to show a record in the log based on the global setting.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| record | dict |  | required |

Returns:

| Type | Description |
|---|---|
| bool |  |

Source: cat/log.py
warning(msg)

welcome()

Welcome message in the terminal.

Source: cat/log.py
CheshireCat

The Cheshire Cat.
This is the main class that manages everything.

Attributes:

| Name | Type | Description |
|---|---|---|
| todo | list | Yet to be written. |

Source: cat/looking_glass/cheshire_cat.py
__init__()

Cat initialization.
At init time the Cat executes the bootstrap.

Source: cat/looking_glass/cheshire_cat.py
llm(prompt, *args, **kwargs)

Generate a response using the LLM model.
This method is useful for generating a response with both a chat and a completion model using the same syntax.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| prompt | str | The prompt for generating the response. | required |

Returns:

| Type | Description |
|---|---|
| str | The generated response. |

Source: cat/looking_glass/cheshire_cat.py
load_language_embedder()

Hook into the embedder selection.
Allows modifying how the Cat selects the embedder at bootstrap time.
Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat |  | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| embedder | Embeddings | Selected embedder model. |

Source: cat/looking_glass/cheshire_cat.py
load_language_model()

Large Language Model (LLM) selection at bootstrap time.

Returns:

| Name | Type | Description |
|---|---|---|
| llm | BaseLanguageModel | Langchain `BaseLanguageModel` instance of the selected model. |

Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.

Source: cat/looking_glass/cheshire_cat.py
load_memory()

Load LongTermMemory and WorkingMemory.

Source: cat/looking_glass/cheshire_cat.py
load_natural_language()

Load Natural Language related objects.
The method exposes in the Cat all the NLP related stuff. Specifically, it sets the language models (LLM and Embedder).

When using small Language Models it is suggested to turn off the memories and make the main prompt smaller, to prevent them from failing.

See also: agent_prompt_prefix

Source: cat/looking_glass/cheshire_cat.py
StrayCat

User/session based object containing working memory and a few utility pointers.

Source: cat/looking_glass/stray_cat.py
__call__(message_dict) (async)

Call the Cat instance.
This method is called on the user's message received from the client.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| message_dict | dict | Dictionary received from the Websocket client. | required |
| save | bool | If True, the user's message is stored in the chat history. | True |

Returns:

| Name | Type | Description |
|---|---|---|
| final_output | dict | Dictionary with the Cat's answer to be sent to the client. |

Here happens the main pipeline of the Cat. Namely, the Cat receives the user's input and recalls the memories. The retrieved context is properly formatted and given as input to the Agent, which uses the LLM to produce the answer. This is formatted in a dictionary to be sent as JSON via Websocket to the client.

Source: cat/looking_glass/stray_cat.py
classify(sentence, labels)

Classify a sentence.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| sentence | str | Sentence to be classified. | required |
| labels | List[str] or Dict[str, List[str]] | Possible output categories and optional examples. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| label | str | Sentence category. |

Examples:

Giving examples for each category:

```python
>>> example_labels = {
...     "positive": ["I feel nice", "happy today"],
...     "negative": ["I feel bad", "not my best day"],
... }
>>> cat.classify("it is a bad day", labels=example_labels)
"negative"
```

Source: cat/looking_glass/stray_cat.py
llm(prompt, stream=False)

Generate a response using the LLM model.
This method is useful for generating a response with both a chat and a completion model using the same syntax.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| prompt | str | The prompt for generating the response. | required |

Returns:

| Type | Description |
|---|---|
| str | The generated response. |

Source: cat/looking_glass/stray_cat.py
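A minimal sketch of how plugin code could call the LLM through the `cat` instance it receives. This is hypothetical plugin code: the `@tool` decorator import from `cat.mad_hatter.decorators` and the `poem_about` tool name are assumptions, while `cat.llm` is the method documented above.

```python
from cat.mad_hatter.decorators import tool  # assumed plugin decorator import


@tool
def poem_about(topic, cat):
    """Write a short poem about a topic. Input is the topic."""
    # One call works for both chat and completion models and returns a plain string.
    return cat.llm(f"Write a two-line poem about {topic}.")
```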
recall_relevant_memories_to_working_memory(query=None)

Retrieve context from memory.
The method retrieves the relevant memories from the vector collections that are given as context to the LLM. Recalled memories are stored in the working memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| query | str | The query used to make a similarity search in the Cat's vector memories. If not provided, it will be derived from the user's message. | None |

The user's message is used as a query to make a similarity search in the Cat's vector memories. Several hooks allow customizing the recall pipeline before and after it is done.

See also: cat_recall_query, before_cat_recalls_memories, before_cat_recalls_episodic_memories, before_cat_recalls_declarative_memories, before_cat_recalls_procedural_memories, after_cat_recalls_memories

Source: cat/looking_glass/stray_cat.py
send_ws_message(content, msg_type='notification')

Send a message via websocket.
This method is useful for sending a message via websocket directly, without passing through the LLM.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| content | str | The content of the message. | required |
| msg_type | str | The type of the message. Default is `notification`. | 'notification' |

Source: cat/looking_glass/stray_cat.py
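A minimal sketch of a direct notification sent while a longer task runs. Hypothetical plugin code: the `@tool` decorator import and the `slow_lookup` tool are assumptions; `cat.send_ws_message` is the method documented above.

```python
from cat.mad_hatter.decorators import tool  # assumed plugin decorator import


@tool
def slow_lookup(query, cat):
    """Look something up. Input is the query string."""
    # Keep the user informed via websocket while the tool works,
    # without involving the LLM.
    cat.send_ws_message("Looking that up, hold on...", msg_type="notification")
    # ... long running work would happen here ...
    return f"Done looking up: {query}"
```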
stringify_chat_history(latest_n=5)

Serialize chat history.
Converts to text the recent conversation turns.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| latest_n | int | How many latest turns to stringify. | 5 |

Returns:

| Name | Type | Description |
|---|---|---|
| history | str | String with recent conversation turns. |

Such context is placed in the agent_prompt_suffix in the place held by {chat_history}.
The chat history is a dictionary with keys:

- 'who': the name of who said the utterance;
- 'message': the utterance.

Source: cat/looking_glass/stray_cat.py
Hooks to modify the Cat's Agent.
Here is a collection of methods to hook into the Agent execution pipeline.

agent_allowed_tools(allowed_tools, cat)

Hook the allowed tools.
Allows deciding which tools end up in the Agent prompt.
To decide, you can filter the list of tools' names, but you can also check the context in cat.working_memory and launch custom chains with cat._llm.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| tools | List[str] | List of allowed Langchain tools. |

Source: cat/mad_hatter/core_plugin/hooks/agent.py
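A minimal sketch of a plugin hook filtering the tools. Hypothetical plugin code: the `@hook` decorator import is an assumption, `allowed_tools` is assumed to be the collection of tool names suggested by the Returns table above, and `"get_the_time"` is a made-up tool name.

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def agent_allowed_tools(allowed_tools, cat):
    # Drop a hypothetical "get_the_time" tool from the Agent prompt,
    # e.g. because this deployment should not expose it.
    return [t for t in allowed_tools if t != "get_the_time"]
```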
agent_fast_reply(fast_reply, cat)

This hook is useful to shortcut the Cat response.
If you do not want the agent to run, return the final response from here and it will end up in the chat without the agent being executed.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| fast_reply |  | Input is a dict (initially empty), which can be enriched with an "output" key with the shortcut response. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| response | Union[None, Dict] | Cat response if you want to avoid using the agent, or None / {} if you want the agent to be executed. See below for examples of Cat response. |

Examples:

Example 1: can't talk about this topic

```python
# here you could use cat._llm to do topic evaluation
if "dog" in agent_input["input"]:
    return {
        "output": "You went out of topic. Can't talk about dog."
    }
```

Example 2: don't remember (no uploaded documents about topic)

```python
num_declarative_memories = len(cat.working_memory.declarative_memories)
if num_declarative_memories == 0:
    return {
        "output": "Sorry, I have no memories about that."
    }
```

Source: cat/mad_hatter/core_plugin/hooks/agent.py
before_agent_starts(agent_input, cat)

Hook to read and edit the agent input.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| agent_input | Dict | Input that is about to be passed to the agent. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| response | Dict | Agent input. |

Source: cat/mad_hatter/core_plugin/hooks/agent.py
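A minimal sketch of a plugin hook enriching the agent input (hypothetical plugin code; the `"input"` key is the one used in the `agent_fast_reply` examples above, and the `@hook` decorator import is assumed):

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def before_agent_starts(agent_input, cat):
    # Prepend a reminder to the text the agent will reason about.
    agent_input["input"] = "(Answer in a formal tone) " + agent_input["input"]
    return agent_input
```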
Hooks to modify the Cat's flow of execution.
Here is a collection of methods to hook into the Cat execution pipeline.

after_cat_bootstrap(cat)

Hook into the end of the Cat start up.
Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.
This hook allows intercepting the end of such process and is executed right after the Cat has finished loading its components.
This can be used to set or store variables to be shared further in the pipeline.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat | CheshireCat | Cheshire Cat instance. | required |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
after_cat_recalls_memories(cat)

Hook after semantic search in memories.
The hook is executed just after the Cat searches for the meaningful context in memories and stores it in the Working Memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat | CheshireCat | Cheshire Cat instance. | required |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
before_cat_bootstrap(cat)

Hook into the Cat start up.
Bootstrapping is the process of loading the plugins, the natural language objects (e.g. the LLM), the memories, the Main Agent, the Rabbit Hole and the White Rabbit.
This hook allows intercepting such process and is executed in the middle of plugins and natural language objects loading.
This hook can be used to set or store variables to be propagated to subsequently loaded objects.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat | CheshireCat | Cheshire Cat instance. | required |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
before_cat_reads_message(user_message_json, cat)

Hook the incoming user's JSON dictionary.
Allows editing and enriching the incoming message received from the WebSocket connection.
For instance, this hook can be used to translate the user's message before feeding it to the Cat. Another use case is to add custom keys to the JSON dictionary.
The incoming message is a JSON dictionary with keys: { "text": message content }

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| user_message_json | dict | JSON dictionary with the message received from the chat. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| user_message_json | dict | Edited JSON dictionary that will be fed to the Cat. |

For example:

```python
{
    "text": "Hello Cheshire Cat!",
    "custom_key": True
}
```

where "custom_key" is a newly added key to the dictionary to store any data.

Source: cat/mad_hatter/core_plugin/hooks/flow.py
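A minimal sketch of a plugin hook editing the incoming message (hypothetical plugin code; it only uses the `"text"` key and the custom-key pattern shown above, and assumes the `@hook` decorator import):

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def before_cat_reads_message(user_message_json, cat):
    # Normalize the text and attach a custom key, as in the example above.
    user_message_json["text"] = user_message_json["text"].strip()
    user_message_json["custom_key"] = True
    return user_message_json
```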
before_cat_recalls_declarative_memories(declarative_recall_config, cat)

Hook into semantic search in memories.
Allows intercepting when the Cat queries the memories using the embedded user's input.
The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.
The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied to the query in the vector memory (items with score under threshold are not retrieved). It also returns the embedded query (embedding) and the conditions on recall (metadata).

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| declarative_recall_config | dict | Dictionary with data needed to recall declarative memories. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| declarative_recall_config | dict | Edited dictionary that will be fed to the embedder. |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
before_cat_recalls_episodic_memories(episodic_recall_config, cat)

Hook into semantic search in memories.
Allows intercepting when the Cat queries the memories using the embedded user's input.
The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.
The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied to the query in the vector memory (items with score under threshold are not retrieved). It also returns the embedded query (embedding) and the conditions on recall (metadata).

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| episodic_recall_config | dict | Dictionary with data needed to recall episodic memories. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| episodic_recall_config | dict | Edited dictionary that will be fed to the embedder. |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
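A minimal sketch of a plugin hook tuning the recall settings. Hypothetical plugin code: the `"k"` and `"threshold"` keys are assumed from the k / threshold values described above, and the `@hook` decorator import is an assumption.

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def before_cat_recalls_episodic_memories(episodic_recall_config, cat):
    # Retrieve fewer, but more relevant, past conversation snippets.
    episodic_recall_config["k"] = 3            # max number of items to recall
    episodic_recall_config["threshold"] = 0.8  # discard items scoring below this
    return episodic_recall_config
```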
before_cat_recalls_memories(cat)

Hook into semantic search in memories.
Allows intercepting when the Cat queries the memories using the embedded user's input.
The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat | CheshireCat | Cheshire Cat instance. | required |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
before_cat_recalls_procedural_memories(procedural_recall_config, cat)

Hook into semantic search in memories.
Allows intercepting when the Cat queries the memories using the embedded user's input.
The hook is executed just before the Cat searches for the meaningful context in both memories and stores it in the Working Memory.
The hook returns the values for the maximum number (k) of items to retrieve from memory and the score threshold applied to the query in the vector memory (items with score under threshold are not retrieved). It also returns the embedded query (embedding) and the conditions on recall (metadata).

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| procedural_recall_config | dict | Dictionary with data needed to recall tools from procedural memory. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| procedural_recall_config | dict | Edited dictionary that will be fed to the embedder. |

Source: cat/mad_hatter/core_plugin/hooks/flow.py
before_cat_sends_message(message, cat)

Hook the outgoing Cat's message.
Allows editing the JSON dictionary that will be sent to the client via WebSocket connection.
This hook can be used to edit the message sent to the user or to add keys to the dictionary.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| message | dict | JSON dictionary to be sent to the WebSocket client. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| message | dict | Edited JSON dictionary with the Cat's answer. |

Default message is:

```python
{
    "type": "chat",
    "content": cat_message["output"],
    "why": {
        "input": cat_message["input"],
        "output": cat_message["output"],
        "intermediate_steps": cat_message["intermediate_steps"],
        "memory": {
            "vectors": {
                "episodic": episodic_report,
                "declarative": declarative_report
            }
        },
    },
}
```

Source: cat/mad_hatter/core_plugin/hooks/flow.py
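A minimal sketch of a plugin hook editing the outgoing message (hypothetical plugin code; it only touches the `"content"` key shown in the default message above, and assumes the `@hook` decorator import):

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def before_cat_sends_message(message, cat):
    # Add a signature to every answer before it reaches the client.
    message["content"] += "\n\n-- sent by the Cheshire Cat"
    return message
```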
before_cat_stores_episodic_memory(doc, cat)

Hook the user message Document before it is inserted in the vector memory.
Allows editing and enhancing a single Document before the Cat adds it to the episodic vector memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| doc | Document | Langchain `Document` to be edited. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| doc | Document | Edited Langchain `Document`. |

The `Document` has two properties:

- `page_content`: the string with the text to save in memory;
- `metadata`: a dictionary with at least two keys:
    - `source`: where the text comes from;
    - `when`: timestamp to track when it's been uploaded.

Source: cat/mad_hatter/core_plugin/hooks/flow.py
cat_recall_query(user_message, cat)

Hook the semantic search query.
This hook allows editing the user's message used as a query for context retrieval from memories. As a result, the retrieved context can be conditioned by editing the user's message.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| user_message | str | String with the text received from the user. | required |
| cat | CheshireCat | Cheshire Cat instance to exploit the Cat's methods. | required |

Returns:

| Type | Description |
|---|---|
| str | Edited string to be used for context retrieval in memory. The returned string is further stored in the Working Memory at `cat.working_memory.recall_query`. |

For example, this hook is suitable to perform Hypothetical Document Embedding (HyDE). The HyDE [1] strategy exploits the user's message to generate a hypothetical answer. This is then used to recall the relevant context from the memory. An official plugin is available to test this technique.

[1] Gao, L., Ma, X., Lin, J., & Callan, J. (2022). Precise Zero-Shot Dense Retrieval without Relevance Labels. arXiv preprint arXiv:2212.10496.

Source: cat/mad_hatter/core_plugin/hooks/flow.py
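A minimal HyDE-style sketch (hypothetical plugin code, not the official plugin mentioned above; the `@hook` decorator import is assumed): the user's message is replaced by a hypothetical answer generated with `cat.llm`, and that answer is used as the recall query instead.

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def cat_recall_query(user_message, cat):
    # Generate a hypothetical answer and use it as the recall query (HyDE).
    hypothetical_answer = cat.llm(
        f"Write a short, plausible answer to: {user_message}"
    )
    return hypothetical_answer
```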
Hooks to modify the prompts.
Here is a collection of methods to hook the prompt components that instruct the Agent.

agent_prompt_instructions(instructions, cat)

Hook the instruction prompt.
Allows editing the instructions that the Cat feeds to the Agent to select tools and forms.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| instructions | str | Instructions prompt to select tool or form. | required |
| cat | StrayCat | StrayCat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| instructions | str | Instructions prompt to select tool or form. |

This prompt explains to the Agent how to select a tool or form.

Source: cat/mad_hatter/core_plugin/hooks/prompt.py
agent_prompt_prefix(prefix, cat)

Hook the main prompt prefix.
Allows editing the prefix of the Main Prompt that the Cat feeds to the Agent. It describes the personality of your assistant and its general task.
The prefix is then completed with the agent_prompt_suffix.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| prefix | str | Main / System prompt with personality and general task to be accomplished. | required |
| cat | StrayCat | StrayCat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| prefix | str | Main / System prompt. |

The default prefix describes who the AI is and how it is expected to answer the Human.

Source: cat/mad_hatter/core_plugin/hooks/prompt.py
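A minimal sketch of a plugin hook overriding the assistant's personality (hypothetical plugin code; the `@hook` decorator import and the "Scrooge AI" persona are assumptions used for illustration):

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def agent_prompt_prefix(prefix, cat):
    # Replace the default personality with a custom one.
    prefix = (
        "You are Scrooge AI, a grumpy but extremely precise accountant. "
        "You answer briefly and always mention the cost of things."
    )
    return prefix
```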
agent_prompt_suffix(prompt_suffix, cat)

Hook the main prompt suffix.
Allows editing the suffix of the Main Prompt that the Cat feeds to the Agent.
The suffix is concatenated to agent_prompt_prefix when RAG context is used.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| cat | StrayCat | StrayCat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| prompt_suffix | str | The suffix string to be concatenated to the Main Prompt (prefix). |

The default suffix has a few placeholders:

- {episodic_memory} provides memories retrieved from episodic memory (past conversations)
- {declarative_memory} provides memories retrieved from declarative memory (uploaded documents)
- {chat_history} provides the Agent the recent conversation history
- {input} provides the last user's input
- {agent_scratchpad} is where the Agent can concatenate tool use and multiple calls to the LLM

Source: cat/mad_hatter/core_plugin/hooks/prompt.py
Hooks to modify the RabbitHole's document ingestion.
Here is a collection of methods to hook into the RabbitHole execution pipeline.
These hooks allow intercepting the uploaded documents at different places before they are saved into memory.

after_rabbithole_splitted_text(chunks, cat)

Hook the Document chunks after the text is split.
Allows editing the list of Document right after the RabbitHole chunked them into smaller ones.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| chunks | List[Document] | List of Langchain `Document` chunks. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| chunks | List[Document] | List of modified chunked Langchain documents to be stored in the episodic memory. |

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
after_rabbithole_stored_documents(source, stored_points, cat)

Hook the Documents after they are inserted in the vector memory.
Allows editing and enhancing the list of Documents after they are inserted in the vector memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| source |  | Name of the ingested file/url. | required |
| stored_points | List[PointStruct] | List of Qdrant PointStruct just inserted into the db. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Type | Description |
|---|---|
| None |  |

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
before_rabbithole_insert_memory(doc, cat)

Hook the Document before it is inserted in the vector memory.
Allows editing and enhancing a single Document before the RabbitHole adds it to the declarative vector memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| doc | Document | Langchain `Document` to be edited. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| doc | Document | Edited Langchain `Document`. |

The `Document` has two properties:

- `page_content`: the string with the text to save in memory;
- `metadata`: a dictionary with at least two keys:
    - `source`: where the text comes from;
    - `when`: timestamp to track when it's been uploaded.

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
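A minimal sketch of a plugin hook enriching each chunk before insertion (hypothetical plugin code; `page_content` and `metadata` are the `Document` properties listed above, while the `@hook` decorator import and the `ingested_by` metadata key are assumptions):

```python
from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def before_rabbithole_insert_memory(doc, cat):
    # Trim boilerplate whitespace and tag every stored chunk.
    doc.page_content = doc.page_content.strip()
    doc.metadata["ingested_by"] = "my_plugin"  # hypothetical custom metadata key
    return doc
```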
before_rabbithole_splits_text(docs, cat)

Hook the Documents before they are split into chunks.
Allows editing the uploaded main Document(s) before the RabbitHole recursively splits them into shorter ones. Please note that this is a list because parsers can output one or more Documents, which are afterwards split.
For instance, the hook allows changing the text or editing/adding metadata.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| docs | List[Document] | List of Langchain `Document` to be split. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| docs | List[Document] | Edited list of Langchain `Document`. |

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
before_rabbithole_stores_documents(docs, cat)

Hook into the memory insertion pipeline.
Allows modifying how the list of Document is inserted in the vector memory.
For example, this hook is a good point to summarize the incoming documents and save both the original and the summarized contents. An official plugin is available to test this procedure.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| docs | List[Document] | List of Langchain `Document` to be inserted in memory. | required |
| cat |  | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| docs | List[Document] | List of edited Langchain documents. |

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
rabbithole_instantiates_parsers(file_handlers, cat)

Hook the available parsers for ingesting files in the declarative memory.
Allows replacing or extending the existing supported mime types and related parsers to customize the file ingestion.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| file_handlers | dict | Keys are the supported mime types and values are the related parsers. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| file_handlers | dict | Edited dictionary of supported mime types and related parsers. |

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
rabbithole_instantiates_splitter(text_splitter, cat)

Hook the splitter used to split text in chunks.
Allows replacing the default text splitter to customize the splitting process.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| text_splitter | TextSplitter | The text splitter used by default. | required |
| cat | CheshireCat | Cheshire Cat instance. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| text_splitter | TextSplitter | An instance of a TextSplitter subclass. |

Source: cat/mad_hatter/core_plugin/hooks/rabbithole.py
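A minimal sketch of a plugin hook swapping in a different splitter. Hypothetical plugin code: it assumes Langchain's `RecursiveCharacterTextSplitter` (a `TextSplitter` subclass) is available and that the `@hook` decorator import is correct; the chunk sizes are arbitrary.

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

from cat.mad_hatter.decorators import hook  # assumed plugin decorator import


@hook
def rabbithole_instantiates_splitter(text_splitter, cat):
    # Replace the default splitter with a recursive character splitter
    # using custom chunk sizes.
    return RecursiveCharacterTextSplitter(
        chunk_size=512,
        chunk_overlap=64,
    )
```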
VectorMemoryCollection

Source: cat/memory/vector_memory_collection.py
add_point(content, vector, metadata=None, id=None, **kwargs)

Add a point (and its metadata) to the vectorstore.

Args:
- content: original text.
- vector: embedding vector.
- metadata: optional metadata dict associated with the text.
- id: optional id to associate with the point. Id has to be a uuid-like string.

Returns:
Point id as saved into the vectorstore.

Source: cat/memory/vector_memory_collection.py
WorkingMemory

Bases: BaseModelDict

Cat's volatile memory.
Handy class that behaves like a dict to store temporary custom data.

Returns:

| Type | Description |
|---|---|
| dict[str, list] | Default instance is a dictionary with the `history` key set to an empty list. |

The constructor instantiates a dictionary with a `history` key set to an empty list that is further used to store the conversation turns between the Human and the AI.

Source: cat/memory/working_memory.py
update_conversation_history(who, message, why={})

Update the conversation history.
The method appends to the `history` key the last three conversation turns.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| who | str | Who said the message. Can either be the Human or the AI. | required |
| message | str | The message said. | required |

Source: cat/memory/working_memory.py
RabbitHole

Manages content ingestion. I'm late... I'm late!

Source: cat/rabbit_hole.py
__split_text(stray, text, chunk_size, chunk_overlap)

Split text in overlapped chunks.
This method executes the rabbithole_splits_text hook to split the incoming text in overlapped chunks of text. Two other hooks are available to edit the text before and after the split step.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| text | str | Content of the loaded file. | required |
| chunk_size | int | Number of tokens in each document chunk. | required |
| chunk_overlap | int | Number of overlapping tokens between consecutive chunks. | required |

Returns:

| Name | Type | Description |
|---|---|---|
| docs | List[Document] | List of split Langchain `Document`. |

The default behavior only executes the rabbithole_splits_text hook. The before_rabbithole_splits_text and after_rabbithole_splitted_text hooks return the original input without any modification.

See also: before_rabbithole_splits_text, rabbithole_splits_text, after_rabbithole_splitted_text

Source: cat/rabbit_hole.py
file_to_docs(stray, file, chunk_size=None, chunk_overlap=None)

Load and convert files to Langchain Document.
This method takes a file either from a Python script or from the /rabbithole/ or /rabbithole/web endpoints. Hence, it loads it in memory and splits it in overlapped chunks of text.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| file | (str, UploadFile) | The file can be either a string path if loaded programmatically, a FastAPI `UploadFile` if coming from the /rabbithole/ endpoint, or a URL if coming from the /rabbithole/web endpoint. | required |
| chunk_size | int | Number of tokens in each document chunk. | None |
| chunk_overlap | int | Number of overlapping tokens between consecutive chunks. | None |

Returns:

| Name | Type | Description |
|---|---|---|
| docs | List[Document] | List of Langchain `Document` of chunked text. |

This method is used by both the /rabbithole/ and /rabbithole/web endpoints.
Currently supported files are .txt, .pdf, .md and web pages.

Source: cat/rabbit_hole.py
ingest_file(stray, file, chunk_size=None, chunk_overlap=None, metadata={})

Load a file in the Cat's declarative memory.
The method splits and converts the file in Langchain Document. Then, it stores the Document in the Cat's memory.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| file | (str, UploadFile) | The file can be a path passed as a string or an `UploadFile` if coming from the FastAPI endpoint. | required |
| chunk_size | int | Number of tokens in each document chunk. | None |
| chunk_overlap | int | Number of overlapping tokens between consecutive chunks. | None |
| metadata | dict | Metadata to be stored with each chunk. | {} |

Currently supported formats are .txt, .pdf and .md.
You can add custom ones or substitute the above via RabbitHole hooks.

See also: before_rabbithole_stores_documents

Source: cat/rabbit_hole.py
ingest_memory(stray, file)

Upload memories to the declarative memory from a JSON file.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| file | UploadFile | File object sent via the endpoint. | required |

This method allows uploading a JSON file containing vector and text memories directly to the declarative memory. When doing this, please make sure the embedder used to export the memories is the same as the one used when uploading. The method also performs a check on the dimensionality of the embeddings (i.e. the length of each vector).

Source: cat/rabbit_hole.py
store_documents(stray, docs, source, metadata={})

Add documents to the Cat's declarative memory.
This method loops over a list of Langchain Document and adds some metadata, namely the source filename and the timestamp of insertion. Once done, the method notifies the client via Websocket connection.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| docs | List[Document] | List of Langchain `Document` to be inserted in the Cat's declarative memory. | required |
| source | str | Source name to be added as metadata. It can be a file name or a URL. | required |
| metadata | dict | Metadata to be stored with each chunk. | {} |

At this point, it is possible to customize the Cat's behavior using the before_rabbithole_insert_memory hook to edit the memories before they are inserted in the vector database.

See also: before_rabbithole_insert_memory

Source: cat/rabbit_hole.py
string_to_docs(stray, file_bytes, source=None, content_type='text/plain', chunk_size=None, chunk_overlap=None)

Convert a string to Langchain Document.
Takes a string, converts it to a Langchain Document, loads it in memory and splits it in overlapped chunks of text.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| file_bytes | str | The string to be converted. | required |
| source | str | Source filename. | None |
| content_type | str | Mimetype of content. | 'text/plain' |
| chunk_size | int | Number of tokens in each document chunk. | None |
| chunk_overlap | int | Number of overlapping tokens between consecutive chunks. | None |

Returns:

| Name | Type | Description |
|---|---|---|
| docs | List[Document] | List of Langchain `Document` of chunked text. |

Source: cat/rabbit_hole.py
create_setting(payload, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.WRITE)))

Create a new setting in the database.

Source: cat/routes/settings.py

delete_setting(settingId, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.DELETE)))

Delete a specific setting in the database.

Source: cat/routes/settings.py

get_setting(settingId, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.READ)))

Get a specific setting from the database.

Source: cat/routes/settings.py

get_settings(search='', stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.LIST)))

Get the entire list of settings available in the database.

Source: cat/routes/settings.py

update_setting(settingId, payload, stray=Depends(HTTPAuth(AuthResource.SETTINGS, AuthPermission.EDIT)))

Update a specific setting in the database if it exists.

Source: cat/routes/settings.py
Various utilities used across the project.

get_base_path()

get_base_url()

Allows exposing the base url.

Source: cat/utils.py

get_plugins_path()

get_static_path()

get_static_url()

match_prompt_variables(prompt_variables, prompt_template)

Ensure prompt variables and prompt placeholders match, so there are no issues on mismatches.

Source: cat/utils.py
to_camel_case(text)

Format string to camel case.
Takes a string of words separated by either hyphens or underscores and returns a string of words in camel case.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| text | str | String of hyphen- or underscore-separated words. | required |

Returns:

| Type | Description |
|---|---|
| str | Camel case formatted string. |

Source: cat/utils.py
verbal_timedelta(td)

Convert a timedelta into human-readable form.
The function takes a timedelta and converts it to a human-readable string format.

Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| td | timedelta | Difference between two dates. | required |

Returns:

| Type | Description |
|---|---|
| str | Human-readable string of the time difference. |

This method is used to give the Language Model temporal information about the memories retrieved from the vector database.

Source: cat/utils.py