Commit
feat: boost - whole bunch of experimental custom modules, slight improvements to chat/llm APIs, better error for missing Auth, modules can now accept params via completion request body with "@boost" prefixed keys
Showing 12 changed files with 586 additions and 22 deletions.
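As a rough illustration of the new parameter passing: the `cea` module below reads `cea_rule` and `cea_generations` from `llm.boost_params`, which a client would presumably supply as extra `@boost`-prefixed keys in an otherwise standard completion request body. The endpoint URL, model name, and exact key format in this sketch are assumptions, not confirmed by this commit:

```python
# Hypothetical sketch: passing module params via "@boost"-prefixed body keys.
# The endpoint, model id, and key naming below are assumptions.
import requests

response = requests.post(
    "http://localhost:8004/v1/chat/completions",  # assumed boost proxy endpoint
    json={
        "model": "cea-llama3.1:8b",  # assumed module-prefixed model id
        "messages": [{"role": "user", "content": "Why is the sky blue?"}],
        "@boost_cea_rule": "110",          # surfaced to modules via llm.boost_params
        "@boost_cea_generations": "24",
    },
)
print(response.json()["choices"][0]["message"]["content"])
```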
@@ -0,0 +1,43 @@

```python
import chat as ch
import llm

ID_PREFIX = '3t'


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
    side_chat = ch.Chat(
        tail=ch.ChatNode(
            content="""
I will ask you to answer my question three times. Each time you will provide a different answer.
Try to use the chance to correct any mistakes you made in the previous answers.
            """.strip()
        )
    )
    side_chat.llm = llm

    side_chat.user('Here is the question:')
    side_chat.user(chat.tail.content)
    side_chat.user('Please provide the first answer to the question.')
    await side_chat.emit_status('First')
    await side_chat.emit_advance()

    side_chat.user(
        'Please provide the second answer to the question. Remember, it must be different from the first one.'
    )
    await side_chat.emit_status('Second')
    await side_chat.emit_advance()

    side_chat.user(
        'Please provide the third answer to the question. It must be different from the first two.'
    )
    await side_chat.emit_status('Third')
    await side_chat.emit_advance()

    side_chat.user(
        """
Now, think about the answers you provided. Is there anything wrong with them? Which one is the most correct?
What is the final answer to the question?
        """.strip()
    )
    await side_chat.emit_status('Final')
    await llm.stream_final_completion(chat=side_chat)
```
@@ -0,0 +1,100 @@

```python
import chat as ch
import llm

ID_PREFIX = 'ambi'

ambi_prompt = """
<instruction>
Find the sources of ambiguities in the given question and describe them.
</instruction>
<question>
{question}
</question>
""".strip()

detail_prompt = """
<instruction>
Find the conditions that significantly affect the interpretation of the question and describe them.
</instruction>
<question>
{question}
</question>
""".strip()

definition_prompt = """
<instruction>
Define the terms in the question and provide a detailed explanation for each.
</instruction>
<question>
{question}
</question>
""".strip()

discrepancies_prompt = """
<instruction>
Find the discrepancies in the question and describe them.
</instruction>
<question>
{question}
</question>
""".strip()

final_prompt = """
<instruction>
Provide a clear and definitive answer to the question.
</instruction>
<question>
{question}
</question>
<information>
### Ambiguities
{ambiguities}
### Details
{details}
### Definitions
{definitions}
### Discrepancies
{discrepancies}
</information>
""".strip()


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
    await llm.emit_status('Ambiguity')
    ambiguities = await llm.stream_chat_completion(
        prompt=ambi_prompt, question=chat.tail.content
    )

    await llm.emit_status('Details')
    details = await llm.stream_chat_completion(
        prompt=detail_prompt, question=chat.tail.content
    )

    await llm.emit_status('Definitions')
    definitions = await llm.stream_chat_completion(
        prompt=definition_prompt, question=chat.tail.content
    )

    await llm.emit_status('Discrepancies')
    discrepancies = await llm.stream_chat_completion(
        prompt=discrepancies_prompt, question=chat.tail.content
    )

    await llm.emit_status('Final')
    await llm.stream_final_completion(
        prompt=final_prompt,
        question=chat.tail.content,
        ambiguities=ambiguities,
        details=details,
        definitions=definitions,
        discrepancies=discrepancies,
    )
```
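The `{question}`, `{ambiguities}`, and similar placeholders suggest that `stream_chat_completion` and `stream_final_completion` fill the `prompt` template from their extra keyword arguments, presumably via `str.format`-style substitution. A minimal standalone sketch of that assumption:

```python
# Assumed substitution behavior, demonstrated directly with str.format:
rendered = final_prompt.format(
    question="What is the tallest mountain?",
    ambiguities="'Tallest' could mean height above sea level or base-to-peak.",
    details="...",
    definitions="...",
    discrepancies="...",
)
print(rendered)  # the fully-expanded prompt used for the final completion
```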
@@ -0,0 +1,94 @@

````python
import random

import chat as ch
import llm
import log

ID_PREFIX = 'cea'  # 'cellular automata'


def cellular_automata(rule, initial_state, generations):
    """
    Runs a one-dimensional cellular automaton and records results in binary,
    allowing the state to grow.

    Args:
        rule (int): The rule number for the cellular automaton (0-255).
        initial_state (list): The initial state of the cellular automaton.
        generations (int): The number of generations to run.

    Returns:
        list: A list of binary strings representing the state of the
        cellular automaton at each generation.
    """
    # Convert the rule number to a binary string and pad with zeros to 8 bits
    rule_binary = format(rule, '08b')

    # Initialize the list to store the results
    results = ["".join(map(str, initial_state))]

    # Run the cellular automaton for the specified number of generations
    current_state = initial_state.copy()
    for _ in range(generations):
        # Initialize the next state with a zero on each end
        next_state = [0] + current_state + [0]

        # Apply the rule to each cell in the current state
        for i in range(1, len(next_state) - 1):
            # Get the left, center, and right cells
            left = current_state[i - 2] if i > 1 else 0
            center = current_state[i - 1]
            right = current_state[i] if i < len(current_state) else 0

            # Convert the left, center, and right cells to a binary string
            neighborhood = f"{left}{center}{right}"

            # Get the next state of the cell based on the rule
            next_state[i] = int(rule_binary[7 - int(neighborhood, 2)])

        # Update the current state and append the next state to the results
        current_state = next_state
        results.append("".join(map(str, next_state)))

    return results


def render_ca(results):
    """
    Renders the results of a cellular automaton as a string.

    Args:
        results (list): A list of binary strings representing the state of
        the cellular automaton at each generation.

    Returns:
        str: A string representation of the cellular automaton results,
        one generation per line.
    """
    return "\n".join(
        "".join("|" if cell == "1" else "." for cell in result)
        for result in results
    )


initial_state = [1]


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
    rule = int(llm.boost_params.get('cea_rule', '73'))
    gens = int(llm.boost_params.get('cea_generations', '32'))

    chat.user('Before completing my request, please think for a while.')
    chat.assistant(
        f"""Good idea! Let me think...
```thoughts
{render_ca(cellular_automata(rule, initial_state, gens))}
```
"""
    )
    await llm.emit_message(chat.tail.content)
    chat.user('Now, please address my request.')
    await llm.stream_final_completion()
````
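To get a feel for what ends up inside the `thoughts` block, the helpers above can be exercised on their own, without any of the chat/llm machinery:

```python
# Standalone check, assuming cellular_automata and render_ca from above are in scope.
rows = cellular_automata(rule=73, initial_state=[1], generations=8)
print(render_ca(rows))
# Prints one row per generation: '|' for live cells, '.' for dead ones,
# with each row two cells wider than the previous.
```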
@@ -0,0 +1,58 @@

```python
import chat as ch
import llm

ID_PREFIX = 'clarity'

should_clarify_prompt = """
<instruction>
Does this question require any clarification, or is it ready to be answered?
Reply only with "clarify" or "ready" and nothing else. Everything else will be ignored.
</instruction>
<question>
{question}
</question>
""".strip()


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
    iterations = 0
    max_iterations = 15

    side_chat = ch.Chat.from_conversation([chat.tail.message()])
    side_chat.llm = llm

    while iterations < max_iterations:
        iterations += 1
        side_chat.user(
            """
Are there any sources of ambiguity in my request?
Answer with "yes" or "no" and nothing else. Everything else will be ignored.
            """.strip()
        )
        await side_chat.advance()
        await llm.emit_status(f'Clarification: {side_chat.tail.content}')

        if side_chat.tail.contains('no'):
            break

        side_chat.user('Clarify the ambiguity you mentioned.')
        await side_chat.emit_advance()

    side_chat.user('Now, please provide a clear answer to the question.')
    await side_chat.emit_advance()

    await llm.emit_status('Final')

    side_chat.user(
        """
Think through the response you just gave. Is there anything wrong? If so, please correct it.
Otherwise, write down your final answer to my request.
        """.strip()
    )
    await llm.stream_final_completion(chat=side_chat)
```