feat: boost - whole bunch of experimental custom modules, slight improvements to chat/llm APIs, better error for missing Auth, modules can now accept params via completion request body with "@boost" prefixed keys
av committed Oct 6, 2024
1 parent 91bc947 commit eeb031e
Showing 12 changed files with 586 additions and 22 deletions.
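The commit message above says modules can now accept parameters via "@boost"-prefixed keys in the completion request body. Below is a rough sketch of what such a request might look like against an OpenAI-compatible boost endpoint; the exact key format (`@boost_cea_rule`), the URL/port, and the model naming are assumptions inferred from `llm.boost_params.get('cea_rule', ...)` in `cea.py` further down, not something this diff confirms.

```python
# Hypothetical client-side usage of the new "@boost"-prefixed request keys.
# Endpoint, model name, and key format are assumptions, not confirmed by this diff.
import requests

resp = requests.post(
  "http://localhost:34131/v1/chat/completions",  # assumed boost URL/port
  json={
    "model": "cea-llama3.1:8b",  # assumed "<module>-<model>" naming
    "messages": [{"role": "user", "content": "Why is the sky blue?"}],
    # Module parameters passed via "@boost"-prefixed keys (assumed format);
    # cea.py reads them back with llm.boost_params.get('cea_rule'), etc.
    "@boost_cea_rule": "110",
    "@boost_cea_generations": "24",
  },
  stream=True,
)

for line in resp.iter_lines():
  if line:
    print(line.decode("utf-8"))
```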
16 changes: 16 additions & 0 deletions boost/src/chat.py
@@ -15,6 +15,10 @@ def from_conversation(messages):
tail = ChatNode.from_conversation(messages)
return Chat(tail=tail)

def from_tail(chat):
new_tail = ChatNode(role=chat.tail.role, content=chat.tail.content)
return Chat(tail=new_tail)

def __init__(self, **kwargs):
self.tail = kwargs.get('tail')
self.llm = kwargs.get('llm')
@@ -83,5 +87,17 @@ async def emit_advance(self):
response = await self.llm.stream_chat_completion(chat=self)
self.assistant(response)

async def emit_status(self, status):
"""
Emit a status message
Will be streamed back to the client
"""

if not self.llm:
raise ValueError("Chat: unable to emit status without an LLM")

await self.llm.emit_status(status)

def __str__(self):
return '\n'.join([str(msg) for msg in self.plain()])
19 changes: 10 additions & 9 deletions boost/src/chat_node.py
@@ -5,6 +5,7 @@

logger = log.setup_logger(__name__)


class ChatNode:
id: str
content: str
@@ -70,6 +71,12 @@ def parents(self):

return parents[::-1]

def message(self):
return {
"role": self.role,
"content": self.content,
}

def ancestor(self):
node = self
while node.parent:
@@ -78,19 +85,13 @@ def ancestor(self):

def history(self):
node = self
messages = [{
"role": node.role,
"content": node.content,
}]
messages = [node.message()]

while node.parent:
node = node.parent
messages.append({
"role": node.role,
"content": node.content,
})
messages.append(node.message())

return messages[::-1]

def __str__(self):
return f"{self.role}: {self.content}"
return f"{self.role}: {self.content}"
43 changes: 43 additions & 0 deletions boost/src/custom_modules/3t.py
@@ -0,0 +1,43 @@
import chat as ch
import llm

ID_PREFIX = '3t'


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
side_chat = ch.Chat(
tail=ch.ChatNode(
content="""
I will ask you to answer my question three times. Each time you will provide a different answer.
Try to use the chance to correct any mistakes you made in the previous answers.
""".strip()
)
)
side_chat.llm = llm

side_chat.user('Here is the question:')
side_chat.user(chat.tail.content)
side_chat.user('Please provide the first answer to the question.')
await side_chat.emit_status('First')
await side_chat.emit_advance()

side_chat.user(
'Please provide the second answer to the question. Remember, it must be different from the first one.'
)
await side_chat.emit_status('Second')
await side_chat.emit_advance()

side_chat.user(
'Please provide the third answer to the question. It must be different from the first two.'
)
await side_chat.emit_status('Third')
await side_chat.emit_advance()

side_chat.user(
"""
Now, think about the answers you provided. Is there anything wrong with them? Which one is the most correct?
What is the final answer to the question?
""".strip()
)
await side_chat.emit_status('Final')
await llm.stream_final_completion(chat=side_chat)
100 changes: 100 additions & 0 deletions boost/src/custom_modules/ambi.py
@@ -0,0 +1,100 @@
import chat as ch
import llm

ID_PREFIX = 'ambi'

ambi_prompt = """
<instruction>
Find the sources of ambiguity in the given question and describe them.
</instruction>
<question>
{question}
</question>
""".strip()

detail_prompt = """
<instruction>
Find the conditions that significantly affect the interpretation of the question and describe them.
</instruction>
<question>
{question}
</question>
""".strip()

definition_prompt = """
<instruction>
Define the terms in the question and provide a detailed explanation for each.
</instruction>
<question>
{question}
</question>
""".strip()

discrepancies_prompt = """
<instruction>
Find the discrepancies in the question and describe them.
</instruction>
<question>
{question}
</question>
""".strip()

final_prompt = """
<instruction>
Provide a clear and definitive answer to the question.
</instruction>
<question>
{question}
</question>
<information>
### Ambiguities
{ambiguities}
### Details
{details}
### Definitions
{definitions}
### Discrepancies
{discrepancies}
</information>
""".strip()


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
await llm.emit_status('Ambiguity')
ambiguities = await llm.stream_chat_completion(
prompt=ambi_prompt, question=chat.tail.content
)

await llm.emit_status('Details')
details = await llm.stream_chat_completion(
prompt=detail_prompt, question=chat.tail.content
)

await llm.emit_status('Definitions')
definitions = await llm.stream_chat_completion(
prompt=definition_prompt, question=chat.tail.content
)

await llm.emit_status('Discrepancies')
discrepancies = await llm.stream_chat_completion(
prompt=discrepancies_prompt, question=chat.tail.content
)

await llm.emit_status('Final')
await llm.stream_final_completion(
prompt=final_prompt,
question=chat.tail.content,
ambiguities=ambiguities,
details=details,
definitions=definitions,
discrepancies=discrepancies
)
94 changes: 94 additions & 0 deletions boost/src/custom_modules/cea.py
@@ -0,0 +1,94 @@
import random

import chat as ch
import llm
import log

ID_PREFIX = 'cea' # 'cellular automata'


def cellular_automata(rule, initial_state, generations):
"""
Runs a one-dimensional cellular automaton and records the results in binary,
allowing the state to grow.
Args:
rule (int): The rule number for the cellular automaton (0-255).
initial_state (list): The initial state of the cellular automaton.
generations (int): The number of generations to run.
Returns:
list: A list of binary strings representing the state of the cellular automaton at each generation.
"""
# Convert the rule number to a binary string and pad with zeros to 8 bits
rule_binary = format(rule, '08b')

# Initialize the list to store the results
results = ["".join(map(str, initial_state))]

# Run the cellular automata for the specified number of generations
current_state = initial_state.copy()
for _ in range(generations):
# Initialize the next state with a zero on each end
next_state = [0] + current_state + [0]

# Apply the rule to each cell in the current state
for i in range(1, len(next_state) - 1):
# Get the left, center, and right cells
left = current_state[i - 2] if i > 1 else 0
center = current_state[i - 1]
right = current_state[i] if i < len(current_state) else 0

# Convert the left, center, and right cells to a binary string
neighborhood = f"{left}{center}{right}"

# Get the next state of the cell based on the rule
next_state[i] = int(rule_binary[7 - int(neighborhood, 2)])

# Update the current state and append the next state to the results
current_state = next_state
results.append("".join(map(str, next_state)))

return results

def render_ca(results):
"""
Renders the results of a cellular automaton as a string.
Args:
results (list): A list of binary strings representing the state of the cellular automaton at each generation.
Returns:
str: A string representation of the cellular automaton results.
"""
return join.join(["".join(["|" if cell == "1" else "." for cell in result]) for result in results])



initial_state = [1]
join = '\n'


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
rule = int(llm.boost_params.get('cea_rule', '73'))
gens = int(llm.boost_params.get('cea_generations', '32'))

chat.user(
f"""
Before completing my request, please think for a while.
""".strip()
)
chat.assistant(
f"""Good idea! Let me think...
```thoughts
{render_ca(cellular_automata(rule, initial_state, gens))}
```
"""
)
await llm.emit_message(chat.tail.content)
chat.user(f"""
Now, please address my request.
""".strip())
await llm.stream_final_completion()
58 changes: 58 additions & 0 deletions boost/src/custom_modules/clarity.py
@@ -0,0 +1,58 @@
import chat as ch
import llm

ID_PREFIX = 'clarity'

should_clarify_prompt = """
<instruction>
Does this question require any clarification, or is it ready to be answered?
Reply only with "clarify" or "ready" and nothing else. Everything else will be ignored.
</instruction>
<question>
{question}
</question>
""".strip()


async def apply(chat: 'ch.Chat', llm: 'llm.LLM'):
iterations = 0
max_iterations = 15

side_chat = ch.Chat.from_conversation([chat.tail.message()])
side_chat.llm = llm

while iterations < max_iterations:
iterations += 1
side_chat.user(
"""
Are there any sources of ambiguity in my request?
Answer with "yes" or "no" and nothing else. Everything else will be ignored.
""".strip()
)
await side_chat.advance()
await llm.emit_status(f'Clarification: {side_chat.tail.content}')

if side_chat.tail.contains('no'):
break

side_chat.user("""
Clarify the ambiguity you mentioned.
""".strip())
await side_chat.emit_advance()

if iterations >= max_iterations:
break

side_chat.user('Now, please provide a clear answer to the question.')
await side_chat.emit_advance()

await llm.emit_status('Final')

side_chat.user(
"""
Think through the response you just gave. Is there anything wrong? If so, please correct it.
Otherwise, write down your final answer to my request.
""".strip()
)
await llm.stream_final_completion(chat=side_chat)