message types
MadcowD committed Aug 22, 2024
1 parent 396eb58 commit a9c64ef
Showing 33 changed files with 196 additions and 352 deletions.
2 changes: 1 addition & 1 deletion docs/ramblings/# what happemns duiring losure.py
@@ -14,7 +14,7 @@ def to_str(self):
test = # [Test object]

import ell.caching
@ell.lm("gpt-4-turbo", temperature=0.1, max_tokens=5)
@ell.text("gpt-4-turbo", temperature=0.1, max_tokens=5)
def write_a_complete_python_class(user_spec : str):
return [ell.system(f"""You are an expert python programmer capable of interpreting a user's spec and writing a python class to accomidate their request. You should document all your code, and you best practices.
{CODE_INSTURCTIONS} {test.to_str()}
26 changes: 13 additions & 13 deletions docs/ramblings/parsing_example.py
@@ -16,7 +16,7 @@ def parse_outputs(result : lstr) -> str:
return Personality(name, backstory)


@ell.lm(model="gpt-4o-mini", temperature=1.0, output_parser=parse_outputs)
@ell.text(model="gpt-4o-mini", temperature=1.0, output_parser=parse_outputs)
def create_random_personality():
"""You are backstoryGPT. You come up with a backstory for a character incljuding name. Choose a completely random name from the list. Format as follows.
@@ -37,7 +37,7 @@ def parse_outputs(result : lstr) -> str:
return Personality(name, backstory)


@ell.lm(model="gpt-4o-mini", temperature=1.0, output_parser=parse_outputs)
@ell.text(model="gpt-4o-mini", temperature=1.0, output_parser=parse_outputs)
def create_random_personality():
"""You are backstoryGPT. You come up with a backstory for a character incljuding name. Choose a completely random name from the list. Format as follows.
@@ -61,7 +61,7 @@ def parse_outputs(result : lstr) -> str:
backstory = result.split(":")[1]
return Personality(name, backstory)

@ell.lm(model="gpt-4o-mini", temperature=1.0, output_parser=Personality.parse_outputs)
@ell.text(model="gpt-4o-mini", temperature=1.0, output_parser=Personality.parse_outputs)
def create_random_personality():
"""You are backstoryGPT. You come up with a backstory for a character incljuding name. Choose a completely random name from the list. Format as follows.
@@ -79,7 +79,7 @@ def parse_outputs(result : lstr) -> str:
return Personality(name, backstory)

@ell.structure(parser=parse_outputs, retries=3)
@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def create_random_personality():
"""You are backstoryGPT. You come up with a backstory for a character incljuding name. Choose a completely random name from the list. Format as follows.
@@ -134,7 +134,7 @@ def parser(result : lstr) -> OutputFormat:

# 3. Define our LM; we can use the format from our schema or something else
@ell.structure(parser=parser, retries=3)
@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def create_random_personality():
f"""Answer in the format {OutputFormat.get_format_prompt()}"""

@@ -159,7 +159,7 @@ def parser(pstr):
return name, backstory

@ell.structure(parser=parser, retries=3)
@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def create_random_personality():
f"""Answer in the format {format}"""

@@ -169,7 +169,7 @@ def create_random_personality():
############################


@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def create_random_personality_str():
f"""Answer in the format {format}"""

@@ -197,7 +197,7 @@ def parser(pstr):

return name, backstory

@ell.lm(model="gpt-4o-mini", temperature=1.0, parser=parser)
@ell.text(model="gpt-4o-mini", temperature=1.0, parser=parser)
def create_random_personality():
f"""Answer in the format {format}"""

@@ -224,7 +224,7 @@ def create_random_personality():
If I have
"""
@ell.lm(model="gpt-4o-mini", temperature=1.0, parser=parser)
@ell.text(model="gpt-4o-mini", temperature=1.0, parser=parser)
def create_random_personality():
f"""Answer in the format {format}"""

@@ -238,7 +238,7 @@ def parse_to_my_fucked_up_unserializable_format(pstr):
return MyFuckedUpObject(pstr)

@ell.structure(parser=parse_to_my_fucked_up_unserializable_format, retries=3)
@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def create_random_personality():
f"""Answer in the format {format}"""

@@ -255,7 +255,7 @@ def create_random_personality():
This is equivalent to the following
"""
@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def create_random_personality():
return "Come up with a backstory about " + random.choice(names_list)

@@ -307,13 +307,13 @@ def converted_lm_func():
return new_system_prompt, user_prompt

return retry(schema.parse(
-        ell.lm(**lm_kwargs)(converted_lm_func)), tries=3)
+        ell.text(**lm_kwargs)(converted_lm_func)), tries=3)

return decorator

# This does the following:

@ell.lm(model="gpt-4o-mini", temperature=1.0)
@ell.text(model="gpt-4o-mini", temperature=1.0)
def internal_make_a_rpg_character(name : str):
return [
ell.system("You are a rpg character creator. You create rpg characters. You must respond only in JSON in the following format: " + {RPGSchema.get_format_prompt()}),
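Aside: the structure-over-text stacking this file keeps circling can be sketched end to end. The toy below is illustrative only; structure is a hypothetical stand-in for @ell.structure (not ell's real implementation), and fake_lm replaces an actual @ell.text-decorated call.

# Toy sketch of the parser-with-retries pattern discussed above; not ell's API.
from dataclasses import dataclass

@dataclass
class Personality:
    name: str
    backstory: str

def structure(parser, retries=3):
    def decorator(lm_func):
        def wrapped(*args, **kwargs):
            last_err = None
            for _ in range(retries):
                try:
                    # Re-run the LM and re-parse until the parser succeeds.
                    return parser(lm_func(*args, **kwargs))
                except Exception as err:
                    last_err = err
            raise last_err
        return wrapped
    return decorator

def parse_outputs(result: str) -> Personality:
    # Expects "Name: <name>\nBackstory: <backstory>".
    name_line, backstory_line = result.split("\n", 1)
    return Personality(name_line.split(":", 1)[1].strip(),
                       backstory_line.split(":", 1)[1].strip())

@structure(parser=parse_outputs, retries=3)
def fake_lm() -> str:
    return "Name: Ada\nBackstory: Raised by librarians."

print(fake_lm())  # Personality(name='Ada', backstory='Raised by librarians.')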
2 changes: 1 addition & 1 deletion examples/bv.py
@@ -20,7 +20,7 @@ def get_lmp(z = 10):
y = 13
y = z

@ell.lm("gpt-4o-mini", temperature=0.1, max_tokens=6)
@ell.text("gpt-4o-mini", temperature=0.1, max_tokens=6)
def write_a_complete_python_class(user_spec : str):
return [ell.system(f"""You are an mid-tier python programmer capable of interpreting a user's spec and writing a python class to accomidate their request. You should document all your code, and you best practices.
{CODE_INSTURCTIONS} {z} {y} {test} {another_serializeable_global}
2 changes: 1 addition & 1 deletion examples/calculator_structured.py
@@ -39,7 +39,7 @@ class Div:
CalcOp = Union[Add, Sub, Mul, Div]


@ell.lm(model="gpt-4o", temperature=0.1)
@ell.text(model="gpt-4o", temperature=0.1)
def parse_json(task: str, type: Type[Any]):
return [
ell.system(
226 changes: 2 additions & 224 deletions examples/chatbot.py
@@ -16,12 +16,12 @@ def approve_claim(claim_id : str):
"""Approve a claim"""
pass

@ell.multimodal(model="gpt-4o", tools=[create_claim_draft, approve_claim], temperature=0.1)
@ell.multimodal(model="gpt-4o", tools=[create_claim_draft, approve_claim], temperature=0.1, exempt_from_tracking=True)
def insurance_claim_chatbot(message_history: List[str]):
return [
ell.system( """You are a an insurance adjuster AI. You are given a dialogue with a user and have access to various tools to effectuate the insurance claim adjustment process. Ask question until you have enough information to create a claim draft. Then ask for approval."""),
] + [
-        ell.Message(role="user" if i % 2 == 0 else "assistant", content=message)
+        ell.message(role="user" if i % 2 == 0 else "assistant", content=message)
for i, message in enumerate(message_history)
]

@@ -42,225 +42,3 @@ def insurance_claim_chatbot(message_history: List[str]):
message_history.append(response)
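
Only the tail of the chat loop is visible in this hunk. A plausible reconstruction, under the assumption that insurance_claim_chatbot returns a plain string (hypothetical; the full loop is not shown):

# Hypothetical driver loop; only message_history.append(response) is visible above.
message_history = []
while True:
    user_input = input("You: ")
    message_history.append(user_input)
    response = insurance_claim_chatbot(message_history)
    print("Bot:", response)
    message_history.append(response)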




# import openai




# class StrContent(BaseModel):
# content: str
# type : Literal["text"]


# class Message(BaseModel):
# content: Union[StrContent, FunctionCall, Image, Audio]


# x = my_lm()
# if x.content.type == "text":
# print(x.content.text)

# if x.text:
# print(x.text)









# class Message(BaseModel):
# text: str
# image: str
# tool_result: str
# audio: str
# function_call : FunctionCall

# # class FunctionCall(BaseModel):
# # name: str
# # arguments: BaseModel
# # fn : Callable
# # def call(self):
# # return self.fn(self.arguments)



# @ell.lm(model="claude-3.5-sonnet")
# def i_use_different_content_blocks():
# return "asdasd"



# i_use_different_content_blocks()
# ->
# [
# {
# "type": "text",
# "content": "asdasd"
# },
# {
# "type": "function_call",
# "content": {
# "name": "asdasd",
# "arguments": "asdasd"
# }
# },
# {
# "type": "file",
# "content": "asdasd"
# }
# ]


# @ell.multimodal_lm(model="gpt-4o")
# def i_use_different_content_blocks():
# return "asdasd"


# class MultimodalMessage(BaseModel):
# text: str
# image: str
# tool_result: str
# audio: str
# function_call : FunctionCall
# _raw




# @ell.text_lm(model="gpt-4o")
# def i_use_different_content_blocks():
# return "asdasd"



# # -> str





# TOOLS = [
# tool1,
# tool2,
# tool3,
# ]
# @ell.lm(model="llama-3-8b-instruct")
# def i_use_tools(request : str):
# return [
# ell.system("You are a helpful assistant. You have access to the following tools:" + "\n" + tool.prompt()),
# ell.user(request),
# ]



# TOOLS = [
# tool1,
# tool2,
# tool3,
# ]
# @ell.lm(model="gpt-4o", tools=TOOLS)
# def i_use_tools(request : str):
# pass



# # It's our job to abstract different kinds of Foundation Models

# @ell.openai.lm(model="gpt-4o", tools=TOOLS)
# def i_use_tools(request : str):
# pass


# # Throws an error: # llama lms do not support explicit tool calling; you need to prompt this
# @ell.llama.lm(model="llama-3-8b-instruct", tools=TOOLS)
# def i_use_tools(request : str):
# pass

# # therefore : ->
# @ell.llama.lm(model="llama-3-8b-instruct")
# def i_use_tools(request : str):
# return [
# ell.system("You are a helpful assistant. You have access to the following tools:" + "\n" + tool.prompt()),
# ell.user(request),
# ]


# @ell.anthropic.lm(model="claude-3.5-sonnet")
# def i_use_tools(request : str):
# return [
# ell.system("You are a helpful assistant. You have access to the following tools:" + "\n" + tool.prompt()),
# ell.user(request),
# ]
# class AnthropicMessage(BaseModel):
# content_blocks : List[ContentBlock]

# class ContentBlock(BaseModel):
# type : Literal["text", "function_call", "file"]
# content : str



# #------------------------------- -- Against our design philosophy

# @ell.lm(model="gpt-4o")
# def i_use_tools(request : str):
# return MessageCreationParams(
# tools=tools,
# message=[
# ell.system("You are a helpful assistant. You have access to the following tools:" + "\n" + tool.prompt()),
# ell.user(request),
# ]
# )


# @ell.lm(model="gpt-4o")
# def i_use_tools(request : str):
# """Suystem prompt"""
# return "user pomrpt"




# # --------------


# @ell.lm # ALL STRING BASED GET FUCKED
# def gn():
# return "asdasd"

# gn() # -> str



# @ell.omni(tools=TOOLS, structured_outputs=True) # Gives you raw return types from the model
# def fn():
# return "asdasd"

# fn() # -> OpenAI.ChatCompletion

# fn().choices[0].message.content




# class ContentBlock(BaseModel):
# text :lstr
# audio : lnumpy
#     image : lnumpy
# function_call : lFunctionCall


# T = TypeVar("ContentBlockType")
# class ContentBlock(BaseModel):
# type: Literal["text", "audio", "image", "function_call"]
# content : T


# class TextContent(BaseModel):
# type: Literal["text"]
# content: str
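
The final commented sketches circle a discriminated union of typed content blocks. A minimal runnable rendering of that idea in Pydantic could look like the following; TextContent and ContentBlock come from the comments above, while the function-call variant and the discriminator wiring are assumptions:

# Hedged sketch of the tagged-union content-block model from the deleted comments.
from typing import Literal, Union
from pydantic import BaseModel, Field

class TextContent(BaseModel):
    type: Literal["text"] = "text"
    content: str

class FunctionCallContent(BaseModel):
    type: Literal["function_call"] = "function_call"
    name: str
    arguments: str

ContentBlock = Union[TextContent, FunctionCallContent]

class Message(BaseModel):
    role: Literal["system", "user", "assistant"]
    content: ContentBlock = Field(discriminator="type")

msg = Message(role="assistant", content=TextContent(content="hello"))
if msg.content.type == "text":
    print(msg.content.content)  # -> "hello"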