
Feat/new models #248

Merged · 4 commits · Aug 8, 2024
2 changes: 1 addition & 1 deletion packages/gnosis/customs/ofv_market_resolver/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeie2727utrijcawhgm5bgj7p5lfah2pzepaufnk6m5swuwyduhf2eu
- ofv_market_resolver.py: bafybeibzog6em3xjnau5gju7rchjgnydmx6fikgcedcfam4ocnwr3xjqku
+ ofv_market_resolver.py: bafybeic6kxngm3mdkpmcjucrrerhyex7kizn7tw7qmpehaiycnc7cb6umu
fingerprint_ignore_patterns: []
entry_point: omen_buy_sell.py
callable: run
@@ -61,7 +61,7 @@ def execute() -> MechResponse:
return wrapper


- DEFAULT_OPENAI_MODEL = "gpt-4-0125-preview"
+ DEFAULT_OPENAI_MODEL = "gpt-4o-2024-08-06"
ALLOWED_TOOLS = ["ofv_market_resolver"]
ALLOWED_MODELS = [DEFAULT_OPENAI_MODEL]

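Since ALLOWED_MODELS contains only the new default, every request to this tool now resolves to gpt-4o-2024-08-06. A minimal sketch of how an entry point might apply such an allow-list; resolve_model is a hypothetical helper, not part of this PR:

from typing import Optional

DEFAULT_OPENAI_MODEL = "gpt-4o-2024-08-06"
ALLOWED_MODELS = [DEFAULT_OPENAI_MODEL]

def resolve_model(requested: Optional[str]) -> str:
    # Fall back to the default when nothing was requested or the
    # requested model is not on the allow-list.
    if requested in ALLOWED_MODELS:
        return requested
    return DEFAULT_OPENAI_MODEL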
2 changes: 1 addition & 1 deletion packages/gnosis/customs/omen_tools/component.yaml
@@ -8,7 +8,7 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
- omen_buy_sell.py: bafybeie77xanpmjbumzcagtooo23a2xfp6xpeirg63mpzq7i6qrges3yim
+ omen_buy_sell.py: bafybeicb3xx7fdz4heudikuic7brnqjb6silop3lnvlheb4bfk76opd2hy
fingerprint_ignore_patterns: []
entry_point: omen_buy_sell.py
callable: run
9 changes: 7 additions & 2 deletions packages/gnosis/customs/omen_tools/omen_buy_sell.py
@@ -262,7 +262,7 @@ def build_buy_tx(

except Exception as e:
traceback.print_exception(e)
return f"exception occurred - {e}", None, None, None
return f"exception occurred - {e}", "", None, None


def build_return_from_tx_params(
Expand Down Expand Up @@ -303,7 +303,7 @@ def build_sell_tx(

except Exception as e:
traceback.print_exception(e)
return f"exception occurred - {e}", None, None, None
return f"exception occurred - {e}", "", None, None


def with_key_rotation(func: Callable):
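Both error paths above now return an empty string rather than None in the second slot of the four-tuple. The PR does not state the motivation, but it matches the Tuple[str, Optional[str], ...] run() annotations elsewhere in this changeset; a hedged reading, with assumed slot names:

from typing import Any, Dict, Optional, Tuple

# Assumed slot layout: (content, reasoning_or_prompt, tx_params, counter_callback).
BuySellResponse = Tuple[str, str, Optional[Dict[str, Any]], Any]

def build_error_response(e: Exception) -> BuySellResponse:
    # "" keeps the second slot a str, so callers that log or concatenate it
    # never have to guard against None.
    return f"exception occurred - {e}", "", None, None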
@@ -348,6 +348,11 @@ def error_response(msg: str) -> Tuple[str, None, None, None]:
"limit_max_tokens": 8192,
"temperature": 0,
},
"gpt-4o-2024-08-06": {
"default_max_tokens": 500,
Review comment (Collaborator): why this limit on 500?
Reply (Author): I just copied over from above.
"limit_max_tokens": 4096,
"temperature": 0,
},
}

ALLOWED_MODELS = list(LLM_SETTINGS.keys())
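On the reviewer's "why this limit on 500?": in this settings scheme, default_max_tokens reads as the completion budget applied when the caller asks for nothing specific, and limit_max_tokens as a hard cap on what a caller may request, so 500 is simply a conservative default carried over from the older entries. A minimal sketch under that assumption; clamp_max_tokens is hypothetical, not part of this PR:

from typing import Optional

LLM_SETTINGS = {
    "gpt-4o-2024-08-06": {
        "default_max_tokens": 500,
        "limit_max_tokens": 4096,
        "temperature": 0,
    },
}

def clamp_max_tokens(model: str, requested: Optional[int]) -> int:
    # Use the default when nothing was requested; otherwise cap the request.
    settings = LLM_SETTINGS[model]
    if requested is None:
        return settings["default_max_tokens"]
    return min(requested, settings["limit_max_tokens"])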
@@ -7,7 +7,7 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeicqdub7wb5n454snmgxymim63itq6st7j2whznnsz6aiyxwaaokbi
- prediction_sentence_embeddings.py: bafybeid7l5vy6utk2bwjj6syc62g4mvq5dnwl6qy4pavrecltujm453hk4
+ prediction_sentence_embeddings.py: bafybeicpd3o4fyxzw3pjee6r6iyguk4lheiilfxya2yqww5asuinbfwbgi
fingerprint_ignore_patterns: []
entry_point: prediction_sentence_embeddings.py
callable: run
@@ -278,6 +278,11 @@ def count_tokens(text: str, model: str) -> int:
"default_max_tokens": 500,
"limit_max_tokens": 8192,
"temperature": 0,
},
"gpt-4o-2024-08-06": {
"default_max_tokens": 500,
"limit_max_tokens": 4096,
"temperature": 0,
}
}
ALLOWED_TOOLS = [
@@ -287,7 +292,7 @@ def count_tokens(text: str, model: str) -> int:
ALLOWED_MODELS = list(LLM_SETTINGS.keys())
TOOL_TO_ENGINE = {
"prediction-sentence-embedding-conservative": "gpt-3.5-turbo-0125",
"prediction-sentence-embedding-bold": "gpt-4-0125-preview",
"prediction-sentence-embedding-bold": "gpt-4o-2024-08-06",
}

PREDICTION_PROMPT = """
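TOOL_TO_ENGINE pins each tool variant to an engine, so the "bold" variant silently moves from gpt-4-0125-preview to gpt-4o-2024-08-06 for every caller that does not select a model explicitly. A sketch of that dispatch; pick_engine is illustrative, not from this file:

from typing import Optional

TOOL_TO_ENGINE = {
    "prediction-sentence-embedding-conservative": "gpt-3.5-turbo-0125",
    "prediction-sentence-embedding-bold": "gpt-4o-2024-08-06",
}

def pick_engine(tool: str, model: Optional[str] = None) -> str:
    # An explicitly supplied model wins; otherwise route by tool name.
    return model or TOOL_TO_ENGINE[tool]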
@@ -8,7 +8,7 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
- prediction_sum_url_content.py: bafybeiaudp5x4vuemxrpnssjqr52liofsisdbpb3fd2sqe5i26t73b5zku
+ prediction_sum_url_content.py: bafybeibwogzjmcaetqjtxsgxlwuigoocjnibx3vozwgcgeuya2fiidsrwm
fingerprint_ignore_patterns: []
entry_point: prediction_sum_url_content.py
callable: run
@@ -137,8 +137,8 @@ def count_tokens(text: str, model: str) -> int:
"prediction-online-sum-url-content",
]
TOOL_TO_ENGINE = {
"prediction-offline-sum-url-content": "gpt-4-0125-preview",
"prediction-online-sum-url-content": "gpt-4-0125-preview",
"prediction-offline-sum-url-content": "gpt-4o-2024-08-06",
"prediction-online-sum-url-content": "gpt-4o-2024-08-06",
}


@@ -1033,7 +1033,7 @@ def fetch_additional_information(
google_api_key: str,
google_engine: str,
nlp,
engine: str = "gpt-4-0125-preview",
engine: str = "gpt-4o-2024-08-06",
temperature: float = 1.0,
max_compl_tokens: int = 500,
) -> str:
@@ -7,12 +7,12 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeibt7f7crtwvmkg7spy3jhscmlqltvyblzp32g6gj44v7tlo5lycuq
- prediction_request_rag.py: bafybeihpdur7tklfwjldbirghx6sj2e4qsjkrvmqzohjkqbqkzogccmcay
+ prediction_request_rag.py: bafybeicsvujeom6iqdfy3eynmvtrv44l6gnezxqu24sxbnjnui6sgbriwe
fingerprint_ignore_patterns: []
entry_point: prediction_request_rag.py
callable: run
params:
- default_model: gpt-4-0125-preview
+ default_model: gpt-4o-2024-08-06
dependencies:
google-api-python-client:
version: ==2.95.0
@@ -254,6 +254,11 @@ def embeddings(self, model, input):
"limit_max_tokens": 8192,
"temperature": 0,
},
"gpt-4o-2024-08-06": {
"default_max_tokens": 500,
"limit_max_tokens": 4096,
Review comment (Author): I think that's for the input, no?
Reply (Collaborator): In the URL that I shared, search for sonnet and check the config there: for the Sonnet entry you put the max input tokens value in limit_max_tokens, not the max output tokens value. So why, for the GPT model, are you putting the max output tokens in limit_max_tokens instead of the max input tokens?
"temperature": 0,
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
@@ -264,6 +269,11 @@ def embeddings(self, model, input):
"limit_max_tokens": 200_000,
"temperature": 0,
},
"claude-3-5-sonnet-20240620": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
@@ -364,10 +374,10 @@ def multi_queries(
model: str,
num_queries: int,
counter_callback: Optional[Callable[[int, int, str], None]] = None,
temperature: Optional[float] = LLM_SETTINGS["claude-3-sonnet-20240229"][
temperature: Optional[float] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
"temperature"
],
max_tokens: Optional[int] = LLM_SETTINGS["claude-3-sonnet-20240229"][
max_tokens: Optional[int] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
"default_max_tokens"
],
) -> List[str]:
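A side effect worth noting: these LLM_SETTINGS lookups sit in default-argument position, and Python evaluates default expressions once, when the function is defined. That is why the migration has to edit every signature; mutating LLM_SETTINGS at runtime would not change defaults that were already captured. A two-line demonstration:

SETTINGS = {"model": {"default_max_tokens": 1000}}

def ask(max_tokens: int = SETTINGS["model"]["default_max_tokens"]) -> int:
    return max_tokens

SETTINGS["model"]["default_max_tokens"] = 2000
print(ask())  # still 1000: the default was captured when ask() was defined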
@@ -612,10 +622,10 @@ def fetch_additional_information(
source_links: Optional[List[str]] = None,
num_urls: Optional[int] = DEFAULT_NUM_URLS,
num_queries: Optional[int] = DEFAULT_NUM_QUERIES,
temperature: Optional[float] = LLM_SETTINGS["claude-3-sonnet-20240229"][
temperature: Optional[float] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
"temperature"
],
max_tokens: Optional[int] = LLM_SETTINGS["claude-3-sonnet-20240229"][
max_tokens: Optional[int] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
"default_max_tokens"
],
) -> Tuple[str, Callable[[int, int, str], None]]:
@@ -745,7 +755,7 @@ def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
tool = kwargs["tool"]
model = kwargs.get("model")
if "claude" in tool: # maintain backwards compatibility
model = "claude-3-sonnet-20240229"
model = "claude-3-5-sonnet-20240620"
print(f"MODEL: {model}")
with LLMClientManager(kwargs["api_keys"], model, embedding_provider="openai"):
prompt = extract_question(kwargs["prompt"])
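The backwards-compatibility branch means the tool name alone now decides the model for legacy Claude tools, overriding whatever the caller passed. An illustrative call with made-up kwargs values:

kwargs = {
    "tool": "claude-prediction-online",   # hypothetical legacy tool name
    "model": "claude-3-sonnet-20240229",  # whatever the caller supplied
    "prompt": "Will X happen by 2025?",
}
model = kwargs.get("model")
if "claude" in kwargs["tool"]:  # mirrors the branch in the diff above
    model = "claude-3-5-sonnet-20240620"
print(model)  # claude-3-5-sonnet-20240620, regardless of the caller's choice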
@@ -7,12 +7,12 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
- prediction_request_reasoning.py: bafybeie737vbcmdnivmf3sxjjadamq2v2dq6hlgrrnz34txhs3sbya5kkm
+ prediction_request_reasoning.py: bafybeihlvlkfzkbmn6a5s7mrozby6uvmyvizozfp2wtrje6b75rqc3xdv4
fingerprint_ignore_patterns: []
entry_point: prediction_request_reasoning.py
callable: run
params:
- default_model: gpt-4-0125-preview
+ default_model: gpt-4o-2024-08-06
dependencies:
google-api-python-client:
version: ==2.95.0
@@ -257,6 +257,11 @@ def embeddings(self, model, input):
"limit_max_tokens": 8192,
"temperature": 0,
},
"gpt-4o-2024-08-06": {
"default_max_tokens": 500,
"limit_max_tokens": 4096,
"temperature": 0,
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
@@ -267,6 +272,11 @@ def embeddings(self, model, input):
"limit_max_tokens": 200_000,
"temperature": 0,
},
"claude-3-5-sonnet-20240620": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
@@ -457,8 +467,8 @@ def multi_queries(
model: str,
num_queries: int,
counter_callback: Optional[Callable[[int, int, str], None]] = None,
temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
"default_max_tokens"
],
) -> List[str]:
@@ -673,8 +683,8 @@ def find_similar_chunks(
def multi_questions_response(
prompt: str,
model: str,
temperature: float = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
max_tokens: int = LLM_SETTINGS["gpt-4-0125-preview"]["default_max_tokens"],
temperature: float = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
max_tokens: int = LLM_SETTINGS["gpt-4o-2024-08-06"]["default_max_tokens"],
counter_callback: Optional[Callable[[int, int, str], None]] = None,
) -> List[str]:
"""Generate multiple questions for fetching information from the web."""
@@ -782,8 +792,8 @@ def fetch_additional_information(
source_links: Optional[List[str]] = None,
num_urls: Optional[int] = DEFAULT_NUM_URLS,
num_queries: Optional[int] = DEFAULT_NUM_QUERIES,
temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
"default_max_tokens"
],
) -> Tuple[str, List[str], Optional[Callable[[int, int, str], None]]]:
@@ -906,7 +916,7 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
tool = kwargs["tool"]
model = kwargs.get("model")
if "claude" in tool: # maintain backwards compatibility
model = "claude-3-sonnet-20240229"
model = "claude-3-5-sonnet-20240620"
print(f"MODEL: {model}")
with LLMClientManager(kwargs["api_keys"], model, embedding_provider="openai"):
prompt = extract_question(kwargs["prompt"])
@@ -7,12 +7,12 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
- prediction_request_reasoning.py: bafybeig5zyj5lr23gmubjefdgbapynm7wppb6v5vweqza5d67n6zulkpbi
+ prediction_request_reasoning.py: bafybeifvknjyif5gmfo2l76f4pmmfzxmaejgrx2l5vrn7yfattvhchv64y
fingerprint_ignore_patterns: []
entry_point: prediction_request_reasoning.py
callable: run
params:
- default_model: gpt-4-0125-preview
+ default_model: gpt-4o-2024-08-06
dependencies:
google-api-python-client:
version: ==2.95.0
@@ -256,6 +256,11 @@ def embeddings(self, model, input):
"limit_max_tokens": 8192,
"temperature": 0,
},
"gpt-4o-2024-08-06": {
"default_max_tokens": 500,
"limit_max_tokens": 4096,
"temperature": 0,
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
@@ -266,6 +271,11 @@ def embeddings(self, model, input):
"limit_max_tokens": 200_000,
"temperature": 0,
},
"claude-3-5-sonnet-20240620": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
"limit_max_tokens": 200_000,
@@ -456,8 +466,8 @@ def multi_queries(
model: str,
num_queries: int,
counter_callback: Optional[Callable[[int, int, str], None]] = None,
temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
"default_max_tokens"
],
) -> List[str]:
@@ -687,8 +697,8 @@ def find_similar_chunks(
def multi_questions_response(
prompt: str,
model: str,
temperature: float = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
max_tokens: int = LLM_SETTINGS["gpt-4-0125-preview"]["default_max_tokens"],
temperature: float = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
max_tokens: int = LLM_SETTINGS["gpt-4o-2024-08-06"]["default_max_tokens"],
counter_callback: Optional[Callable[[int, int, str], None]] = None,
) -> List[str]:
"""Generate multiple questions for fetching information from the web."""
@@ -796,8 +806,8 @@ def fetch_additional_information(
source_links: Optional[List[str]] = None,
num_urls: Optional[int] = DEFAULT_NUM_URLS,
num_queries: Optional[int] = DEFAULT_NUM_QUERIES,
temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
"default_max_tokens"
],
) -> Tuple[str, List[str], Optional[Callable[[int, int, str], None]]]:
@@ -920,7 +930,7 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
tool = kwargs["tool"]
model = kwargs.get("model")
if "claude" in tool: # maintain backwards compatibility
model = "claude-3-sonnet-20240229"
model = "claude-3-5-sonnet-20240620"
print(f"MODEL: {model}")
with LLMClientManager(kwargs["api_keys"], model, embedding_provider="openai"):
prompt = extract_question(kwargs["prompt"])
4 changes: 2 additions & 2 deletions packages/napthaai/customs/prediction_url_cot/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
aea_version: '>=1.0.0, <2.0.0'
fingerprint:
__init__.py: bafybeiflni5dkn5fqe7fnu4lgbqxzfrgochhqfbgzwz3vlf5grijp3nkpm
- prediction_url_cot.py: bafybeiczu54zafzdmgcqfmlk6h2ze6yxgzbn5xqsisnedptwpnfjbtcgby
+ prediction_url_cot.py: bafybeibt7fs2lzr6srk6rbq6bmgthr22vh2towk5ibwjuh75b3oyiqh3fe
fingerprint_ignore_patterns: []
entry_point: prediction_url_cot.py
callable: run
params:
- default_model: claude-3-sonnet-20240229
+ default_model: claude-3-5-sonnet-20240620
dependencies:
google-api-python-client:
version: ==2.95.0