diff --git a/packages/gnosis/customs/ofv_market_resolver/component.yaml b/packages/gnosis/customs/ofv_market_resolver/component.yaml
index 82f22ddb..dcd6dd64 100644
--- a/packages/gnosis/customs/ofv_market_resolver/component.yaml
+++ b/packages/gnosis/customs/ofv_market_resolver/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeie2727utrijcawhgm5bgj7p5lfah2pzepaufnk6m5swuwyduhf2eu
-  ofv_market_resolver.py: bafybeibzog6em3xjnau5gju7rchjgnydmx6fikgcedcfam4ocnwr3xjqku
+  ofv_market_resolver.py: bafybeic6kxngm3mdkpmcjucrrerhyex7kizn7tw7qmpehaiycnc7cb6umu
 fingerprint_ignore_patterns: []
 entry_point: omen_buy_sell.py
 callable: run
diff --git a/packages/gnosis/customs/ofv_market_resolver/ofv_market_resolver.py b/packages/gnosis/customs/ofv_market_resolver/ofv_market_resolver.py
index 3f57e155..be469655 100644
--- a/packages/gnosis/customs/ofv_market_resolver/ofv_market_resolver.py
+++ b/packages/gnosis/customs/ofv_market_resolver/ofv_market_resolver.py
@@ -61,7 +61,7 @@ def execute() -> MechResponse:
     return wrapper


-DEFAULT_OPENAI_MODEL = "gpt-4-0125-preview"
+DEFAULT_OPENAI_MODEL = "gpt-4o-2024-08-06"
 ALLOWED_TOOLS = ["ofv_market_resolver"]
 ALLOWED_MODELS = [DEFAULT_OPENAI_MODEL]

diff --git a/packages/gnosis/customs/omen_tools/component.yaml b/packages/gnosis/customs/omen_tools/component.yaml
index c062c8f5..2c2f5294 100644
--- a/packages/gnosis/customs/omen_tools/component.yaml
+++ b/packages/gnosis/customs/omen_tools/component.yaml
@@ -8,7 +8,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  omen_buy_sell.py: bafybeie77xanpmjbumzcagtooo23a2xfp6xpeirg63mpzq7i6qrges3yim
+  omen_buy_sell.py: bafybeicb3xx7fdz4heudikuic7brnqjb6silop3lnvlheb4bfk76opd2hy
 fingerprint_ignore_patterns: []
 entry_point: omen_buy_sell.py
 callable: run
diff --git a/packages/gnosis/customs/omen_tools/omen_buy_sell.py b/packages/gnosis/customs/omen_tools/omen_buy_sell.py
index a67d8be0..450cda7f 100644
--- a/packages/gnosis/customs/omen_tools/omen_buy_sell.py
+++ b/packages/gnosis/customs/omen_tools/omen_buy_sell.py
@@ -262,7 +262,7 @@ def build_buy_tx(

     except Exception as e:
         traceback.print_exception(e)
-        return f"exception occurred - {e}", None, None, None
+        return f"exception occurred - {e}", "", None, None


 def build_return_from_tx_params(
@@ -303,7 +303,7 @@ def build_sell_tx(

     except Exception as e:
         traceback.print_exception(e)
-        return f"exception occurred - {e}", None, None, None
+        return f"exception occurred - {e}", "", None, None


 def with_key_rotation(func: Callable):
@@ -348,6 +348,11 @@ def error_response(msg: str) -> Tuple[str, None, None, None]:
         "limit_max_tokens": 8192,
         "temperature": 0,
     },
+    "gpt-4o-2024-08-06": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 4096,
+        "temperature": 0,
+    },
 }

 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
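Note on the two omen_buy_sell.py hunks above: the second element of the error tuple changes from None to "", so the prompt slot stays a str even on failure. A minimal sketch of the idea, assuming the conventional mech tuple shape of (content, prompt, transaction, callback); the helper below is illustrative, not code from this patch:

from typing import Any, Dict, Optional, Tuple

# Assumed response shape: (content, prompt, transaction, callback).
MechResponseTuple = Tuple[str, str, Optional[Dict[str, Any]], Any]


def error_response(msg: str) -> MechResponseTuple:
    # An empty string keeps downstream str handling of the prompt slot safe,
    # where a None could raise TypeError.
    return f"exception occurred - {msg}", "", None, None
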
diff --git a/packages/jhehemann/customs/prediction_sentence_embeddings/component.yaml b/packages/jhehemann/customs/prediction_sentence_embeddings/component.yaml
index b3e5e896..b265e6ac 100644
--- a/packages/jhehemann/customs/prediction_sentence_embeddings/component.yaml
+++ b/packages/jhehemann/customs/prediction_sentence_embeddings/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeicqdub7wb5n454snmgxymim63itq6st7j2whznnsz6aiyxwaaokbi
-  prediction_sentence_embeddings.py: bafybeid7l5vy6utk2bwjj6syc62g4mvq5dnwl6qy4pavrecltujm453hk4
+  prediction_sentence_embeddings.py: bafybeicpd3o4fyxzw3pjee6r6iyguk4lheiilfxya2yqww5asuinbfwbgi
 fingerprint_ignore_patterns: []
 entry_point: prediction_sentence_embeddings.py
 callable: run
diff --git a/packages/jhehemann/customs/prediction_sentence_embeddings/prediction_sentence_embeddings.py b/packages/jhehemann/customs/prediction_sentence_embeddings/prediction_sentence_embeddings.py
index 366eac21..40b70422 100644
--- a/packages/jhehemann/customs/prediction_sentence_embeddings/prediction_sentence_embeddings.py
+++ b/packages/jhehemann/customs/prediction_sentence_embeddings/prediction_sentence_embeddings.py
@@ -278,6 +278,11 @@ def count_tokens(text: str, model: str) -> int:
         "default_max_tokens": 500,
         "limit_max_tokens": 8192,
         "temperature": 0,
+    },
+    "gpt-4o-2024-08-06": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 4096,
+        "temperature": 0,
     }
 }
 ALLOWED_TOOLS = [
@@ -287,7 +292,7 @@ def count_tokens(text: str, model: str) -> int:
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 TOOL_TO_ENGINE = {
     "prediction-sentence-embedding-conservative": "gpt-3.5-turbo-0125",
-    "prediction-sentence-embedding-bold": "gpt-4-0125-preview",
+    "prediction-sentence-embedding-bold": "gpt-4o-2024-08-06",
 }

 PREDICTION_PROMPT = """
diff --git a/packages/jhehemann/customs/prediction_sum_url_content/component.yaml b/packages/jhehemann/customs/prediction_sum_url_content/component.yaml
index 3ea87dc3..19fd94f0 100644
--- a/packages/jhehemann/customs/prediction_sum_url_content/component.yaml
+++ b/packages/jhehemann/customs/prediction_sum_url_content/component.yaml
@@ -8,7 +8,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_sum_url_content.py: bafybeiaudp5x4vuemxrpnssjqr52liofsisdbpb3fd2sqe5i26t73b5zku
+  prediction_sum_url_content.py: bafybeibwogzjmcaetqjtxsgxlwuigoocjnibx3vozwgcgeuya2fiidsrwm
 fingerprint_ignore_patterns: []
 entry_point: prediction_sum_url_content.py
 callable: run
diff --git a/packages/jhehemann/customs/prediction_sum_url_content/prediction_sum_url_content.py b/packages/jhehemann/customs/prediction_sum_url_content/prediction_sum_url_content.py
index efa6599c..dbe961fb 100644
--- a/packages/jhehemann/customs/prediction_sum_url_content/prediction_sum_url_content.py
+++ b/packages/jhehemann/customs/prediction_sum_url_content/prediction_sum_url_content.py
@@ -137,8 +137,8 @@ def count_tokens(text: str, model: str) -> int:
     "prediction-online-sum-url-content",
 ]
 TOOL_TO_ENGINE = {
-    "prediction-offline-sum-url-content": "gpt-4-0125-preview",
-    "prediction-online-sum-url-content": "gpt-4-0125-preview",
+    "prediction-offline-sum-url-content": "gpt-4o-2024-08-06",
+    "prediction-online-sum-url-content": "gpt-4o-2024-08-06",
 }


@@ -1033,7 +1033,7 @@ def fetch_additional_information(
     google_api_key: str,
     google_engine: str,
     nlp,
-    engine: str = "gpt-4-0125-preview",
+    engine: str = "gpt-4o-2024-08-06",
     temperature: float = 1.0,
     max_compl_tokens: int = 500,
 ) -> str:
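The jhehemann tools above route each tool name to an engine through a TOOL_TO_ENGINE table, which is why the retarget is a one-line edit per tool. A condensed sketch of that dispatch, assuming the caller may also pass an explicit model (as the run() entry points in this patch do):

from typing import Optional

TOOL_TO_ENGINE = {
    "prediction-sentence-embedding-conservative": "gpt-3.5-turbo-0125",
    "prediction-sentence-embedding-bold": "gpt-4o-2024-08-06",
}


def resolve_engine(tool: str, requested_model: Optional[str] = None) -> str:
    # An explicit model wins; otherwise the tool falls back to its table entry.
    return requested_model or TOOL_TO_ENGINE[tool]
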
diff --git a/packages/napthaai/customs/prediction_request_rag/component.yaml b/packages/napthaai/customs/prediction_request_rag/component.yaml
index 88dba890..eba52e70 100644
--- a/packages/napthaai/customs/prediction_request_rag/component.yaml
+++ b/packages/napthaai/customs/prediction_request_rag/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibt7f7crtwvmkg7spy3jhscmlqltvyblzp32g6gj44v7tlo5lycuq
-  prediction_request_rag.py: bafybeihpdur7tklfwjldbirghx6sj2e4qsjkrvmqzohjkqbqkzogccmcay
+  prediction_request_rag.py: bafybeicsvujeom6iqdfy3eynmvtrv44l6gnezxqu24sxbnjnui6sgbriwe
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_rag.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py b/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
index b9d01227..09cdf846 100644
--- a/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
+++ b/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
@@ -254,6 +254,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 8192,
         "temperature": 0,
     },
+    "gpt-4o-2024-08-06": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 4096,
+        "temperature": 0,
+    },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -264,6 +269,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 200_000,
         "temperature": 0,
     },
+    "claude-3-5-sonnet-20240620": {
+        "default_max_tokens": 1000,
+        "limit_max_tokens": 200_000,
+        "temperature": 0,
+    },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -364,10 +374,10 @@ def multi_queries(
     model: str,
     num_queries: int,
     counter_callback: Optional[Callable[[int, int, str], None]] = None,
-    temperature: Optional[float] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    temperature: Optional[float] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "temperature"
     ],
-    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "default_max_tokens"
     ],
 ) -> List[str]:
@@ -612,10 +622,10 @@ def fetch_additional_information(
     source_links: Optional[List[str]] = None,
     num_urls: Optional[int] = DEFAULT_NUM_URLS,
     num_queries: Optional[int] = DEFAULT_NUM_QUERIES,
-    temperature: Optional[float] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    temperature: Optional[float] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "temperature"
     ],
-    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "default_max_tokens"
     ],
 ) -> Tuple[str, Callable[[int, int, str], None]]:
@@ -745,7 +755,7 @@ def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
     tool = kwargs["tool"]
     model = kwargs.get("model")
     if "claude" in tool:  # maintain backwards compatibility
-        model = "claude-3-sonnet-20240229"
+        model = "claude-3-5-sonnet-20240620"
     print(f"MODEL: {model}")
     with LLMClientManager(kwargs["api_keys"], model, embedding_provider="openai"):
         prompt = extract_question(kwargs["prompt"])
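One subtlety behind the prediction_request_rag hunks above: the LLM_SETTINGS lookups sit in default-argument position, and Python evaluates defaults once, at definition time. The new key therefore has to exist in LLM_SETTINGS when the module is imported, which is why the settings entry and every default-argument key switch together in this patch. A stripped-down illustration:

LLM_SETTINGS = {
    "claude-3-5-sonnet-20240620": {"default_max_tokens": 1000, "temperature": 0},
}


def multi_queries(
    prompt: str,
    temperature: float = LLM_SETTINGS["claude-3-5-sonnet-20240620"]["temperature"],
    max_tokens: int = LLM_SETTINGS["claude-3-5-sonnet-20240620"]["default_max_tokens"],
) -> tuple:
    # The defaults were captured when `def` ran; mutating LLM_SETTINGS later
    # does not change them for existing callers.
    return temperature, max_tokens
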
diff --git a/packages/napthaai/customs/prediction_request_reasoning/component.yaml b/packages/napthaai/customs/prediction_request_reasoning/component.yaml
index 7d725c90..e4be094f 100644
--- a/packages/napthaai/customs/prediction_request_reasoning/component.yaml
+++ b/packages/napthaai/customs/prediction_request_reasoning/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
-  prediction_request_reasoning.py: bafybeie737vbcmdnivmf3sxjjadamq2v2dq6hlgrrnz34txhs3sbya5kkm
+  prediction_request_reasoning.py: bafybeihlvlkfzkbmn6a5s7mrozby6uvmyvizozfp2wtrje6b75rqc3xdv4
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_reasoning.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py b/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
index 6c08e24a..7108f911 100644
--- a/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
+++ b/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
@@ -257,6 +257,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 8192,
         "temperature": 0,
     },
+    "gpt-4o-2024-08-06": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 4096,
+        "temperature": 0,
+    },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -267,6 +272,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 200_000,
         "temperature": 0,
     },
+    "claude-3-5-sonnet-20240620": {
+        "default_max_tokens": 1000,
+        "limit_max_tokens": 200_000,
+        "temperature": 0,
+    },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -457,8 +467,8 @@ def multi_queries(
     model: str,
     num_queries: int,
     counter_callback: Optional[Callable[[int, int, str], None]] = None,
-    temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
-    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
+    temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
+    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
         "default_max_tokens"
     ],
 ) -> List[str]:
@@ -673,8 +683,8 @@ def find_similar_chunks(
 def multi_questions_response(
     prompt: str,
     model: str,
-    temperature: float = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
-    max_tokens: int = LLM_SETTINGS["gpt-4-0125-preview"]["default_max_tokens"],
+    temperature: float = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
+    max_tokens: int = LLM_SETTINGS["gpt-4o-2024-08-06"]["default_max_tokens"],
     counter_callback: Optional[Callable[[int, int, str], None]] = None,
 ) -> List[str]:
     """Generate multiple questions for fetching information from the web."""
@@ -782,8 +792,8 @@ def fetch_additional_information(
     source_links: Optional[List[str]] = None,
     num_urls: Optional[int] = DEFAULT_NUM_URLS,
     num_queries: Optional[int] = DEFAULT_NUM_QUERIES,
-    temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
-    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
+    temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
+    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
         "default_max_tokens"
     ],
 ) -> Tuple[str, List[str], Optional[Callable[[int, int, str], None]]]:
@@ -906,7 +916,7 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     tool = kwargs["tool"]
     model = kwargs.get("model")
     if "claude" in tool:  # maintain backwards compatibility
-        model = "claude-3-sonnet-20240229"
+        model = "claude-3-5-sonnet-20240620"
     print(f"MODEL: {model}")
     with LLMClientManager(kwargs["api_keys"], model, embedding_provider="openai"):
         prompt = extract_question(kwargs["prompt"])
diff --git a/packages/napthaai/customs/prediction_request_reasoning_lite/component.yaml b/packages/napthaai/customs/prediction_request_reasoning_lite/component.yaml
index b9ea9ab9..aa40153a 100644
--- a/packages/napthaai/customs/prediction_request_reasoning_lite/component.yaml
+++ b/packages/napthaai/customs/prediction_request_reasoning_lite/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
-  prediction_request_reasoning.py: bafybeig5zyj5lr23gmubjefdgbapynm7wppb6v5vweqza5d67n6zulkpbi
+  prediction_request_reasoning.py: bafybeifvknjyif5gmfo2l76f4pmmfzxmaejgrx2l5vrn7yfattvhchv64y
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_reasoning.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/prediction_request_reasoning_lite/prediction_request_reasoning.py b/packages/napthaai/customs/prediction_request_reasoning_lite/prediction_request_reasoning.py
index 60e1ee4a..2da0c4d0 100644
--- a/packages/napthaai/customs/prediction_request_reasoning_lite/prediction_request_reasoning.py
+++ b/packages/napthaai/customs/prediction_request_reasoning_lite/prediction_request_reasoning.py
@@ -256,6 +256,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 8192,
         "temperature": 0,
     },
+    "gpt-4o-2024-08-06": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 4096,
+        "temperature": 0,
+    },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -266,6 +271,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 200_000,
         "temperature": 0,
     },
+    "claude-3-5-sonnet-20240620": {
+        "default_max_tokens": 1000,
+        "limit_max_tokens": 200_000,
+        "temperature": 0,
+    },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -456,8 +466,8 @@ def multi_queries(
     model: str,
     num_queries: int,
     counter_callback: Optional[Callable[[int, int, str], None]] = None,
-    temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
-    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
+    temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
+    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
         "default_max_tokens"
     ],
 ) -> List[str]:
@@ -687,8 +697,8 @@ def find_similar_chunks(
 def multi_questions_response(
     prompt: str,
     model: str,
-    temperature: float = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
-    max_tokens: int = LLM_SETTINGS["gpt-4-0125-preview"]["default_max_tokens"],
+    temperature: float = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
+    max_tokens: int = LLM_SETTINGS["gpt-4o-2024-08-06"]["default_max_tokens"],
     counter_callback: Optional[Callable[[int, int, str], None]] = None,
 ) -> List[str]:
     """Generate multiple questions for fetching information from the web."""
@@ -796,8 +806,8 @@ def fetch_additional_information(
     source_links: Optional[List[str]] = None,
     num_urls: Optional[int] = DEFAULT_NUM_URLS,
     num_queries: Optional[int] = DEFAULT_NUM_QUERIES,
-    temperature: Optional[float] = LLM_SETTINGS["gpt-4-0125-preview"]["temperature"],
-    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4-0125-preview"][
+    temperature: Optional[float] = LLM_SETTINGS["gpt-4o-2024-08-06"]["temperature"],
+    max_tokens: Optional[int] = LLM_SETTINGS["gpt-4o-2024-08-06"][
         "default_max_tokens"
     ],
 ) -> Tuple[str, List[str], Optional[Callable[[int, int, str], None]]]:
@@ -920,7 +930,7 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     tool = kwargs["tool"]
     model = kwargs.get("model")
     if "claude" in tool:  # maintain backwards compatibility
-        model = "claude-3-sonnet-20240229"
+        model = "claude-3-5-sonnet-20240620"
     print(f"MODEL: {model}")
     with LLMClientManager(kwargs["api_keys"], model, embedding_provider="openai"):
         prompt = extract_question(kwargs["prompt"])
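Each run() entry point in these tools keeps the same backwards-compatibility escape hatch: any tool name containing "claude" overrides the caller's model choice with the current default Anthropic model. The selection logic, factored out as a sketch (the real modules inline it):

from typing import Optional


def select_model(tool: str, requested: Optional[str]) -> Optional[str]:
    """Mirror the run() preamble: legacy 'claude' tool names pin the model."""
    if "claude" in tool:  # maintain backwards compatibility
        return "claude-3-5-sonnet-20240620"
    return requested
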
diff --git a/packages/napthaai/customs/prediction_url_cot/component.yaml b/packages/napthaai/customs/prediction_url_cot/component.yaml
index c7c4a92b..85b50ccc 100644
--- a/packages/napthaai/customs/prediction_url_cot/component.yaml
+++ b/packages/napthaai/customs/prediction_url_cot/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeiflni5dkn5fqe7fnu4lgbqxzfrgochhqfbgzwz3vlf5grijp3nkpm
-  prediction_url_cot.py: bafybeiczu54zafzdmgcqfmlk6h2ze6yxgzbn5xqsisnedptwpnfjbtcgby
+  prediction_url_cot.py: bafybeibt7fs2lzr6srk6rbq6bmgthr22vh2towk5ibwjuh75b3oyiqh3fe
 fingerprint_ignore_patterns: []
 entry_point: prediction_url_cot.py
 callable: run
 params:
-  default_model: claude-3-sonnet-20240229
+  default_model: claude-3-5-sonnet-20240620
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py b/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
index f4c10160..8c16d18a 100644
--- a/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
+++ b/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
@@ -239,6 +239,11 @@ def embeddings(self, model, input):
         "limit_max_tokens": 200_000,
         "temperature": 0,
     },
+    "claude-3-5-sonnet-20240620": {
+        "default_max_tokens": 1000,
+        "limit_max_tokens": 200_000,
+        "temperature": 0,
+    },
 }
 ALLOWED_TOOLS = [
     "prediction-url-cot",
@@ -318,10 +323,10 @@ def multi_queries(
     model: str,
     num_queries: int,
     counter_callback: Optional[Callable[[int, int, str], None]] = None,
-    temperature: Optional[float] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    temperature: Optional[float] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "temperature"
     ],
-    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "default_max_tokens"
     ],
 ) -> List[str]:
@@ -566,10 +571,10 @@ def fetch_additional_information(
     source_links: Optional[List[str]] = None,
     num_urls: Optional[int] = NUM_URLS_PER_QUERY,
     num_queries: Optional[int] = NUM_QUERIES,
-    temperature: Optional[float] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    temperature: Optional[float] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "temperature"
     ],
-    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-sonnet-20240229"][
+    max_tokens: Optional[int] = LLM_SETTINGS["claude-3-5-sonnet-20240620"][
         "default_max_tokens"
     ],
     n_docs: int = N_DOCS,
@@ -659,7 +664,7 @@ def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
     tool = kwargs["tool"]
     model = kwargs.get("model")
     if "claude" in tool:  # maintain backwards compatibility
-        model = "claude-3-sonnet-20240229"
+        model = "claude-3-5-sonnet-20240620"
     print(f"MODEL: {model}")
     with LLMClientManager(kwargs["api_keys"], model):
         prompt = extract_question(kwargs["prompt"])
diff --git a/packages/napthaai/customs/resolve_market_reasoning/component.yaml b/packages/napthaai/customs/resolve_market_reasoning/component.yaml
index 076e119b..91521092 100644
--- a/packages/napthaai/customs/resolve_market_reasoning/component.yaml
+++ b/packages/napthaai/customs/resolve_market_reasoning/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
-  resolve_market_reasoning.py: bafybeibibdzyr3luhkz3a4f3quvr4y3pdtzmi4aekgbabwffh5l4safooa
+  resolve_market_reasoning.py: bafybeicok3opjbowli35ezctd7wa3e4yjlqbtcmejemvvhv2mz52cdfu4i
 fingerprint_ignore_patterns: []
 entry_point: resolve_market_reasoning.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/resolve_market_reasoning/resolve_market_reasoning.py b/packages/napthaai/customs/resolve_market_reasoning/resolve_market_reasoning.py
index 2087c92e..1c800f6e 100644
--- a/packages/napthaai/customs/resolve_market_reasoning/resolve_market_reasoning.py
+++ b/packages/napthaai/customs/resolve_market_reasoning/resolve_market_reasoning.py
@@ -128,6 +128,7 @@ def __exit__(self, exc_type, exc_value, traceback) -> None:
 MAX_TOKENS = {
     "gpt-3.5-turbo-0125": 4096,
     "gpt-4-0125-preview": 8192,
+    "gpt-4o-2024-08-06": 4096,
 }
 ALLOWED_TOOLS = [
     "resolve-market-reasoning-gpt-3.5-turbo",
@@ -135,7 +136,7 @@ def __exit__(self, exc_type, exc_value, traceback) -> None:
 ]
 TOOL_TO_ENGINE = {
     "resolve-market-reasoning-gpt-3.5-turbo": "gpt-3.5-turbo-0125",
-    "resolve-market-reasoning-gpt-4": "gpt-4-0125-preview",
+    "resolve-market-reasoning-gpt-4": "gpt-4o-2024-08-06",
 }
 DEFAULT_NUM_WORDS: Dict[str, Optional[int]] = defaultdict(lambda: 300)
 NUM_QUERIES = 3
diff --git a/packages/nickcom007/customs/prediction_request_sme/component.yaml b/packages/nickcom007/customs/prediction_request_sme/component.yaml
index 03675296..e11d9481 100644
--- a/packages/nickcom007/customs/prediction_request_sme/component.yaml
+++ b/packages/nickcom007/customs/prediction_request_sme/component.yaml
@@ -8,12 +8,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_request_sme.py: bafybeigasbnl5obetqi3doky3evpgcug4k3tapyfhy6o6owtebehiiwyge
+  prediction_request_sme.py: bafybeibpvmf4tqxudeyuy3uj2kapd4e5dlr6xhgvvp33xu7jotb5f6x7c4
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_sme.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   requests: {}
   google-api-python-client:
diff --git a/packages/nickcom007/customs/prediction_request_sme/prediction_request_sme.py b/packages/nickcom007/customs/prediction_request_sme/prediction_request_sme.py
index 63c473d5..7dfece61 100644
--- a/packages/nickcom007/customs/prediction_request_sme/prediction_request_sme.py
+++ b/packages/nickcom007/customs/prediction_request_sme/prediction_request_sme.py
@@ -74,14 +74,15 @@ def count_tokens(text: str, model: str) -> int:
 MAX_TOKENS = {
     "gpt-3.5-turbo-0125": 4096,
     "gpt-4-0125-preview": 8192,
+    "gpt-4o-2024-08-06": 4096,
 }
 ALLOWED_TOOLS = [
     "prediction-offline-sme",
     "prediction-online-sme",
 ]
 TOOL_TO_ENGINE = {
-    "prediction-offline-sme": "gpt-4-0125-preview",
-    "prediction-online-sme": "gpt-4-0125-preview",
+    "prediction-offline-sme": "gpt-4o-2024-08-06",
+    "prediction-online-sme": "gpt-4o-2024-08-06",
 }

 PREDICTION_PROMPT = """
diff --git a/packages/nickcom007/customs/prediction_request_sme_lite/component.yaml b/packages/nickcom007/customs/prediction_request_sme_lite/component.yaml
index 5b6ab880..da069a67 100644
--- a/packages/nickcom007/customs/prediction_request_sme_lite/component.yaml
+++ b/packages/nickcom007/customs/prediction_request_sme_lite/component.yaml
@@ -8,12 +8,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_request_sme.py: bafybeicz6praxf3njyd4ufmsbvzwyu6oimwon62fxlrhamui3moyzt2woq
+  prediction_request_sme.py: bafybeibsnt5ftgg2q33iwcgu6me2yfvqe3lwza7iwqbmys53befzrqax4q
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_sme.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   requests: {}
   google-api-python-client:
diff --git a/packages/nickcom007/customs/prediction_request_sme_lite/prediction_request_sme.py b/packages/nickcom007/customs/prediction_request_sme_lite/prediction_request_sme.py
index 06f2cecd..7a4b3fdf 100644
--- a/packages/nickcom007/customs/prediction_request_sme_lite/prediction_request_sme.py
+++ b/packages/nickcom007/customs/prediction_request_sme_lite/prediction_request_sme.py
@@ -74,14 +74,15 @@ def count_tokens(text: str, model: str) -> int:
 MAX_TOKENS = {
     "gpt-3.5-turbo-0125": 4096,
     "gpt-4-0125-preview": 8192,
+    "gpt-4o-2024-08-06": 4096,
 }
 ALLOWED_TOOLS = [
     "prediction-offline-sme",
     "prediction-online-sme",
 ]
 TOOL_TO_ENGINE = {
-    "prediction-offline-sme": "gpt-4-0125-preview",
-    "prediction-online-sme": "gpt-4-0125-preview",
+    "prediction-offline-sme": "gpt-4o-2024-08-06",
+    "prediction-online-sme": "gpt-4o-2024-08-06",
 }

 PREDICTION_PROMPT = """
diff --git a/packages/nickcom007/customs/sme_generation_request/component.yaml b/packages/nickcom007/customs/sme_generation_request/component.yaml
index cd93dec6..30347549 100644
--- a/packages/nickcom007/customs/sme_generation_request/component.yaml
+++ b/packages/nickcom007/customs/sme_generation_request/component.yaml
@@ -8,7 +8,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  sme_generation_request.py: bafybeihzfix5w756qqfjnk3nqoinsghukski6pfcorn7kvtmt63uu23yay
+  sme_generation_request.py: bafybeiazmlek3xk5q5mpkruxw3m4sc5oadkkbkf7qdmhbbvosj44gxdej4
 fingerprint_ignore_patterns: []
 entry_point: sme_generation_request.py
 callable: run
diff --git a/packages/nickcom007/customs/sme_generation_request/sme_generation_request.py b/packages/nickcom007/customs/sme_generation_request/sme_generation_request.py
index d9776440..bbd43350 100644
--- a/packages/nickcom007/customs/sme_generation_request/sme_generation_request.py
+++ b/packages/nickcom007/customs/sme_generation_request/sme_generation_request.py
@@ -121,7 +121,7 @@ def count_tokens(text: str, model: str) -> int:
 ]

 TOOL_TO_ENGINE = {
-    "strong-sme-generator": "gpt-4-0125-preview",
+    "strong-sme-generator": "gpt-4o-2024-08-06",
     "normal-sme-generator": "gpt-3.5-turbo-0125",
 }
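The SME tools pair count_tokens with a per-engine MAX_TOKENS table, so the new engine needs an entry before any budget check can run. A sketch of how the two pieces combine; fits_budget is illustrative rather than code from this repo, and tiktoken only recognizes the gpt-4o family in recent versions:

from tiktoken import encoding_for_model

MAX_TOKENS = {
    "gpt-3.5-turbo-0125": 4096,
    "gpt-4-0125-preview": 8192,
    "gpt-4o-2024-08-06": 4096,
}


def count_tokens(text: str, model: str) -> int:
    # Same approach as the tools: encode with the model's tokenizer and count.
    return len(encoding_for_model(model).encode(text))


def fits_budget(text: str, model: str, completion_tokens: int = 500) -> bool:
    # Illustrative: reserve room for the completion inside the engine's window.
    return count_tokens(text, model) + completion_tokens <= MAX_TOKENS[model]
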
"bafybeicogmamuqkomazmt7kcedk2sk3rgaefkfezgi7qrjsgoaohhi2yca", + "custom/valory/openai_request/0.1.0": "bafybeigz5brshryms5awq5zscxsxibjymdofm55dw5o6ud7gtwmodm3vmq", + "custom/valory/prediction_request_embedding/0.1.0": "bafybeihtwykqnoxluqo2n4w2ccoh4xqoc6pifevol6obho3fneg7touzj4", + "custom/valory/resolve_market/0.1.0": "bafybeidog2vsqmezxe63jqjpf7p6qmqy3opq3rppvihqtehf6k44hzyo74", + "custom/valory/prediction_request/0.1.0": "bafybeigupgsneg4nsaljassdcq4mu53abrglmw42vfrss5kwxy7fybtisu", "custom/valory/stability_ai_request/0.1.0": "bafybeiamqdkh3nqsul6ihgijvkxyyretpwzpssh6dps3cmovippaau7wmy", "custom/polywrap/prediction_with_research_report/0.1.0": "bafybeiebis63otzt7vy44zxk4uwfknrttfsibnas5x7sttwgh4lzuhrnna", - "custom/jhehemann/prediction_sum_url_content/0.1.0": "bafybeifd3y4ecgvhxuxouxn3alm6ditqkha522vjbyi2anyu53z5kma5gu", - "custom/psouranis/optimization_by_prompting/0.1.0": "bafybeidagnmpmfx6ltrtog5eaxa6bvi6zrugslqjnmgxqzvvvvvt3samcu", - "custom/nickcom007/sme_generation_request/0.1.0": "bafybeihhgdeagbqyn3n2g77bdggseqp43cg67suhxgedmcia5onsyio66i", - "custom/nickcom007/prediction_request_sme/0.1.0": "bafybeig4cmemfhvesa2jqyw7obzjjmt4ov6q7xmawtwvh4khxdwgjnpxsi", - "custom/napthaai/resolve_market_reasoning/0.1.0": "bafybeiciy3cihdnuclh5nfevuwzn7sxfxlvajxnijfkz47uif4v6y2bvsa", - "custom/napthaai/prediction_request_rag/0.1.0": "bafybeihb7ghzprpu7x5cp5onbs2vboq2j3pvtefmdiacxoxyan4reb4txi", - "custom/napthaai/prediction_request_reasoning/0.1.0": "bafybeib43xf542ftm22yvfjvlqgvmt5qgaqvsla5vrhchobt4zmbf7sj3q", + "custom/jhehemann/prediction_sum_url_content/0.1.0": "bafybeih6wp7icu5apa2uyuyisg65reh6ptl5umeji7qvgoluwplufkrypy", + "custom/psouranis/optimization_by_prompting/0.1.0": "bafybeigvweriadejipt7rhsekoksf6ff6tqwaovjywzmhnzh22khdtfbfa", + "custom/nickcom007/sme_generation_request/0.1.0": "bafybeicjcszg5hig6pr46vwsn2wsod6xl4jo3nj2ftxdkbotoe2h43t7bi", + "custom/nickcom007/prediction_request_sme/0.1.0": "bafybeif24uhwzxur2fdutrwgrhvzeo6m5rnwn6s5sfexdykyxqakle5huq", + "custom/napthaai/resolve_market_reasoning/0.1.0": "bafybeibyvxgspwaieatpi2hp63jo6cxcwleff4jorkiigjwjznl4vtd33a", + "custom/napthaai/prediction_request_rag/0.1.0": "bafybeianz73tnhtmsuwnfucyivdd2sbkasp46ipiaw4zykuogzppkxjdcq", + "custom/napthaai/prediction_request_reasoning/0.1.0": "bafybeie4vd6nwclpthkhar4oe7m567hben2qsdyc6knxcbd4yhlr3bbvym", "custom/valory/prepare_tx/0.1.0": "bafybeiggkqqyrj4n6qmtwgmx4r4qtrb2k6f67v42qyfgrgoow677vywgw4", "custom/valory/short_maker/0.1.0": "bafybeif63rt4lkopu3rc3l7sg6tebrrwg2lxqufjx6dx4hoda5yzax43fa", - "custom/napthaai/prediction_url_cot/0.1.0": "bafybeienwamxwdzswwcnqezkgub2kel64vbcwjj5dze5bmh42jdlr7w2tq", + "custom/napthaai/prediction_url_cot/0.1.0": "bafybeicdqi4udhqhtmlnk7mbmywlce4wnyj7vcpjhl7ffgmwhnyaqigdca", "custom/napthaai/prediction_url_cot_claude/0.1.0": "bafybeicbjywni5hx5ssoiv6tnnjbqzsck6cmtsdpr6m562z6afogz5eh44", "custom/napthaai/prediction_request_reasoning_claude/0.1.0": "bafybeihtx2cejxoy42jwk2i5m4evfzz537aic5njuawxnzdzwlo63kdduq", "custom/napthaai/prediction_request_rag_claude/0.1.0": "bafybeickr32t7nmapuoymjyo3cf5rr2v2zapksxcivuqsgjr2gn6zo6y7y", "custom/napthaai/prediction_request_rag_cohere/0.1.0": "bafybeid56svuhz3w47lq4kpsl72gsc2h2y7fxmndjrieo2rwq2vac6n6hy", - "custom/valory/prediction_request_lite/0.1.0": "bafybeiebuajlm7ehy6xhok3rjs7qholz6ax5i2buuwk7v4fngsy5cgnkky", - "custom/nickcom007/prediction_request_sme_lite/0.1.0": "bafybeif3uckvxkzbetfum5dvmljfxt24nxcwxezfbfo7zjdnnw4rd4shse", - "custom/napthaai/prediction_request_reasoning_lite/0.1.0": 
"bafybeic67uex272w7seifinwnzseapi6sv64gcpqwa757q6sb7s4ri2jje", + "custom/valory/prediction_request_lite/0.1.0": "bafybeibne6l4jmyuijqzs4gkrb3mi2iyy36fjj7apzs2kqheeu4ihgjm3y", + "custom/nickcom007/prediction_request_sme_lite/0.1.0": "bafybeic3tjzls5iqs7mljdohwuu4cmuzf2warfxqr2htorbka4nivlbrbq", + "custom/napthaai/prediction_request_reasoning_lite/0.1.0": "bafybeig2lsru5t7mkll6cswn46gc7xsgaqpkhwdjfohqwhqzi27sobi3sy", "custom/valory/prediction_langchain/0.1.0": "bafybeif7b45gk5kzdlvollxcqq4dwdcpbrucbqjoewjq3il62r26cqbone", "custom/victorpolisetty/gemini_request/0.1.0": "bafybeigukufdstoauoze3g7oz5mf4j4zqsdr756un5pdujocrp6eo5efgy", - "custom/gnosis/omen_tools/0.1.0": "bafybeibjlqdi5vgr37x6yprm6p3ayydh2h6rtixbo4wmlrydghh3kvbgdu", - "custom/victorpolisetty/dalle_request/0.1.0": "bafybeief3ivpnmtodgv2mamkwrxi75rdljyftp37sukhyr4qjul7ezsegq", - "custom/jhehemann/prediction_sentence_embeddings/0.1.0": "bafybeifivchm3pxiofu7y7a6gzfijfzgptvehcmd6auqsfsro7unaewbo4", - "custom/gnosis/ofv_market_resolver/0.1.0": "bafybeigarwxqzr4uir6nbxyikyddtqj75t5rabkdtzvbrdtl5kzznwmlta", + "custom/gnosis/omen_tools/0.1.0": "bafybeiglmyy3esctsejdqmz63juvaridbbjwjw3ch4mqudicsrgoir4qrq", + "custom/victorpolisetty/dalle_request/0.1.0": "bafybeieqqtd6gtlry7vheix54nj3ok4cag3uy47yoxlufhi6y3u5i6doti", + "custom/jhehemann/prediction_sentence_embeddings/0.1.0": "bafybeifyyb2wpa77tl7a7fs3fabns45llivhgccbnrpupubojmq2fwe4si", + "custom/gnosis/ofv_market_resolver/0.1.0": "bafybeiemvfq6uxiz3wvdplnxg7wloy6siuggejerlkfkchks6ytgk27uqa", "protocol/valory/acn_data_share/0.1.0": "bafybeih5ydonnvrwvy2ygfqgfabkr47s4yw3uqxztmwyfprulwfsoe7ipq", "protocol/valory/websocket_client/0.1.0": "bafybeifjk254sy65rna2k32kynzenutujwqndap2r222afvr3zezi27mx4", "contract/valory/agent_mech/0.1.0": "bafybeiah6b5epo2hlvzg5rr2cydgpp2waausoyrpnoarf7oa7bw33rex34", @@ -40,11 +40,11 @@ "skill/valory/contract_subscription/0.1.0": "bafybeiefuemlp75obgpxrp6iuleb3hn6vcviwh5oetk5djbuprf4xsmgjy", "skill/valory/mech_abci/0.1.0": "bafybeicvbtbmtdps7lsbsutojiic4sam3jmi3prev5u22tps6pxy4du7nq", "skill/valory/task_submission_abci/0.1.0": "bafybeihvvp6ovstb7po7kx6o7dno6kvjmffnmlerfw6qxhhzyrxmktd67e", - "skill/valory/task_execution/0.1.0": "bafybeia6knadhxkquc3g3eg75jjxipbwn5slgk77y7teauv4q54uxnthly", + "skill/valory/task_execution/0.1.0": "bafybeie5obeuqn2g2rgtubqmdfxgyiri2yb5gbkvpvnyw7xapkqykoxmj4", "skill/valory/websocket_client/0.1.0": "bafybeif7rrvsu6z4evqkhblxj3u6wwv2eqou576hgkyoehxuj7cntw7o2m", "skill/valory/subscription_abci/0.1.0": "bafybeihtortmv4fqua5wrnshpnvqsbpaf52frwynrmpuv2uw5j7wkauhze", - "agent/valory/mech/0.1.0": "bafybeig3e3acg3p4mhj37afk2xepzlx3mfsmbirpes4t3m6jdmep7c22ym", - "service/valory/mech/0.1.0": "bafybeick7kaistdt7mpteeeynnqdkppbua75gj7clsa3fpcqlrrz42kcdu" + "agent/valory/mech/0.1.0": "bafybeiebo5ysylvl3hktjn7s7smh4zyk4z3ceyuqtzr3o6wfhenolrovdu", + "service/valory/mech/0.1.0": "bafybeicy4f7cgrzjokzii65uvhiihhvsxbpqnkjdfbjtryxugeywtldkcu" }, "third_party": { "protocol/valory/default/1.0.0": "bafybeifqcqy5hfbnd7fjv4mqdjrtujh2vx3p2xhe33y67zoxa6ph7wdpaq", diff --git a/packages/psouranis/customs/optimization_by_prompting/component.yaml b/packages/psouranis/customs/optimization_by_prompting/component.yaml index a85691d4..28a3ecf1 100644 --- a/packages/psouranis/customs/optimization_by_prompting/component.yaml +++ b/packages/psouranis/customs/optimization_by_prompting/component.yaml @@ -7,7 +7,7 @@ license: Apache-2.0 aea_version: '>=1.0.0, <2.0.0' fingerprint: __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi - 
diff --git a/packages/psouranis/customs/optimization_by_prompting/component.yaml b/packages/psouranis/customs/optimization_by_prompting/component.yaml
index a85691d4..28a3ecf1 100644
--- a/packages/psouranis/customs/optimization_by_prompting/component.yaml
+++ b/packages/psouranis/customs/optimization_by_prompting/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  optimization_by_prompting.py: bafybeidoppvinojin2xuro47na2qwmsz4judd6obtsjon46ywwks7t22ue
+  optimization_by_prompting.py: bafybeiagqrdueab3h7kwgfdjjdk2jlnwl3gkc46qhdnmtdzkz7vqqmc4py
 fingerprint_ignore_patterns: []
 entry_point: optimization_by_prompting.py
 callable: run
diff --git a/packages/psouranis/customs/optimization_by_prompting/optimization_by_prompting.py b/packages/psouranis/customs/optimization_by_prompting/optimization_by_prompting.py
index 142aa3e3..b810f867 100644
--- a/packages/psouranis/customs/optimization_by_prompting/optimization_by_prompting.py
+++ b/packages/psouranis/customs/optimization_by_prompting/optimization_by_prompting.py
@@ -141,7 +141,7 @@ def count_tokens(text: str, model: str) -> int:
     "deepmind-optimization",
 ]
 TOOL_TO_ENGINE = {
-    "deepmind-optimization-strong": "gpt-4-0125-preview",
+    "deepmind-optimization-strong": "gpt-4o-2024-08-06",
     "deepmind-optimization": "gpt-3.5-turbo-0125",
 }

@@ -279,7 +279,7 @@ def prompt_engineer(
     init_instructions,
     instructions_format,
     iterations=3,
-    model_name="gpt-4-0125-preview",
+    model_name="gpt-4o-2024-08-06",
 ):
     llm = OpenAILLM(model_name=model_name, openai_api_key=openai_api_key)
     score_template = {"template": init_instructions, "score": 0.0}
diff --git a/packages/valory/agents/mech/aea-config.yaml b/packages/valory/agents/mech/aea-config.yaml
index 9c8ae29e..0416b393 100644
--- a/packages/valory/agents/mech/aea-config.yaml
+++ b/packages/valory/agents/mech/aea-config.yaml
@@ -42,7 +42,7 @@ skills:
 - valory/registration_abci:0.1.0:bafybeiek7zcsxbucjwzgqfftafhfrocvc7q4yxllh2q44jeemsjxg3rcfm
 - valory/reset_pause_abci:0.1.0:bafybeidw4mbx3os3hmv7ley7b3g3gja7ydpitr7mxbjpwzxin2mzyt5yam
 - valory/subscription_abci:0.1.0:bafybeihtortmv4fqua5wrnshpnvqsbpaf52frwynrmpuv2uw5j7wkauhze
-- valory/task_execution:0.1.0:bafybeia6knadhxkquc3g3eg75jjxipbwn5slgk77y7teauv4q54uxnthly
+- valory/task_execution:0.1.0:bafybeie5obeuqn2g2rgtubqmdfxgyiri2yb5gbkvpvnyw7xapkqykoxmj4
 - valory/task_submission_abci:0.1.0:bafybeihvvp6ovstb7po7kx6o7dno6kvjmffnmlerfw6qxhhzyrxmktd67e
 - valory/termination_abci:0.1.0:bafybeihq6qtbwt6i53ayqym63vhjexkcppy26gguzhhjqywfmiuqghvv44
 - valory/transaction_settlement_abci:0.1.0:bafybeigtzlk4uakmd54rxnznorcrstsr52kta474lgrnvx5ovr546vj7sq
diff --git a/packages/valory/customs/openai_request/component.yaml b/packages/valory/customs/openai_request/component.yaml
index 491dbf46..8a3ba3f6 100644
--- a/packages/valory/customs/openai_request/component.yaml
+++ b/packages/valory/customs/openai_request/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  openai_request.py: bafybeidqfig3253ikrohbh2i3wdeh4azydapishq3rnwpwaj6dbl5t2c5y
+  openai_request.py: bafybeiefuf2ssw6eid2bzy5qzoihxsc5arsmith62nyxisuoukqan5ady4
 fingerprint_ignore_patterns: []
 entry_point: openai_request.py
 callable: run
diff --git a/packages/valory/customs/openai_request/openai_request.py b/packages/valory/customs/openai_request/openai_request.py
index 23830fdc..3ffd3af5 100644
--- a/packages/valory/customs/openai_request/openai_request.py
+++ b/packages/valory/customs/openai_request/openai_request.py
@@ -113,7 +113,7 @@ def count_tokens(text: str, model: str) -> int:
 }
 PREFIX = "openai-"
 ENGINES = {
-    "chat": ["gpt-3.5-turbo", "gpt-4"],
+    "chat": ["gpt-3.5-turbo", "gpt-4o-2024-08-06"],
     "completion": ["gpt-3.5-turbo-instruct"],
 }
 ALLOWED_TOOLS = [PREFIX + value for values in ENGINES.values() for value in values]
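Worth noting for openai_request.py above: ALLOWED_TOOLS is derived from ENGINES, so swapping the engine string also renames the public tool. Reproducing the comprehension with the new values:

PREFIX = "openai-"
ENGINES = {
    "chat": ["gpt-3.5-turbo", "gpt-4o-2024-08-06"],
    "completion": ["gpt-3.5-turbo-instruct"],
}
ALLOWED_TOOLS = [PREFIX + value for values in ENGINES.values() for value in values]
# ['openai-gpt-3.5-turbo', 'openai-gpt-4o-2024-08-06', 'openai-gpt-3.5-turbo-instruct']
# Callers still requesting the old "openai-gpt-4" tool name will now be rejected.
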
diff --git a/packages/valory/customs/prediction_request/component.yaml b/packages/valory/customs/prediction_request/component.yaml
index 6396a1e8..692addce 100644
--- a/packages/valory/customs/prediction_request/component.yaml
+++ b/packages/valory/customs/prediction_request/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_request.py: bafybeign2wo6w6a76opopleflwfcvwu5szc32fzgldcnxlaqvoy6sflftq
+  prediction_request.py: bafybeigs2aycielziuthuvfmlvknmhrcj5ck7hpptxopowhrdtk3lvwale
 fingerprint_ignore_patterns: []
 entry_point: prediction_request.py
 callable: run
 params:
-  default_model: gpt-4-0125-preview
+  default_model: gpt-4o-2024-08-06
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/valory/customs/prediction_request/prediction_request.py b/packages/valory/customs/prediction_request/prediction_request.py
index bbbdaf74..e04ae26e 100644
--- a/packages/valory/customs/prediction_request/prediction_request.py
+++ b/packages/valory/customs/prediction_request/prediction_request.py
@@ -250,6 +250,11 @@ def count_tokens(text: str, model: str) -> int:
         "limit_max_tokens": 8192,
         "temperature": 0,
     },
+    "gpt-4o-2024-08-06": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 4096,
+        "temperature": 0,
+    },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -260,6 +265,11 @@ def count_tokens(text: str, model: str) -> int:
         "limit_max_tokens": 200_000,
         "temperature": 0,
     },
+    "claude-3-5-sonnet-20240620": {
+        "default_max_tokens": 1000,
+        "limit_max_tokens": 200_000,
+        "temperature": 0,
+    },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 200_000,
@@ -721,7 +731,7 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     tool = kwargs["tool"]
     engine = kwargs.get("model")
     if "claude" in tool:  # maintain backwards compatibility
-        engine = "claude-3-sonnet-20240229"
+        engine = "claude-3-5-sonnet-20240620"
     print(f"ENGINE: {engine}")
     with LLMClientManager(kwargs["api_keys"], engine):
         prompt = kwargs["prompt"]
diff --git a/packages/valory/customs/prediction_request_embedding/component.yaml b/packages/valory/customs/prediction_request_embedding/component.yaml
index 5ab43c73..3b7cfffc 100644
--- a/packages/valory/customs/prediction_request_embedding/component.yaml
+++ b/packages/valory/customs/prediction_request_embedding/component.yaml
@@ -8,7 +8,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_sentence_embedding.py: bafybeiboko6heairzztzhevye7jmt2x4kbpm75u3gaay7brafu23ktursi
+  prediction_sentence_embedding.py: bafybeie6gdmestvfteaoep4tqsnj3jjaz3c5ccz3ab44a2knf3o6vpifgu
 fingerprint_ignore_patterns: []
 entry_point: prediction_sentence_embedding.py
 callable: run
diff --git a/packages/valory/customs/prediction_request_embedding/prediction_sentence_embedding.py b/packages/valory/customs/prediction_request_embedding/prediction_sentence_embedding.py
index 7cdb6d9f..d6d32542 100644
--- a/packages/valory/customs/prediction_request_embedding/prediction_sentence_embedding.py
+++ b/packages/valory/customs/prediction_request_embedding/prediction_sentence_embedding.py
@@ -138,7 +138,7 @@ def count_tokens(text: str, model: str) -> int:
 ]
 TOOL_TO_ENGINE = {
     "prediction-sentence-embedding-conservative": "gpt-3.5-turbo-0125",
-    "prediction-sentence-embedding-bold": "gpt-4-0125-preview",
"prediction-sentence-embedding-bold": "gpt-4o-2024-08-06", } @@ -1097,7 +1097,7 @@ def fetch_additional_information( google_api_key: str, google_engine: str, nlp, - engine: str = "gpt-4-0125-preview", + engine: str = "gpt-4o-2024-08-06", temperature: float = 0.5, max_compl_tokens: int = 500, ) -> str: @@ -1110,7 +1110,7 @@ def fetch_additional_information( google_api_key (str): The API key for the Google service. google_engine (str): The Google engine to be used. temperature (float): The temperature parameter for the engine. - engine (str): The openai engine. Defaults to "gpt-4-0125-preview". + engine (str): The openai engine. Defaults to "gpt-4o-2024-08-06". temperature (float): The temperature parameter for the engine. Defaults to 1.0. max_compl_tokens (int): The maximum number of tokens for the engine's response. @@ -1222,7 +1222,7 @@ def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]: # Fetch additional information additional_information = fetch_additional_information( event_question=event_question, - engine="gpt-4-0125-preview", + engine="gpt-4o-2024-08-06", temperature=0.5, max_compl_tokens=max_compl_tokens, nlp=nlp, diff --git a/packages/valory/customs/prediction_request_lite/component.yaml b/packages/valory/customs/prediction_request_lite/component.yaml index b0d11dcf..dc5d0d36 100644 --- a/packages/valory/customs/prediction_request_lite/component.yaml +++ b/packages/valory/customs/prediction_request_lite/component.yaml @@ -7,12 +7,12 @@ license: Apache-2.0 aea_version: '>=1.0.0, <2.0.0' fingerprint: __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi - prediction_request.py: bafybeifld3nh4wn2f42xpc3cox2einvvpbhoodzihvur7xmpkhqrbycht4 + prediction_request.py: bafybeigrachllr7ifd2ku65pmyartgizjxw2jxfrx7wm3gxdmdkmpsjb3u fingerprint_ignore_patterns: [] entry_point: prediction_request.py callable: run params: - default_model: gpt-4-0125-preview + default_model: gpt-4o-2024-08-06 dependencies: google-api-python-client: version: ==2.95.0 diff --git a/packages/valory/customs/prediction_request_lite/prediction_request.py b/packages/valory/customs/prediction_request_lite/prediction_request.py index 929614cc..d264eaa5 100644 --- a/packages/valory/customs/prediction_request_lite/prediction_request.py +++ b/packages/valory/customs/prediction_request_lite/prediction_request.py @@ -249,6 +249,11 @@ def count_tokens(text: str, model: str) -> int: "limit_max_tokens": 8192, "temperature": 0, }, + "gpt-4o-2024-08-06": { + "default_max_tokens": 500, + "limit_max_tokens": 4096, + "temperature": 0, + }, "claude-3-haiku-20240307": { "default_max_tokens": 1000, "limit_max_tokens": 200_000, @@ -264,6 +269,11 @@ def count_tokens(text: str, model: str) -> int: "limit_max_tokens": 200_000, "temperature": 0, }, + "claude-3-5-sonnet-20240620": { + "default_max_tokens": 1000, + "limit_max_tokens": 200_000, + "temperature": 0, + }, "databricks/dbrx-instruct:nitro": { "default_max_tokens": 500, "limit_max_tokens": 32_768, @@ -737,7 +747,7 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]: tool = kwargs["tool"].replace("-lite", "") engine = kwargs.get("model") if "claude" in tool: # maintain backwards compatibility - engine = "claude-3-sonnet-20240229" + engine = "claude-3-5-sonnet-20240620" print(f"ENGINE: {engine}") with LLMClientManager(kwargs["api_keys"], engine): prompt = kwargs["prompt"] diff --git a/packages/valory/customs/resolve_market/component.yaml b/packages/valory/customs/resolve_market/component.yaml index 
index 18be691e..b08b7e56 100644
--- a/packages/valory/customs/resolve_market/component.yaml
+++ b/packages/valory/customs/resolve_market/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeigiijf3cqbpr42srzgpro3rvyvb47fnikcbsii6rjr2uwp3x764hi
-  resolve_market.py: bafybeif6inyhqusi3mukg3x2d6x6lhs3uuop5snleqwet5dl6lwsonmb7u
+  resolve_market.py: bafybeifisgmtyyckxr5od2mczmf47p2abr5ywztb77rrjvicwwo3twycry
 fingerprint_ignore_patterns: []
 entry_point: resolve_market.py
 callable: run
diff --git a/packages/valory/customs/resolve_market/resolve_market.py b/packages/valory/customs/resolve_market/resolve_market.py
index 56909135..c84cefed 100644
--- a/packages/valory/customs/resolve_market/resolve_market.py
+++ b/packages/valory/customs/resolve_market/resolve_market.py
@@ -120,7 +120,7 @@ def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore
 ALLOWED_TOOLS = [
     "close_market",
 ]
-TOOL_TO_ENGINE = {tool: "gpt-4-0125-preview" for tool in ALLOWED_TOOLS}
+TOOL_TO_ENGINE = {tool: "gpt-4o-2024-08-06" for tool in ALLOWED_TOOLS}

 NEWSAPI_ENDPOINT = "https://newsapi.org/v2"
 TOP_HEADLINES = "top-headlines"
diff --git a/packages/valory/services/mech/service.yaml b/packages/valory/services/mech/service.yaml
index 3f8e65ef..2d85b054 100644
--- a/packages/valory/services/mech/service.yaml
+++ b/packages/valory/services/mech/service.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 fingerprint:
   README.md: bafybeif7ia4jdlazy6745ke2k2x5yoqlwsgwr6sbztbgqtwvs3ndm2p7ba
 fingerprint_ignore_patterns: []
-agent: valory/mech:0.1.0:bafybeig3e3acg3p4mhj37afk2xepzlx3mfsmbirpes4t3m6jdmep7c22ym
+agent: valory/mech:0.1.0:bafybeiebo5ysylvl3hktjn7s7smh4zyk4z3ceyuqtzr3o6wfhenolrovdu
 number_of_agents: 4
 deployment:
   agent:
diff --git a/packages/valory/skills/task_execution/skill.yaml b/packages/valory/skills/task_execution/skill.yaml
index 66c405f7..712f969f 100644
--- a/packages/valory/skills/task_execution/skill.yaml
+++ b/packages/valory/skills/task_execution/skill.yaml
@@ -13,7 +13,7 @@ fingerprint:
   models.py: bafybeid6befxrrbiaw7nduz4zgbm5nfc246fn2eb6rfmja6v5hmq4wtcwe
   utils/__init__.py: bafybeiccdijaigu6e5p2iruwo5mkk224o7ywedc7nr6xeu5fpmhjqgk24e
   utils/apis.py: bafybeigu73lfz3g3mc6iupisrvlsp3fyl4du3oqlyajgdpfvtqypddh3w4
-  utils/benchmarks.py: bafybeic2jerkq6mzaxgnsgpwxvbl5jw7m7ilrc6i3htk23ord5nwqglewq
+  utils/benchmarks.py: bafybeiafnee7iay6dyjnatyqyzjov5c4ibl3ojamjmgfjri7cyghl7qayq
   utils/cost_calculation.py: bafybeighafxied73w3mcmgziwfp3u2x6t4qlztw4kyekyq2ddgyhdge74q
   utils/ipfs.py: bafybeic7cbuv3tomi2xv7h2qowrqnpoufpanngzlgzljl4ptimpss3meqm
   utils/task.py: bafybeicb6nqd475ul6mz4hcexpva33ivkn4fygicgmlb4clu5cuzr34diy
diff --git a/packages/valory/skills/task_execution/utils/benchmarks.py b/packages/valory/skills/task_execution/utils/benchmarks.py
index e5c2d620..b432ac12 100644
--- a/packages/valory/skills/task_execution/utils/benchmarks.py
+++ b/packages/valory/skills/task_execution/utils/benchmarks.py
@@ -36,9 +36,11 @@ class TokenCounterCallback:
         "gpt-4-turbo-preview": {"input": 0.01, "output": 0.03},
         "gpt-4-0125-preview": {"input": 0.01, "output": 0.03},
         "gpt-4-1106-preview": {"input": 0.01, "output": 0.03},
+        "gpt-4o-2024-08-06": {"input": 0.01, "output": 0.03},
         "claude-2": {"input": 0.008, "output": 0.024},
         "claude-3-haiku-20240307": {"input": 0.00025, "output": 0.00125},
         "claude-3-sonnet-20240229": {"input": 0.003, "output": 0.015},
+        "claude-3-5-sonnet-20240620": {"input": 0.003, "output": 0.015},
         "claude-3-opus-20240229": {"input": 0.015, "output": 0.075},
         "cohere/command-r-plus": {"input": 0.003, "output": 0.015},
         "databricks/dbrx-instruct:nitro": {"input": 0.0009, "output": 0.0009},
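The benchmarks hunk above prices the new models in TokenCounterCallback's table; the figures mirror the models they replace (gpt-4o takes the gpt-4-preview rates, claude-3-5-sonnet takes claude-3-sonnet's). Assuming the table is USD per 1,000 tokens, consistent with the surrounding entries, the cost arithmetic looks like this (illustrative helper, not the callback's actual code):

TOKEN_PRICES = {
    "gpt-4o-2024-08-06": {"input": 0.01, "output": 0.03},
    "claude-3-5-sonnet-20240620": {"input": 0.003, "output": 0.015},
}


def call_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    price = TOKEN_PRICES[model]
    return (input_tokens / 1000) * price["input"] + (output_tokens / 1000) * price["output"]


# 1,200 prompt tokens + 400 completion tokens on the new default model:
print(round(call_cost("gpt-4o-2024-08-06", 1_200, 400), 4))  # 0.024
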
"cohere/command-r-plus": {"input": 0.003, "output": 0.015}, "databricks/dbrx-instruct:nitro": {"input": 0.0009, "output": 0.0009}, diff --git a/packages/victorpolisetty/customs/dalle_request/component.yaml b/packages/victorpolisetty/customs/dalle_request/component.yaml index e202a1d1..cdb4c97d 100644 --- a/packages/victorpolisetty/customs/dalle_request/component.yaml +++ b/packages/victorpolisetty/customs/dalle_request/component.yaml @@ -7,7 +7,7 @@ license: Apache-2.0 aea_version: '>=1.0.0, <2.0.0' fingerprint: __init__.py: bafybeiaesxnkadyuhsacvtrz774edwal2ygz3hillffbkdi4yq2z44r5hm - dalle_request.py: bafybeieihdtkdik6vcbow4zfpng6esfqtjsanrrgn5xc5iiw2sejqemnyy + dalle_request.py: bafybeiesauxklzm3trj2du6ismrotwmb5vvyrmw2yb3tbhvhjk4lqdc5d4 fingerprint_ignore_patterns: [] entry_point: dalle_request.py callable: run diff --git a/packages/victorpolisetty/customs/dalle_request/dalle_request.py b/packages/victorpolisetty/customs/dalle_request/dalle_request.py index c1abc8f9..43f3d5c6 100644 --- a/packages/victorpolisetty/customs/dalle_request/dalle_request.py +++ b/packages/victorpolisetty/customs/dalle_request/dalle_request.py @@ -1,5 +1,7 @@ import functools from typing import Any, Dict, Optional, Tuple, Callable + +import openai from openai import OpenAI from tiktoken import encoding_for_model diff --git a/tests/test_tools.py b/tests/test_tools.py index 0ce41ce7..c98f6cb6 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -211,3 +211,4 @@ class TestOfvMarketResolverTool(BaseToolTest): prompts = [ 'Please take over the role of a Data Scientist to evaluate the given question. With the given question "Will Apple release iPhone 17 by March 2025?" and the `yes` option represented by `Yes` and the `no` option represented by `No`, what are the respective probabilities of `p_yes` and `p_no` occurring?' ] + tool_module = ofv_market_resolver