diff --git a/packages/napthaai/customs/prediction_request_rag/component.yaml b/packages/napthaai/customs/prediction_request_rag/component.yaml
index 01db9695..0f05f942 100644
--- a/packages/napthaai/customs/prediction_request_rag/component.yaml
+++ b/packages/napthaai/customs/prediction_request_rag/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibt7f7crtwvmkg7spy3jhscmlqltvyblzp32g6gj44v7tlo5lycuq
-  prediction_request_rag.py: bafybeibeuxk2znvfrs5cd675jel5zvbtg7rpjzkbv3vjk5xvjrz2vk7qbq
+  prediction_request_rag.py: bafybeicllugnruskdj7ipmrj2vrtlxmjpqtwlk4c3cfjttfzuvkeldp3m4
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_rag.py
 callable: run
 params:
-  default_model: claude-3-sonnet-20240229
+  default_model: gpt-4-0125-preview
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py b/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
index c1e7a4d3..09538603 100644
--- a/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
+++ b/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
@@ -225,6 +225,9 @@ def embeddings(self, model, input):
 }
 ALLOWED_TOOLS = [
     "prediction-request-rag",
+
+    # LEGACY
+    "prediction-request-rag-claude",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 DEFAULT_NUM_URLS = defaultdict(lambda: 3)
@@ -673,12 +676,14 @@ def parser_prediction_response(response: str) -> str:
 
 def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     model = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        model = "claude-3-sonnet-20240229"
     print(f"MODEL: {model}")
     with LLMClientManager(
         kwargs["api_keys"], model, embedding_provider="openai"
     ):
-        tool = kwargs["tool"]
         prompt = extract_question(kwargs["prompt"])
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[model]["default_max_tokens"]
diff --git a/packages/napthaai/customs/prediction_request_reasoning/component.yaml b/packages/napthaai/customs/prediction_request_reasoning/component.yaml
index 531eaa9d..c4e2b520 100644
--- a/packages/napthaai/customs/prediction_request_reasoning/component.yaml
+++ b/packages/napthaai/customs/prediction_request_reasoning/component.yaml
@@ -7,12 +7,12 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeib36ew6vbztldut5xayk5553rylrq7yv4cpqyhwc5ktvd4cx67vwu
-  prediction_request_reasoning.py: bafybeidhdrx5j475uvezhu3wnso66whuybsj77qq6hrqsreh6lc25ux7qe
+  prediction_request_reasoning.py: bafybeidb43nygtvbhimnsd223ddpoii46dwirb5znmp2g473u4jii36jqa
 fingerprint_ignore_patterns: []
 entry_point: prediction_request_reasoning.py
 callable: run
 params:
-  default_model: claude-3-sonnet-20240229
+  default_model: gpt-4-0125-preview
 dependencies:
   google-api-python-client:
     version: ==2.95.0
diff --git a/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py b/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
index 098c8a75..bf750419 100644
--- a/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
+++ b/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
@@ -228,6 +228,9 @@ def embeddings(self, model, input):
 }
 ALLOWED_TOOLS = [
     "prediction-request-reasoning",
+
+    # LEGACY
+    "prediction-request-reasoning-claude",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 DEFAULT_NUM_URLS = defaultdict(lambda: 3)
@@ -839,12 +842,14 @@ def extract_question(prompt: str) -> str:
 
 def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     model = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        model = "claude-3-sonnet-20240229"
     print(f"MODEL: {model}")
     with LLMClientManager(
         kwargs["api_keys"], model, embedding_provider="openai"
     ):
-        tool = kwargs["tool"]
         prompt = extract_question(kwargs["prompt"])
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[model]["default_max_tokens"]
diff --git a/packages/napthaai/customs/prediction_url_cot/component.yaml b/packages/napthaai/customs/prediction_url_cot/component.yaml
index a40c1f0e..67f3e402 100644
--- a/packages/napthaai/customs/prediction_url_cot/component.yaml
+++ b/packages/napthaai/customs/prediction_url_cot/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeiflni5dkn5fqe7fnu4lgbqxzfrgochhqfbgzwz3vlf5grijp3nkpm
-  prediction_url_cot.py: bafybeiae5r4xpyqaymqa3v5yvyp3xbjy5agjmm73edczw6moqxear45km4
+  prediction_url_cot.py: bafybeihebxfv4xj22nq4mkch6xuddcnu7jv473zec2n5p65oxy63asjudy
 fingerprint_ignore_patterns: []
 entry_point: prediction_url_cot.py
 callable: run
diff --git a/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py b/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
index bf6c9bc6..fa20e274 100644
--- a/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
+++ b/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
@@ -185,6 +185,9 @@ def embeddings(self, model, input):
 }
 ALLOWED_TOOLS = [
     "prediction-url-cot",
+
+    # LEGACY
+    "prediction-url-cot-claude",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 NUM_QUERIES = 5
@@ -588,10 +591,12 @@ def parser_prediction_response(response: str) -> str:
 
 def run(**kwargs) -> Tuple[Optional[str], Any, Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     model = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        model = "claude-3-sonnet-20240229"
     print(f"MODEL: {model}")
     with LLMClientManager(kwargs["api_keys"], model):
-        tool = kwargs["tool"]
         prompt = extract_question(kwargs["prompt"])
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[model]["default_max_tokens"]
diff --git a/packages/packages.json b/packages/packages.json
index 59d08845..99cbd799 100644
--- a/packages/packages.json
+++ b/packages/packages.json
@@ -5,7 +5,7 @@
         "custom/valory/openai_request/0.1.0": "bafybeihjtddwwkvwzaltk6yhtkk3xxnwnkurdtyuy6ki5tpf7h5htvuxnq",
         "custom/valory/prediction_request_embedding/0.1.0": "bafybeifnz5fzxvzyj3mmjpfsre3nzbdieuyjvnxqxuplopp5taz4qw7ys4",
         "custom/valory/resolve_market/0.1.0": "bafybeiaag2e7rsdr3bwg6mlmfyom4vctsdapohco7z45pxhzjymepz3rya",
-        "custom/valory/prediction_request/0.1.0": "bafybeiboljzvtnfrf3z4zsepwnyys3b53q7lszcmmcoxxu5f72oyuvjkv4",
+        "custom/valory/prediction_request/0.1.0": "bafybeibnshbgciu6inzdjzxeysrwvsin4iitkgd4fkj7a2omjzbdrga2ue",
         "custom/valory/stability_ai_request/0.1.0": "bafybeicyyteycvzj4lk33p4t7mspfarc5d5ktbysu7oqkv6woo4aouxira",
         "custom/polywrap/prediction_with_research_report/0.1.0": "bafybeiewbcbfyjnyqyp4oou6ianxseakblwjyck22bd2doqojjk37uyxwy",
         "custom/jhehemann/prediction_sum_url_content/0.1.0": "bafybeiby55g53cvc4vpbgww5awrlf6x67h7q7pg5xlhwber75ejdkh4twa",
@@ -13,11 +13,11 @@
         "custom/nickcom007/sme_generation_request/0.1.0": "bafybeibqv4ru4lpufy2hvcb3swqhzuq2kejjxmlyepofx6l6mxce6lhiqq",
         "custom/nickcom007/prediction_request_sme/0.1.0": "bafybeigsszaat6k5m5a3ljyem7xdhjflpcm24imtcscgst3tghpwhamglu",
         "custom/napthaai/resolve_market_reasoning/0.1.0": "bafybeiewdqtfkee3od5kuktrhyzexy7466ea3w3to7vv6qnli6qutfrqaa",
-        "custom/napthaai/prediction_request_rag/0.1.0": "bafybeihpvcoy6cg4humgxaqukanjedm32tdcvbpblez4zoycp5vswgpsca",
-        "custom/napthaai/prediction_request_reasoning/0.1.0": "bafybeie5ga4rhno4ua2s5wnsjks2kkgz5ismalsebowii5qnqo74lh2svy",
+        "custom/napthaai/prediction_request_rag/0.1.0": "bafybeif7ufhrlhpuegm6kpiw6jzye6jmp4fjvxgn3hwcv4vkolrrrmidmy",
+        "custom/napthaai/prediction_request_reasoning/0.1.0": "bafybeifzkvc6j5wbbremt2jqig4ozaackzpz3o5okkoihmm3wdpptpviz4",
         "custom/valory/prepare_tx/0.1.0": "bafybeibjqckeb73df724lr4xkrmeh3woqwas4mswa7au65xnwag2edad2e",
         "custom/valory/short_maker/0.1.0": "bafybeif63rt4lkopu3rc3l7sg6tebrrwg2lxqufjx6dx4hoda5yzax43fa",
-        "custom/napthaai/prediction_url_cot/0.1.0": "bafybeid5uwf5i7epbztlmhptbgf2jhsscab5lhyxih7ejpkszrbqtrf6k4",
+        "custom/napthaai/prediction_url_cot/0.1.0": "bafybeic3ch7wfhxqvwgoud7xotuu3khs4xch3ej35kox2gulya2hv65wbu",
         "custom/napthaai/prediction_url_cot_claude/0.1.0": "bafybeicbjywni5hx5ssoiv6tnnjbqzsck6cmtsdpr6m562z6afogz5eh44",
         "custom/napthaai/prediction_request_reasoning_claude/0.1.0": "bafybeihtx2cejxoy42jwk2i5m4evfzz537aic5njuawxnzdzwlo63kdduq",
         "custom/napthaai/prediction_request_rag_claude/0.1.0": "bafybeickr32t7nmapuoymjyo3cf5rr2v2zapksxcivuqsgjr2gn6zo6y7y",
diff --git a/packages/valory/customs/prediction_request/component.yaml b/packages/valory/customs/prediction_request/component.yaml
index 58f246d4..fb8ca1e7 100644
--- a/packages/valory/customs/prediction_request/component.yaml
+++ b/packages/valory/customs/prediction_request/component.yaml
@@ -7,7 +7,7 @@ license: Apache-2.0
 aea_version: '>=1.0.0, <2.0.0'
 fingerprint:
   __init__.py: bafybeibbn67pnrrm4qm3n3kbelvbs3v7fjlrjniywmw2vbizarippidtvi
-  prediction_request.py: bafybeihf36nxqee2xgigty2fnd2lkbebpn7elywgfh3dttmxiri6ulqivm
+  prediction_request.py: bafybeigf5k62mxbmcrvjvsnixpbn3hvxlp2l62sk7jtx5vs7fdg5cgtfxe
 fingerprint_ignore_patterns: []
 entry_point: prediction_request.py
 callable: run
diff --git a/packages/valory/customs/prediction_request/prediction_request.py b/packages/valory/customs/prediction_request/prediction_request.py
index 67ec57ec..1f628091 100644
--- a/packages/valory/customs/prediction_request/prediction_request.py
+++ b/packages/valory/customs/prediction_request/prediction_request.py
@@ -224,6 +224,10 @@ def count_tokens(text: str, model: str) -> int:
     "prediction-offline",
     "prediction-online",
     # "prediction-online-summarized-info",
+
+    # LEGACY
+    "claude-prediction-offline",
+    "claude-prediction-online",
 ]
 ALLOWED_MODELS = list(LLM_SETTINGS.keys())
 # the default number of URLs to fetch online information for
@@ -658,10 +662,12 @@ def adjust_additional_information(
 
 def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
     """Run the task"""
+    tool = kwargs["tool"]
     engine = kwargs.get("model")
+    if "claude" in tool:  # maintain backwards compatibility
+        engine = "claude-3-sonnet-20240229"
     print(f"ENGINE: {engine}")
     with LLMClientManager(kwargs["api_keys"], engine):
-        tool = kwargs["tool"]
         prompt = kwargs["prompt"]
         max_tokens = kwargs.get(
             "max_tokens", LLM_SETTINGS[engine]["default_max_tokens"]