diff --git a/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py b/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
index 76cfe36f..9f20be07 100644
--- a/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
+++ b/packages/napthaai/customs/prediction_request_rag/prediction_request_rag.py
@@ -196,27 +196,27 @@ def embeddings(self, model, input):
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-sonnet-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
- "cohere/command-r-plus": {
- "default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "databricks/dbrx-instruct:nitro": {
+ "default_max_tokens": 500,
+ "limit_max_tokens": 32_768,
"temperature": 0,
},
- "mistralai/mixtral-8x22b": {
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
"default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "limit_max_tokens": 32_000,
"temperature": 0,
},
}
@@ -243,28 +243,22 @@ def embeddings(self, model, input):
PREDICTION_PROMPT = """
-Here is some additional background information that may be relevant to the question:
- {ADDITIONAL_INFORMATION}
-
-A user has asked the following:
+You will be evaluating the likelihood of an event based on a user's question and additional information from search results.
+The user's question is: {USER_PROMPT}
- {USER_PROMPT}
-
-Carefully consider the user's question and the additional information provided. Think through the likelihood of the event the user asked about actually happening in the future, based on the details given. Write out your reasoning and analysis in a <reasoning> section.
-
-Now, based on your analysis above, provide a prediction of the probability the event will happen, as p_yes between 0 and 1. Also provide the probability it will not happen, as p_no between 0 and 1. The two probabilities should sum to 1.
-
-p_yes: <probability between 0 and 1> p_no: <probability between 0 and 1>
-
-How useful was the additional information in allowing you to make a prediction? Provide your rating as info_utility, a number between 0 and 1.
-
-info_utility:
+The additional background information that may be relevant to the question is:
+ {ADDITIONAL_INFORMATION}
-Finally, considering everything, what is your overall confidence in your prediction? Provide your confidence as a number between 0 and 1.
+Carefully consider the user's question and the additional information provided. Then, think through the following:
+- The probability that the event specified in the user's question will happen (p_yes)
+- The probability that the event will not happen (p_no)
+- Your confidence level in your prediction
+- How useful was the additional information in allowing you to make a prediction (info_utility)
-confidence:
+Provide your final scores in the following format: <p_yes>probability between 0 and 1</p_yes> <p_no>probability between 0 and 1</p_no>
+<confidence>your confidence level between 0 and 1</confidence> <info_utility>utility of the additional information between 0 and 1</info_utility>
-Make sure the values you provide are between 0 and 1. And p_yes and p_no should sum to 1.
+Remember, p_yes and p_no should add up to 1.
Your response should be structured as follows:
diff --git a/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py b/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
index 24b9d9fb..8f3fd994 100644
--- a/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
+++ b/packages/napthaai/customs/prediction_request_reasoning/prediction_request_reasoning.py
@@ -198,27 +198,27 @@ def embeddings(self, model, input):
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-sonnet-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
- "cohere/command-r-plus": {
- "default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "databricks/dbrx-instruct:nitro": {
+ "default_max_tokens": 500,
+ "limit_max_tokens": 32_768,
"temperature": 0,
},
- "mistralai/mixtral-8x22b": {
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
"default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "limit_max_tokens": 32_000,
"temperature": 0,
},
}
@@ -271,7 +271,7 @@ class Document(BaseModel):
The reasoning from the other AI is: {REASONING}
-Carefully consider the user's question and the provided reasoning. Then, in a <thinking>, think through the following:
+Carefully consider the user's question and the provided reasoning. Then, think through the following:
- The probability that the event specified in the user's question will happen (p_yes)
- The probability that the event will not happen (p_no)
- Your confidence level in your prediction
diff --git a/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py b/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
index 7d814501..40245dff 100644
--- a/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
+++ b/packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
@@ -176,27 +176,27 @@ def embeddings(self, model, input):
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-sonnet-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
- "cohere/command-r-plus": {
- "default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "databricks/dbrx-instruct:nitro": {
+ "default_max_tokens": 500,
+ "limit_max_tokens": 32_768,
"temperature": 0,
},
- "mistralai/mixtral-8x22b": {
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
"default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "limit_max_tokens": 32_000,
"temperature": 0,
},
}
@@ -216,28 +216,22 @@ def embeddings(self, model, input):
N_DOCS = 5
PREDICTION_PROMPT = """
-Here is some additional background information that may be relevant to the question:
- {ADDITIONAL_INFORMATION}
-
-A user has asked the following:
+You will be evaluating the likelihood of an event based on a user's question and additional information from search results.
+The user's question is: {USER_PROMPT}
- {USER_PROMPT}
-
-Carefully consider the user's question and the additional information provided. Think through the likelihood of the event the user asked about actually happening in the future, based on the details given. Write out your reasoning and analysis in a <reasoning> section.
-
-Now, based on your analysis above, provide a prediction of the probability the event will happen, as p_yes between 0 and 1. Also provide the probability it will not happen, as p_no between 0 and 1. The two probabilities should sum to 1.
-
-p_yes: <probability between 0 and 1> p_no: <probability between 0 and 1>
-
-How useful was the additional information in allowing you to make a prediction? Provide your rating as info_utility, a number between 0 and 1.
-
-info_utility:
+The additional background information that may be relevant to the question is:
+ {ADDITIONAL_INFORMATION}
-Finally, considering everything, what is your overall confidence in your prediction? Provide your confidence as a number between 0 and 1.
+Carefully consider the user's question and the additional information provided. Then, think through the following:
+- The probability that the event specified in the user's question will happen (p_yes)
+- The probability that the event will not happen (p_no)
+- Your confidence level in your prediction
+- How useful was the additional information in allowing you to make a prediction (info_utility)
-confidence:
+Provide your final scores in the following format: <p_yes>probability between 0 and 1</p_yes> <p_no>probability between 0 and 1</p_no>
+<confidence>your confidence level between 0 and 1</confidence> <info_utility>utility of the additional information between 0 and 1</info_utility>
-Make sure the values you provide are between 0 and 1. And p_yes and p_no should sum to 1.
+Remember, p_yes and p_no should add up to 1.
Your response should be structured as follows:
diff --git a/packages/valory/customs/prediction_request/prediction_request.py b/packages/valory/customs/prediction_request/prediction_request.py
index 7622ce29..79a8d865 100644
--- a/packages/valory/customs/prediction_request/prediction_request.py
+++ b/packages/valory/customs/prediction_request/prediction_request.py
@@ -32,6 +32,7 @@
from markdownify import markdownify as md
from readability import Document
from googleapiclient.discovery import build
+import re
from spacy import Language
from spacy.cli import download
from spacy.lang.en import STOP_WORDS
@@ -189,17 +190,17 @@ def count_tokens(text: str, model: str) -> int:
},
"claude-3-haiku-20240307": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-sonnet-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"claude-3-opus-20240229": {
"default_max_tokens": 1000,
- "limit_max_tokens": 200_0000,
+ "limit_max_tokens": 2_000_000,
"temperature": 0,
},
"cohere/command-r-plus": {
@@ -207,14 +208,14 @@ def count_tokens(text: str, model: str) -> int:
"limit_max_tokens": 4096,
"temperature": 0,
},
- "mistralai/mistral-medium": {
- "default_max_tokens": 1000,
- "limit_max_tokens": 8192,
+ "databricks/dbrx-instruct:nitro": {
+ "default_max_tokens": 500,
+ "limit_max_tokens": 32_768,
"temperature": 0,
},
- "mistralai/mixtral-8x22b": {
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
"default_max_tokens": 1000,
- "limit_max_tokens": 4096,
+ "limit_max_tokens": 32_000,
"temperature": 0,
},
}
@@ -405,6 +406,12 @@ def extract_texts(urls: List[str], num_words: Optional[int]) -> List[str]:
break
return extracted_texts
+def extract_json_string(text):
+    # Capture the first brace-delimited block in the response; models often
+    # wrap their JSON in prose or a fenced ```json block. Note that this
+    # pattern does not handle nested objects.
+    pattern = r"(\{[^}]*\})"
+    matches = re.findall(pattern, text)
+    if not matches:
+        # No JSON-like block found; fall back to the raw response.
+        return text
+    return matches[0].replace("json", "")
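+# Example with a hypothetical model reply, to illustrate the behavior:
+#   extract_json_string('Sure: ```json {"p_yes": 0.6, "p_no": 0.4}```')
+#   returns '{"p_yes": 0.6, "p_no": 0.4}'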
def fetch_additional_information(
prompt: str,
@@ -630,4 +637,5 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
model=engine,
token_counter=count_tokens,
)
- return response.content, prediction_prompt, None, counter_callback
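+    # Post-process the raw completion so callers receive a parseable JSON
+    # string even when the model wraps it in prose or a code fence.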
+ extracted_block = extract_json_string(response.content)
+ return extracted_block, prediction_prompt, None, counter_callback
diff --git a/packages/valory/skills/task_execution/utils/benchmarks.py b/packages/valory/skills/task_execution/utils/benchmarks.py
index 0dcd601c..8b201eef 100644
--- a/packages/valory/skills/task_execution/utils/benchmarks.py
+++ b/packages/valory/skills/task_execution/utils/benchmarks.py
@@ -40,6 +40,8 @@ class TokenCounterCallback:
"claude-3-haiku-20240307": {"input": 0.00025, "output": 0.00125},
"claude-3-sonnet-20240229": {"input": 0.003, "output": 0.015},
"claude-3-opus-20240229": {"input": 0.015, "output": 0.075},
+ "databricks/dbrx-instruct:nitro": {"input": 0.0009, "output": 0.0009},
+ "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {"input": 0.00054, "output": 0.00054},
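+        # Assumed to be USD per 1k tokens for these OpenRouter-hosted models,
+        # matching the units of the existing entries in this table.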
}
def __init__(self) -> None: