Commit: update models and prompts
richardblythman committed Apr 15, 2024
1 parent 60b8942 commit f79405f
Showing 5 changed files with 68 additions and 70 deletions.
@@ -196,27 +196,27 @@ def embeddings(self, model, input):
     },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-sonnet-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
-    "cohere/command-r-plus": {
-        "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+    "databricks/dbrx-instruct:nitro": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 32_768,
         "temperature": 0,
     },
-    "mistralai/mixtral-8x22b": {
+    "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+        "limit_max_tokens": 32_000,
         "temperature": 0,
     },
 }
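For orientation, settings entries like the ones above are typically used to pick a default completion size and to cap whatever the caller requests. Below is a minimal sketch of that pattern in Python; the LLM_SETTINGS name and the resolve_max_tokens helper are illustrative assumptions, not code from this commit:

from typing import Optional

# Illustrative only: a tiny slice of the settings table shown in the diff above.
LLM_SETTINGS = {
    "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
        "default_max_tokens": 1000,
        "limit_max_tokens": 32_000,
        "temperature": 0,
    },
}

def resolve_max_tokens(model: str, requested: Optional[int] = None) -> int:
    """Fall back to the model default, then clamp to the model limit."""
    settings = LLM_SETTINGS[model]
    max_tokens = requested or settings["default_max_tokens"]
    return min(max_tokens, settings["limit_max_tokens"])

print(resolve_max_tokens("nousresearch/nous-hermes-2-mixtral-8x7b-sft"))          # 1000
print(resolve_max_tokens("nousresearch/nous-hermes-2-mixtral-8x7b-sft", 50_000))  # 32000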
@@ -243,28 +243,22 @@ def embeddings(self, model, input):


 PREDICTION_PROMPT = """
-Here is some additional background information that may be relevant to the question:
-<additional_information> {ADDITIONAL_INFORMATION} </additional_information>
-A user has asked the following:
-<user_prompt> {USER_PROMPT} </user_prompt>
-Carefully consider the user's question and the additional information provided. Think through the likelihood of the event the user asked about actually happening in the future, based on the details given. Write out your reasoning and analysis in a section.
-Now, based on your analysis above, provide a prediction of the probability the event will happen, as p_yes between 0 and 1. Also provide the probability it will not happen, as p_no between 0 and 1. The two probabilities should sum to 1.
-p_yes: p_no:
-How useful was the additional information in allowing you to make a prediction? Provide your rating as info_utility, a number between 0 and 1.
-info_utility:
-Finally, considering everything, what is your overall confidence in your prediction? Provide your confidence as a number between 0 and 1.
-confidence:
-Provide your final scores in the following format: <p_yes>probability between 0 and 1</p_yes> <p_no>probability between 0 and 1</p_no> your confidence level between 0 and 1 <info_utility>utility of the additional information between 0 and 1</info_utility>
-Make sure the values you provide are between 0 and 1. And p_yes and p_no should sum to 1.
+You will be evaluating the likelihood of an event based on a user's question and additional information from search results.
+The user's question is: <user_prompt> {USER_PROMPT} </user_prompt>
+The additional background information that may be relevant to the question is:
+<additional_information> {ADDITIONAL_INFORMATION} </additional_information>
+Carefully consider the user's question and the additional information provided. Then, think through the following:
+- The probability that the event specified in the user's question will happen (p_yes)
+- The probability that the event will not happen (p_no)
+- Your confidence level in your prediction
+- How useful was the additional information in allowing you to make a prediction (info_utility)
+Remember, p_yes and p_no should add up to 1.
+Your response should be structured as follows:
+<p_yes></p_yes>
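The rewritten prompt asks the model to return its scores inside tags such as <p_yes>, <p_no>, <confidence> and <info_utility>. Below is a minimal sketch of how such a tagged response could be parsed; the parser is illustrative and is not necessarily how these tools extract the values:

import re
from typing import Dict

def parse_tagged_scores(response: str) -> Dict[str, float]:
    """Pull float values out of <p_yes>...</p_yes>-style tags."""
    scores = {}
    for field in ("p_yes", "p_no", "confidence", "info_utility"):
        match = re.search(rf"<{field}>\s*([0-9.]+)\s*</{field}>", response)
        if match:
            scores[field] = float(match.group(1))
    return scores

example = "<p_yes>0.7</p_yes> <p_no>0.3</p_no> <confidence>0.8</confidence> <info_utility>0.5</info_utility>"
print(parse_tagged_scores(example))
# {'p_yes': 0.7, 'p_no': 0.3, 'confidence': 0.8, 'info_utility': 0.5}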
@@ -198,27 +198,27 @@ def embeddings(self, model, input):
     },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-sonnet-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
-    "cohere/command-r-plus": {
-        "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+    "databricks/dbrx-instruct:nitro": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 32_768,
         "temperature": 0,
     },
-    "mistralai/mixtral-8x22b": {
+    "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
        "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+        "limit_max_tokens": 32_000,
         "temperature": 0,
     },
 }
@@ -271,7 +271,7 @@ class Document(BaseModel):
 The reasoning from the other AI is: {REASONING}
-Carefully consider the user's question and the provided reasoning. Then, in a , think through the following:
+Carefully consider the user's question and the provided reasoning. Then, think through the following:
 - The probability that the event specified in the user's question will happen (p_yes)
 - The probability that the event will not happen (p_no)
 - Your confidence level in your prediction
46 changes: 20 additions & 26 deletions packages/napthaai/customs/prediction_url_cot/prediction_url_cot.py
@@ -176,27 +176,27 @@ def embeddings(self, model, input):
     },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-sonnet-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
-    "cohere/command-r-plus": {
-        "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+    "databricks/dbrx-instruct:nitro": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 32_768,
         "temperature": 0,
     },
-    "mistralai/mixtral-8x22b": {
+    "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+        "limit_max_tokens": 32_000,
         "temperature": 0,
     },
 }
@@ -216,28 +216,22 @@ def embeddings(self, model, input):
 N_DOCS = 5

 PREDICTION_PROMPT = """
-Here is some additional background information that may be relevant to the question:
-<additional_information> {ADDITIONAL_INFORMATION} </additional_information>
-A user has asked the following:
-<user_prompt> {USER_PROMPT} </user_prompt>
-Carefully consider the user's question and the additional information provided. Think through the likelihood of the event the user asked about actually happening in the future, based on the details given. Write out your reasoning and analysis in a section.
-Now, based on your analysis above, provide a prediction of the probability the event will happen, as p_yes between 0 and 1. Also provide the probability it will not happen, as p_no between 0 and 1. The two probabilities should sum to 1.
-p_yes: p_no:
-How useful was the additional information in allowing you to make a prediction? Provide your rating as info_utility, a number between 0 and 1.
-info_utility:
-Finally, considering everything, what is your overall confidence in your prediction? Provide your confidence as a number between 0 and 1.
-confidence:
-Provide your final scores in the following format: <p_yes>probability between 0 and 1</p_yes> <p_no>probability between 0 and 1</p_no> your confidence level between 0 and 1 <info_utility>utility of the additional information between 0 and 1</info_utility>
-Make sure the values you provide are between 0 and 1. And p_yes and p_no should sum to 1.
+You will be evaluating the likelihood of an event based on a user's question and additional information from search results.
+The user's question is: <user_prompt> {USER_PROMPT} </user_prompt>
+The additional background information that may be relevant to the question is:
+<additional_information> {ADDITIONAL_INFORMATION} </additional_information>
+Carefully consider the user's question and the additional information provided. Then, think through the following:
+- The probability that the event specified in the user's question will happen (p_yes)
+- The probability that the event will not happen (p_no)
+- Your confidence level in your prediction
+- How useful was the additional information in allowing you to make a prediction (info_utility)
+Remember, p_yes and p_no should add up to 1.
+Your response should be structured as follows:
+<p_yes></p_yes>
26 changes: 17 additions & 9 deletions packages/valory/customs/prediction_request/prediction_request.py
@@ -32,6 +32,7 @@
 from markdownify import markdownify as md
 from readability import Document
 from googleapiclient.discovery import build
+import re
 from spacy import Language
 from spacy.cli import download
 from spacy.lang.en import STOP_WORDS
@@ -189,32 +190,32 @@ def count_tokens(text: str, model: str) -> int:
     },
     "claude-3-haiku-20240307": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-sonnet-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "claude-3-opus-20240229": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 200_0000,
+        "limit_max_tokens": 2_000_000,
         "temperature": 0,
     },
     "cohere/command-r-plus": {
         "default_max_tokens": 1000,
         "limit_max_tokens": 4096,
         "temperature": 0,
     },
-    "mistralai/mistral-medium": {
-        "default_max_tokens": 1000,
-        "limit_max_tokens": 8192,
+    "databricks/dbrx-instruct:nitro": {
+        "default_max_tokens": 500,
+        "limit_max_tokens": 32_768,
         "temperature": 0,
     },
-    "mistralai/mixtral-8x22b": {
+    "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {
         "default_max_tokens": 1000,
-        "limit_max_tokens": 4096,
+        "limit_max_tokens": 32_000,
         "temperature": 0,
     },
 }
@@ -405,6 +406,12 @@ def extract_texts(urls: List[str], num_words: Optional[int]) -> List[str]:
             break
     return extracted_texts

+def extract_json_string(text):
+    # This regex looks for triple backticks, captures everything in between until it finds another set of triple backticks.
+    print('AAAAAAAA', text)
+    pattern = r"(\{[^}]*\})"
+    matches = re.findall(pattern, text)
+    return matches[0].replace("json", "")

 def fetch_additional_information(
     prompt: str,
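As committed, extract_json_string keeps a leftover debug print, and its comment mentions triple backticks while the regex actually grabs the first {...} block and then strips the literal substring "json" from the match. A more defensive sketch of the same idea follows; it is an illustration, not a fix that is part of this commit, and like the original it does not handle nested objects:

import json
import re
from typing import Any, Dict, Optional

def extract_json_block(text: str) -> Optional[Dict[str, Any]]:
    """Return the first parseable {...} block in the model output, if any."""
    # Drop markdown code fences such as ```json ... ``` before matching.
    cleaned = re.sub(r"```(?:json)?", "", text)
    match = re.search(r"\{.*?\}", cleaned, flags=re.DOTALL)
    if match is None:
        return None
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return None

print(extract_json_block('```json\n{"p_yes": 0.7, "p_no": 0.3}\n```'))
# {'p_yes': 0.7, 'p_no': 0.3}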
@@ -630,4 +637,5 @@ def run(**kwargs) -> Tuple[str, Optional[str], Optional[Dict[str, Any]], Any]:
         model=engine,
         token_counter=count_tokens,
     )
-    return response.content, prediction_prompt, None, counter_callback
+    extracted_block = extract_json_string(response.content)
+    return extracted_block, prediction_prompt, None, counter_callback
2 changes: 2 additions & 0 deletions packages/valory/skills/task_execution/utils/benchmarks.py
@@ -40,6 +40,8 @@ class TokenCounterCallback:
"claude-3-haiku-20240307": {"input": 0.00025, "output": 0.00125},
"claude-3-sonnet-20240229": {"input": 0.003, "output": 0.015},
"claude-3-opus-20240229": {"input": 0.015, "output": 0.075},
"databricks/dbrx-instruct:nitro": {"input": 0.0009, "output": 0.0009},
"nousresearch/nous-hermes-2-mixtral-8x7b-sft": {"input": 0.00054, "output": 0.00054},
}

def __init__(self) -> None:
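The two new pricing rows mirror the shape of the existing Claude entries; the figures look like USD per 1,000 tokens, though that unit is an assumption here. A minimal sketch of turning such a table into a per-request cost estimate (TOKEN_PRICES and request_cost are illustrative names, not part of TokenCounterCallback):

# Illustrative only: assumes prices are USD per 1,000 tokens, mirroring the table above.
TOKEN_PRICES = {
    "databricks/dbrx-instruct:nitro": {"input": 0.0009, "output": 0.0009},
    "nousresearch/nous-hermes-2-mixtral-8x7b-sft": {"input": 0.00054, "output": 0.00054},
}

def request_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    """Estimate the USD cost of one request from per-1k-token prices."""
    prices = TOKEN_PRICES[model]
    return (input_tokens * prices["input"] + output_tokens * prices["output"]) / 1000

print(round(request_cost("databricks/dbrx-instruct:nitro", 2000, 500), 6))  # 0.00225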
