# app.py
import re
from pathlib import Path

import streamlit as st
import yaml
from openai import OpenAI

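# Run locally with: streamlit run app.py
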
st.title("Metaprompt Generator")
st.markdown("""
The metaprompt framework was originally developed by Anthropic to help users write clear, structured instructions for its AI assistant, Claude.
It encourages accurate and consistent task performance by laying out practical prompt-engineering principles alongside worked examples.
You can find more information and examples in Anthropic's documentation here:
[Anthropic's Prompt Engineering Guide](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/prompt-generator).
""")

# Ask for a key in the UI, falling back to the OPENAI_API_KEY entry in
# Streamlit secrets if one is configured.
api_key = (
    st.text_input(
        "Enter your OpenAI API key",
        type="password",
        help="Enter your API key to proceed.",
    )
    or st.secrets.get("OPENAI_API_KEY")
)
if api_key:
    client = OpenAI(api_key=api_key)
else:
    st.error("Please enter your OpenAI API key.")
    st.stop()
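
# Instead of typing the key on every run, it can also be stored in Streamlit's
# standard secrets file, e.g. in .streamlit/secrets.toml:
#
#   OPENAI_API_KEY = "sk-..."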

# Load the metaprompt template; it contains a {{TASK}} placeholder filled in below.
metaprompt = Path("./metaprompt-short.txt").read_text()


@st.cache_data
def list_models():
    # Cache the model list so it is fetched once rather than on every rerun.
    return [model.id for model in client.models.list()]

available_models = list_models()
MODEL = st.selectbox(
    "Select a model",
    options=available_models,
    # Default to gpt-4o if the account has access to it; otherwise fall back
    # to the first available model instead of raising ValueError.
    index=available_models.index("gpt-4o") if "gpt-4o" in available_models else 0,
)

st.markdown("""
Welcome to the Metaprompt! This is a prompt engineering tool designed to solve the "blank page problem" and give you a starting point for iteration. All you need to do is enter your task, and optionally the names of the variables you'd like the assistant to use in the template. Then you'll be able to run the prompt that comes out on any examples you like.
**Caveats**
- This is designed for single-turn question/response prompts, not multi-turn conversations.
- The Metaprompt is designed for use with large, capable models; generating prompts with smaller models may lead to worse results.
- The prompt you'll get at the end is not guaranteed to be optimal by any means, so don't be afraid to change it!
""")

TASK = st.text_input(
    "Enter your task", "Draft an email responding to a customer complaint"
)
VARIABLES_INPUT = st.text_input(
    "Optional: specify the input variables you want the assistant to use (comma-separated)",
    "CUSTOMER_NAME, CUSTOMER_COMPLAINT",
)
VARIABLES = (
    # Normalize to uppercase and drop empty entries (e.g. a trailing comma).
    [var.strip().upper() for var in VARIABLES_INPUT.split(",") if var.strip()]
    if VARIABLES_INPUT
    else []
)

variable_string = ""
for variable in VARIABLES:
variable_string += f"\n{{${variable.upper()}}}"
prompt = metaprompt.replace("{{TASK}}", TASK)
# Pre-fill the start of the assistant's reply so the model continues from the
# "<Inputs>" section instead of starting from scratch. (Appending an empty
# variable_string is a no-op, so no branching is needed.)
assistant_partial = (
    "<Inputs>" + variable_string + "\n</Inputs>\n<Instructions Structure>"
)
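
# With the default inputs above, assistant_partial is:
#
#   <Inputs>
#   {$CUSTOMER_NAME}
#   {$CUSTOMER_COMPLAINT}
#   </Inputs>
#   <Instructions Structure>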


def generate_prompt_template(prompt, assistant_partial):
    """Run the metaprompt against the selected model and return the raw completion."""
    full_prompt = prompt + "\n" + assistant_partial
    response = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": full_prompt}],
        temperature=0.1,
    )
    return response.choices[0].message.content


def extract_between_tags(tag: str, string: str, strip: bool = False) -> list[str]:
    # Ensure we escape the tag to avoid conflicts with special characters in regex
    tag_pattern = re.compile(f"<{re.escape(tag)}>(.*?)</{re.escape(tag)}>", re.DOTALL)
    ext_list = tag_pattern.findall(string)
    if strip:
        ext_list = [e.strip() for e in ext_list]
    return ext_list
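
# For example (illustrative):
#   extract_between_tags("Instructions", "a <Instructions>Do X</Instructions> b")
#   returns ["Do X"]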


def remove_empty_tags(text):
    # Strip tag pairs with only whitespace between them. Currently referenced
    # only by the disabled cleanup in extract_prompt below.
    return re.sub(r"\n<(\w+)>\s*</\1>\n", "", text, flags=re.DOTALL)


def remove_unclosed_tags(text: str) -> str:
    # Pair open/close tags with a stack and strip any that never match up.
    # Like remove_empty_tags, only referenced by the disabled cleanup below.
    tags = re.findall(r"<(/?\w+)>", text)
    stack = []
    closed_text = text
    for tag in tags:
        if not tag.startswith("/"):
            stack.append(tag)
        elif stack and stack[-1] == tag[1:]:
            stack.pop()
        else:
            # Closing tag with no matching opener: drop it.
            closed_text = re.sub(f"<{tag}>", "", closed_text)
    for tag in stack:
        # Opening tag that was never closed: drop it too.
        closed_text = re.sub(f"<{tag}>", "", closed_text)
    return closed_text
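
# For example (illustrative):
#   remove_unclosed_tags("<a>x</a><b>y") returns "<a>x</a>y"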


def extract_prompt(metaprompt_response):
    # Optional cleanup passes, currently disabled:
    # metaprompt_response = remove_unclosed_tags(metaprompt_response).strip()
    # metaprompt_response = remove_empty_tags(metaprompt_response).strip()
    instructions = extract_between_tags("Instructions", metaprompt_response)
    if not instructions:
        # Fail with a readable message instead of an IndexError when the
        # response contains no <Instructions> section.
        st.error("No <Instructions> section found in the model response.")
        st.stop()
    return instructions[0]


def extract_variables(prompt):
    # Find {PLACEHOLDER}-style variables (including the {$VAR} form the
    # metaprompt emits) so the user can be asked for values below.
    pattern = r"{([^}]+)}"
    return set(re.findall(pattern, prompt))
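
# For example (illustrative):
#   extract_variables("Dear {$CUSTOMER_NAME}, thanks for your note.")
#   returns {"$CUSTOMER_NAME"}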


def generate_assistant_output(prompt_with_variables):
    """Run the filled-in prompt template and return the model's answer."""
    response = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": prompt_with_variables}],
        temperature=0.1,
    )
    return response.choices[0].message.content


def convert_prompt_to_yaml(prompt_text):
    yaml_content = {"prompt": prompt_text}
    return yaml.dump(yaml_content, sort_keys=False, default_flow_style=False)
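
# For example (illustrative):
#   convert_prompt_to_yaml("Reply to {$CUSTOMER_NAME}")
#   returns "prompt: Reply to {$CUSTOMER_NAME}\n"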
if st.button("Generate Prompt Template"):
with st.spinner("Generating prompt template..."):
generated_text = generate_prompt_template(prompt, assistant_partial)
st.session_state["generated_text"] = generated_text
# If <Instructions is not closed, assume this section is not terminated.
INSTRUCTIONS_END = "</Instructions>"
if generated_text and INSTRUCTIONS_END not in generated_text:
generated_text += INSTRUCTIONS_END
with st.expander("Response"):
st.code(generated_text, language="markdown")
extracted_prompt_template = extract_prompt(generated_text)
variables_in_prompt = extract_variables(extracted_prompt_template)
st.session_state["extracted_prompt_template"] = extracted_prompt_template
st.session_state["variables_in_prompt"] = variables_in_prompt
if "generated_text" in st.session_state:
generated_text = st.session_state["generated_text"]
extracted_prompt_template = st.session_state["extracted_prompt_template"]
variables_in_prompt = st.session_state["variables_in_prompt"]
st.subheader("Generated Prompt Template:")
st.code(generated_text, language="markdown")
st.subheader("Extracted Variables:")
st.write(variables_in_prompt or None)
st.subheader("Final Prompt Template:")
st.code(extracted_prompt_template, language="markdown")
with st.expander("YAML"):
st.code(convert_prompt_to_yaml(extracted_prompt_template), language="yaml")
st.header("Test Your Prompt Template")
variable_values = {}
for variable in variables_in_prompt:
value = st.text_input(
f"Enter value for variable {variable}", key=f"variable_value_{variable}"
)
variable_values[variable] = value
prompt_with_variables = extracted_prompt_template
for variable in variable_values:
prompt_with_variables = prompt_with_variables.replace(
"{" + variable + "}", variable_values[variable]
)
if st.button("Generate Assistant's Output"):
with st.spinner("Generating assistant's output..."):
assistant_output = generate_assistant_output(prompt_with_variables)
st.subheader("Assistant's Output:")
st.write(assistant_output)