import time
import json
from typing import Dict, List

from func_timeout import FunctionTimedOut, func_timeout
from openai import OpenAI
import tiktoken

from query_generators.query_generator import QueryGenerator
from utils.pruning import prune_metadata_str
from utils.gen_prompt import to_prompt_schema
from utils.dialects import (
    convert_postgres_ddl_to_dialect,
)

openai = OpenAI()


class OpenAIQueryGenerator(QueryGenerator):
    """
    Query generator that uses OpenAI's models
    """
    def __init__(
        self,
        db_creds: Dict[str, str],
        db_name: str,
        db_type: str,
        model: str,
        prompt_file: str,
        timeout: int,
        use_public_data: bool,
        verbose: bool,
        **kwargs,
    ):
        self.db_creds = db_creds
        self.db_type = db_type
        self.db_name = db_name
        self.model = model
        # o1 models get special handling below: no system prompt and no sampling parameters
        self.o1 = self.model.startswith("o1-")
        self.prompt_file = prompt_file
        self.use_public_data = use_public_data
        self.timeout = timeout
        self.verbose = verbose

    def get_chat_completion(
        self,
        model,
        messages,
        max_tokens=600,
        temperature=0,
        stop=[],
        logit_bias={},
        seed=100,
    ):
        """Get OpenAI chat completion for a given prompt and model"""
        generated_text = ""
        try:
            if self.o1:
                # o1 models are called without the sampling parameters
                # (max_tokens, temperature, stop, logit_bias)
                completion = openai.chat.completions.create(
                    model=model,
                    messages=messages,
                    seed=seed,
                )
            else:
                completion = openai.chat.completions.create(
                    model=model,
                    messages=messages,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    stop=stop,
                    logit_bias=logit_bias,
                    seed=seed,
                )
            generated_text = completion.choices[0].message.content
        except Exception as e:
            print(type(e), e)
        return generated_text

    def get_nonchat_completion(
        self,
        model,
        prompt,
        max_tokens=600,
        temperature=0,
        stop=[],
        logit_bias={},
    ):
        """Get OpenAI nonchat completion for a given prompt and model"""
        generated_text = ""
        try:
            completion = openai.completions.create(
                model=model,
                prompt=prompt,
                max_tokens=max_tokens,
                temperature=temperature,
                stop=stop,
                logit_bias=logit_bias,
                seed=42,
            )
            # the v1 client returns a response object, not a dict
            generated_text = completion.choices[0].text
        except Exception as e:
            print(type(e), e)
        return generated_text

    @staticmethod
    def count_tokens(
        model: str, messages: List[Dict[str, str]] = [], prompt: str = ""
    ) -> int:
        """
        This function counts the number of tokens used in a prompt
        model: the model used to generate the prompt. can be any valid OpenAI model
        messages: (only for OpenAI chat models) a list of messages to be used as a prompt. Each message is a dict with two keys: role and content
        prompt: (only for non-chat completions) the raw prompt string
        """
        try:
            tokenizer = tiktoken.encoding_for_model(model)
        except KeyError:
            # default to o200k_base if the model is not in the list. this is just for approximating the max token count
            tokenizer = tiktoken.get_encoding("o200k_base")
        num_tokens = 0
        for message in messages:
            for _, value in message.items():
                num_tokens += len(tokenizer.encode(value))
        if prompt:
            num_tokens += len(tokenizer.encode(prompt))
        return num_tokens
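
    # Illustrative usage (an assumed example, with a made-up message):
    # OpenAIQueryGenerator.count_tokens("gpt-4o", messages=[{"role": "user", "content": "SELECT 1;"}])
    # returns the approximate number of prompt tokens for that message list.
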
    def generate_query(
        self,
        question: str,
        instructions: str,
        k_shot_prompt: str,
        glossary: str,
        table_metadata_string: str,
        prev_invalid_sql: str,
        prev_error_msg: str,
        table_aliases: str,
        columns_to_keep: int,
        shuffle: bool,
    ) -> dict:
        start_time = time.time()
        self.err = ""
        self.query = ""
        self.reason = ""

        if self.use_public_data:
            from defog_data.metadata import dbs
            import defog_data.supplementary as sup
        else:
            # raise Exception("Replace this with your private data import")
            from defog_data_private.metadata import dbs

        with open(self.prompt_file) as file:
            chat_prompt = json.load(file)
        question_instructions = question + " " + instructions
        if table_metadata_string == "":
            if columns_to_keep > 0:
                table_metadata_ddl, join_str = prune_metadata_str(
                    question_instructions,
                    self.db_name,
                    self.use_public_data,
                    columns_to_keep,
                    shuffle,
                )
                table_metadata_ddl = convert_postgres_ddl_to_dialect(
                    postgres_ddl=table_metadata_ddl,
                    to_dialect=self.db_type,
                    db_name=self.db_name,
                )
                table_metadata_string = table_metadata_ddl + join_str
            elif columns_to_keep == 0:
                md = dbs[self.db_name]["table_metadata"]
                table_metadata_ddl = to_prompt_schema(md, shuffle)
                table_metadata_ddl = convert_postgres_ddl_to_dialect(
                    postgres_ddl=table_metadata_ddl,
                    to_dialect=self.db_type,
                    db_name=self.db_name,
                )
                column_join = sup.columns_join.get(self.db_name, {})
                # get join_str from column_join
                join_list = []
                for values in column_join.values():
                    col_1, col_2 = values[0]
                    # add to join_list
                    join_str = f"{col_1} can be joined with {col_2}"
                    if join_str not in join_list:
                        join_list.append(join_str)
                if len(join_list) > 0:
                    join_str = "\nHere is a list of joinable columns:\n" + "\n".join(
                        join_list
                    )
                else:
                    join_str = ""
                table_metadata_string = table_metadata_ddl + join_str
            else:
                raise ValueError("columns_to_keep must be >= 0")
        if glossary == "":
            glossary = dbs[self.db_name]["glossary"]
        try:
            if self.o1:
                sys_prompt = ""
                user_prompt = chat_prompt[0]["content"]
            else:
                sys_prompt = chat_prompt[0]["content"]
                sys_prompt = sys_prompt.format(
                    db_type=self.db_type,
                )
                user_prompt = chat_prompt[1]["content"]
                if len(chat_prompt) == 3:
                    assistant_prompt = chat_prompt[2]["content"]
        except Exception as e:
            raise ValueError("Invalid prompt file. Please use prompt_openai.md") from e
        user_prompt = user_prompt.format(
            db_type=self.db_type,
            user_question=question,
            table_metadata_string=table_metadata_string,
            instructions=instructions,
            k_shot_prompt=k_shot_prompt,
            glossary=glossary,
            prev_invalid_sql=prev_invalid_sql,
            prev_error_msg=prev_error_msg,
            table_aliases=table_aliases,
        )

        if self.o1:
            messages = [{"role": "user", "content": user_prompt}]
        else:
            messages = []
            messages.append({"role": "system", "content": sys_prompt})
            messages.append({"role": "user", "content": user_prompt})
            if len(chat_prompt) == 3:
                messages.append({"role": "assistant", "content": assistant_prompt})

        function_to_run = self.get_chat_completion
        package = messages
        try:
            self.completion = func_timeout(
                self.timeout,
                function_to_run,
                args=(self.model, package, 1200, 0),
            )
            results = self.completion
            self.query = results.split("```sql")[-1].split("```")[0]
            self.reason = "-"
        except FunctionTimedOut:
            if self.verbose:
                print("generating query timed out")
            self.err = "QUERY GENERATION TIMEOUT"
        except Exception as e:
            if self.verbose:
                print(f"Error while generating query: {type(e)}, {e}")
            self.query = ""
            self.reason = ""
            print(e)
            if isinstance(e, KeyError):
                self.err = f"QUERY GENERATION ERROR: {type(e)}, {e}, Completion: {self.completion}"
            else:
                self.err = f"QUERY GENERATION ERROR: {type(e)}, {e}"

        tokens_used = self.count_tokens(self.model, messages=messages)
        return {
            "table_metadata_string": table_metadata_string,
            "query": self.query,
            "reason": self.reason,
            "err": self.err,
            "latency_seconds": time.time() - start_time,
            "tokens_used": tokens_used,
        }
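

# Example usage: a minimal sketch of how the class above might be driven.
# Every literal value here (credentials, database name, prompt file path,
# question text) is a placeholder to adapt to your environment; only the
# parameter names come from the class definitions above. Running this
# requires the OPENAI_API_KEY environment variable and the defog_data package.
if __name__ == "__main__":
    generator = OpenAIQueryGenerator(
        db_creds={"host": "localhost", "port": 5432, "user": "postgres", "password": "postgres"},
        db_name="academic",  # placeholder database name
        db_type="postgres",
        model="gpt-4o",  # placeholder model name
        prompt_file="prompt_openai.json",  # placeholder path to a JSON chat prompt
        timeout=60,
        use_public_data=True,
        verbose=True,
    )
    result = generator.generate_query(
        question="How many authors published a paper in 2020?",  # placeholder question
        instructions="",
        k_shot_prompt="",
        glossary="",
        table_metadata_string="",
        prev_invalid_sql="",
        prev_error_msg="",
        table_aliases="",
        columns_to_keep=0,
        shuffle=False,
    )
    # the returned dict carries the generated SQL plus latency and token metadata
    print(result["query"])
    print(f"latency: {result['latency_seconds']:.2f}s, tokens: {result['tokens_used']}")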