openai_api.py
import base64
import os

from dotenv import load_dotenv
from openai import OpenAI

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
system_message = "You are a helpful assistant"

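# The .env file read by load_dotenv() is assumed to define the API key, e.g.:
#   OPENAI_API_KEY=sk-...
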
def encode_image(image_path):
    """Read an image file and return its contents as a base64 string."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def encode_frame(frame):
    """Base64-encode raw image bytes (e.g. a JPEG-encoded video frame)."""
    return base64.b64encode(frame).decode("utf-8")

def encode_images(image_paths, frames=False):
    """Convert image paths (or, with frames=True, JPEG frame bytes) into data-URL strings."""
    if frames:
        return [f"data:image/jpeg;base64,{encode_frame(frame)}" for frame in image_paths]
    return [f"data:image/jpeg;base64,{encode_image(image_path)}" for image_path in image_paths]

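# A minimal sketch (assumption: frames come from OpenCV) of how a raw video frame
# could be turned into the JPEG bytes that encode_frame()/encode_images(frames=True)
# expect. cv2 is not otherwise required by this module, so the import stays local.
def frame_to_jpeg_bytes(frame_bgr):
    import cv2  # hypothetical dependency, only needed if this helper is used
    ok, buffer = cv2.imencode(".jpg", frame_bgr)
    if not ok:
        raise ValueError("JPEG encoding of the frame failed")
    return buffer.tobytes()
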
def test_gpt():
    return chatgpt("What is the meaning of life?")

def chatgpt(query, model="gpt-4-1106-preview"):
    """Send a single-turn text query to the Chat Completions API and return the reply."""
    response = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": query,
            }
        ],
        model=model,
    )
    return response.choices[0].message.content

def test_vision_api():
    """Smoke test: ask the vision model to list the foods in a sample fridge photo."""
    response = client.chat.completions.create(
        model="gpt-4-vision-preview",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Return a list of foods in the refrigerator as they would appear in a Python list data structure.",
                    },
                    {
                        "type": "image_url",
                        "image_url": {"url": "https://healsview.com/wp-content/uploads/2023/10/open-fridge-or-1024x683.jpg"},
                    },
                ],
            }
        ],
        max_tokens=300,
    )
    return response.choices[0].message.content

def extract_food_from_image(image_path):
    """List the foods visible in the image at the given path or URL."""
    return vision_api(
        [image_path],
        "What foods are in the refrigerator? Do NOT generate any openings other than the list of foods in the image. ONLY GIVE ME THE INGREDIENTS in bullet points",
    )


def extract_food_from_up_image(image):
    """Variant of extract_food_from_image for an image value passed directly (e.g. an uploaded/encoded image)."""
    return vision_api(
        [image],
        "What foods are in the refrigerator? Do NOT generate any openings other than the list of foods in the image. ONLY GIVE ME THE INGREDIENTS in bullet points",
    )

def vision_api(images, prompt):
    """Send a text prompt plus one or more images to the vision model and return the reply text."""
    content = [{"type": "text", "text": prompt}]
    for image in images:
        # Each image_url entry expects an object with a "url" field (a plain URL or a data URL).
        content.append({"type": "image_url", "image_url": {"url": image}})
    response = client.chat.completions.create(
        model="gpt-4-vision-preview",
        messages=[
            {
                "role": "user",
                "content": content,
            }
        ],
        max_tokens=1000,
    )
    return response.choices[0].message.content

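# Usage sketch (hypothetical file names): local images can be sent to vision_api()
# by first converting them into base64 data URLs with encode_images(), e.g.
#   data_urls = encode_images(["fridge_1.jpg", "fridge_2.jpg"])
#   print(vision_api(data_urls, "List the foods visible in these images."))
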
def vision_api_image(images, prompt):
    """Same behaviour as vision_api: send a prompt plus images and return the reply text."""
    content = [{"type": "text", "text": prompt}]
    for image in images:
        content.append({"type": "image_url", "image_url": {"url": image}})
    response = client.chat.completions.create(
        model="gpt-4-vision-preview",
        messages=[
            {
                "role": "user",
                "content": content,
            }
        ],
        max_tokens=1000,
    )
    return response.choices[0].message.content

if __name__ == "__main__":
    # response = test_gpt()
    # print(response)
    response = test_vision_api()
    print(response)