-
Notifications
You must be signed in to change notification settings - Fork 0
/
app.py
62 lines (52 loc) · 1.72 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
"""Answer questions about Culver's flavor of the day via simple RAG.

Fetches today's flavor data from a community Culver's API, embeds each
record into an in-memory ChromaDB collection using Ollama embeddings,
retrieves the single best-matching document for the user's prompt
(taken from the command line), and answers with a llama3 model grounded
in that document.
"""
import sys
from datetime import datetime

import chromadb
import ollama
import requests


def _fetch_documents():
    """Fetch Culver's flavor-of-the-day data and return it as text documents."""
    # Timeout + raise_for_status: fail fast on a hung connection or HTTP
    # error instead of surfacing a confusing JSON decode error later.
    response = requests.get('https://culvers-fotd.joe.workers.dev/', timeout=30)
    response.raise_for_status()
    data = response.json()

    today = datetime.now().strftime("%B %d")
    # The current date goes in as the first document so date-related
    # prompts can also retrieve something useful.
    documents = [f"Today's date is {today}."]
    # Each API record is assumed to carry Location/Address/Flavor keys;
    # a missing key raises KeyError (same as the original behavior).
    for item in data:
        location = item['Location']
        address = item['Address']
        flavor = item['Flavor']
        documents.append(
            f"The flavor of the day for today at the {location} Culver's "
            f"location at {address} is {flavor}."
        )
    return documents


def _build_collection(documents):
    """Embed each document with Ollama and store it in a fresh ChromaDB collection."""
    client = chromadb.Client()
    collection = client.create_collection(name="docs")
    for i, doc in enumerate(documents):
        embedding = ollama.embeddings(model="nomic-embed-text", prompt=doc)["embedding"]
        collection.add(ids=[str(i)], embeddings=[embedding], documents=[doc])
    return collection


def main():
    """Run the retrieval-augmented flavor-of-the-day Q&A."""
    collection = _build_collection(_fetch_documents())

    # Use the command-line arguments as the prompt, else a sensible default.
    if len(sys.argv) > 1:
        prompt = ' '.join(sys.argv[1:])
    else:
        prompt = "What is the flavor of the day at Culver's?"

    # Embed the prompt and retrieve the single most relevant document.
    query_embedding = ollama.embeddings(prompt=prompt, model="nomic-embed-text")["embedding"]
    results = collection.query(query_embeddings=[query_embedding], n_results=1)
    context = results['documents'][0][0]

    # Ground the model's answer in the retrieved document.
    output = ollama.generate(
        model="llama3:8b",
        prompt=f"Using this data: {context}. Respond to this prompt: {prompt}",
    )
    print(output['response'])


if __name__ == "__main__":
    main()