embeddings.py
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
# from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.document_loaders import TextLoader
from langchain.text_splitter import MarkdownTextSplitter
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain.docstore.document import Document
import os
import streamlit as st
from dotenv import load_dotenv, find_dotenv
# Load .env and the API key
load_dotenv(find_dotenv(".streamlit/secrets.toml"))
openai_api_key = st.secrets.OPENAI_API_KEY

@st.cache_resource(show_spinner=False)
def initialize_chain(system_prompt, _memory):
    # Chat model used to answer questions over the retrieved documents
    llm = ChatOpenAI(temperature=0, max_tokens=1000, model_name="gpt-4-1106-preview", streaming=True)

    # documents = []
    # for file in os.listdir("data/notes-de-cours/"):
    #     if file.endswith(".pdf"):
    #         pdf_path = "./data/notes-de-cours/" + file
    #         loader = PyPDFLoader(pdf_path)
    #         documents.extend(loader.load())
    #     elif file.endswith('.docx') or file.endswith('.doc'):
    #         doc_path = "./data/notes-de-cours/" + file
    #         loader = Docx2txtLoader(doc_path)
    #         documents.extend(loader.load())
    #     elif file.endswith('.txt'):
    #         text_path = "./data/notes-de-cours/" + file
    #         loader = TextLoader(text_path)
    #         documents.extend(loader.load())

    # Collect every Markdown file under ./data as a raw Document
    documents = []
    folder_path = "./data"
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    for root, dirs, files in os.walk(folder_path):
        for file_name in files:
            if file_name.endswith(".md"):
                file_path = os.path.join(root, file_name)
                with open(file_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                documents.append(Document(page_content=content, metadata={}))

    if documents:
        # Split documents into overlapping chunks before embedding
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1024,
            chunk_overlap=200)
        chunked_documents = text_splitter.split_documents(documents)

        # Embed the chunks and persist them in a local Chroma store
        embeddings = OpenAIEmbeddings()
        vectorstore = Chroma.from_documents(chunked_documents, embedding=embeddings, persist_directory="./vector")
        vectorstore.persist()

        retriever = vectorstore.as_retriever(
            search_type="mmr",  # maximal marginal relevance for more diverse results
            search_kwargs={"k": 6},
        )

        qa = ConversationalRetrievalChain.from_llm(
            llm,
            retriever=retriever,
            memory=_memory,
            verbose=True)  # return_source_documents=True
        return qa
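

# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how initialize_chain might be wired up, assuming the
# module is called from a Streamlit chat app with a ConversationBufferMemory
# holding the chat history. The prompt text and the question below are
# placeholders, not values from the original project.
if __name__ == "__main__":
    from langchain.memory import ConversationBufferMemory

    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    chain = initialize_chain("You are a helpful course assistant.", memory)
    if chain is not None:
        result = chain({"question": "What do the course notes say about embeddings?"})
        print(result["answer"])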