WIP: initial commit for conversational chatbot #72

Binary file added deep_thought.db
Binary file not shown.
34 changes: 34 additions & 0 deletions src/v1/database.py
@@ -0,0 +1,34 @@
import sqlite3

dbase = sqlite3.connect('deep_thought.db')  # open (or create) the database file
print('Database opened')


dbase.execute('''CREATE TABLE IF NOT EXISTS chat_history(
    ID INT PRIMARY KEY NOT NULL,
    MESSAGE TEXT NOT NULL,
    TYPE TEXT NOT NULL,
    TIMESTAMP TEXT NOT NULL,
    GCHATID TEXT NOT NULL,
    SLACKID INT NOT NULL)''')

print('Table created')


def insert_record(ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID):
    # Insert one chat message; duplicates on the primary key are silently skipped.
    dbase.execute('''INSERT OR IGNORE INTO chat_history(ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID)
        VALUES(?, ?, ?, ?, ?, ?)''',
        (ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID))
    dbase.commit()
    print('Record inserted')


def read_Data():
    # Print every stored chat record, one field per line.
    data = dbase.execute('''SELECT * FROM chat_history''')
    for record in data:
        print('ID : ' + str(record[0]))
        print('MESSAGE : ' + str(record[1]))
        print('TYPE : ' + str(record[2]))
        print('TIMESTAMP : ' + str(record[3]))
        print('GCHATID : ' + str(record[4]))
        print('SLACKID : ' + str(record[5]) + '\n')
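A minimal usage sketch (not part of this diff), assuming the module is importable as database; the message text, timestamp format, Google Chat space ID, and Slack ID below are placeholder values.

# Hypothetical usage of src/v1/database.py (placeholder values, not part of the PR).
from datetime import datetime

from database import insert_record, read_Data  # importing runs the connect/CREATE TABLE code at module level

insert_record(
    1,                                    # ID: primary key, so a repeated call with 1 is ignored
    'What is the answer to everything?',  # MESSAGE
    'human',                              # TYPE, e.g. 'human' or 'ai' (assumed convention)
    datetime.utcnow().isoformat(),        # TIMESTAMP stored as text
    'spaces/AAAA1234',                    # GCHATID: placeholder Google Chat space ID
    12345,                                # SLACKID: placeholder Slack user ID
)
read_Data()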
88 changes: 88 additions & 0 deletions src/v1/prompt_template.py
@@ -0,0 +1,88 @@
"""
This file implements prompt template for llama based models.
Modify the prompt template based on the model you select.
This seems to have significant impact on the output of the LLM.
"""

from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# This is specific to Llama-2.

system_prompt = """You are a helpful assistant; you will use the provided context to answer user questions.
Read the given context before answering questions and think step by step. If you cannot answer a user question based on
the provided context, inform the user. Do not use any other information to answer the user. Provide a detailed answer to the question."""


def get_prompt_template(system_prompt=system_prompt, promptTemplate_type=None, history=False):
    if promptTemplate_type == "llama":
        B_INST, E_INST = "[INST]", "[/INST]"
        B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
        SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
        if history:
            instruction = """
            Context: {history} \n {context}
            User: {question}"""

            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            instruction = """
            Context: {context}
            User: {question}"""

            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
    elif promptTemplate_type == "mistral":
        B_INST, E_INST = "<s>[INST] ", " [/INST]"
        if history:
            prompt_template = (
                B_INST
                + system_prompt
                + """

            Context: {history} \n {context}
            User: {question}"""
                + E_INST
            )
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            prompt_template = (
                B_INST
                + system_prompt
                + """

            Context: {context}
            User: {question}"""
                + E_INST
            )
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
    else:
        # Change this based on the model you have selected.
        if history:
            prompt_template = (
                system_prompt
                + """

            Context: {history} \n {context}
            User: {question}
            Answer:"""
            )
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            prompt_template = (
                system_prompt
                + """

            Context: {context}
            User: {question}
            Answer:"""
            )
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)

    memory = ConversationBufferMemory(input_key="question", memory_key="history")

    return (
        prompt,
        memory,
    )
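A brief usage sketch (not part of this diff), assuming the module is importable as prompt_template; it builds the Llama-style prompt with history enabled and renders it with placeholder values.

# Hypothetical usage of src/v1/prompt_template.py (placeholder values, not part of the PR).
from prompt_template import get_prompt_template

prompt, memory = get_prompt_template(promptTemplate_type="llama", history=True)

# Render the template directly to inspect the exact string that would be sent to the model.
print(prompt.format(
    history="Earlier turns of the conversation go here.",
    context="Retrieved document text goes here.",
    question="What does the document say?",
))

# The returned ConversationBufferMemory keeps each turn under the "history" key,
# so both objects can be handed to a LangChain chain together.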