diff --git a/deep_thought.db b/deep_thought.db
new file mode 100644
index 0000000..f4775b5
Binary files /dev/null and b/deep_thought.db differ
diff --git a/src/v1/database.py b/src/v1/database.py
new file mode 100644
index 0000000..31f494a
--- /dev/null
+++ b/src/v1/database.py
@@ -0,0 +1,41 @@
+import sqlite3
+
+dbase = sqlite3.connect('deep_thought.db')  # open (or create) the SQLite database file
+print('Database opened')
+
+
+dbase.execute(''' CREATE TABLE IF NOT EXISTS chat_history(
+ ID INT PRIMARY KEY NOT NULL,
+ MESSAGE TEXT NOT NULL,
+ TYPE TEXT NOT NULL,
+ TIMESTAMP TEXT NOT NULL,
+ GCHATID TEXT NOT NULL,
+ SLACKID INT NOT NULL) ''')
+
+print('Table created')
+
+def insert_record(ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID):
+    # INSERT OR IGNORE silently skips rows whose primary key already exists.
+    dbase.execute(''' INSERT OR IGNORE INTO chat_history(ID,MESSAGE,TYPE,TIMESTAMP,GCHATID,SLACKID)
+    VALUES(?,?,?,?,?,?)
+''', (ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID))
+    dbase.commit()
+    print('Record inserted')
+
+
+def read_Data():
+    data = dbase.execute(''' SELECT * FROM chat_history''')
+    for record in data:
+        print('ID : ' + str(record[0]))
+        print('MESSAGE : ' + str(record[1]))
+        print('TYPE : ' + str(record[2]))
+        print('TIMESTAMP : ' + str(record[3]))
+        print('GCHATID : ' + str(record[4]))
+        print('SLACKID : ' + str(record[5]) + '\n')
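+
+
+# A minimal usage sketch, assuming this module is run directly; the record
+# values are illustrative placeholders, not real chat data.
+if __name__ == '__main__':
+    insert_record(1, 'hello', 'incoming', '2024-01-01T00:00:00', 'gchat-001', 42)
+    read_Data()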
diff --git a/src/v1/prompt_template.py b/src/v1/prompt_template.py
new file mode 100644
index 0000000..e0db1db
--- /dev/null
+++ b/src/v1/prompt_template.py
@@ -0,0 +1,97 @@
+"""
+This file implements prompt template for llama based models.
+Modify the prompt template based on the model you select.
+This seems to have significant impact on the output of the LLM.
+"""
+
+from langchain.memory import ConversationBufferMemory
+from langchain.prompts import PromptTemplate
+
+# this is specific to Llama-2.
+
+system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions.
+Read the given context before answering questions and think step by step. If you can not answer a user question based on
+the provided context, inform the user. Do not use any other information for answering user. Provide a detailed answer to the question."""
+
+
+def get_prompt_template(system_prompt=system_prompt, promptTemplate_type=None, history=False):
+ if promptTemplate_type == "llama":
+ B_INST, E_INST = "[INST]", "[/INST]"
+        B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+ SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
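+        # Llama-2 chat format: the system prompt is wrapped in <<SYS>> tags inside the first [INST] ... [/INST] turn.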
+ if history:
+ instruction = """
+ Context: {history} \n {context}
+ User: {question}"""
+
+ prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
+ prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
+ else:
+ instruction = """
+ Context: {context}
+ User: {question}"""
+
+ prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
+ prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
+ elif promptTemplate_type == "mistral":
+ B_INST, E_INST = "[INST] ", " [/INST]"
+ if history:
+ prompt_template = (
+ B_INST
+ + system_prompt
+ + """
+
+ Context: {history} \n {context}
+ User: {question}"""
+ + E_INST
+ )
+ prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
+ else:
+ prompt_template = (
+ B_INST
+ + system_prompt
+ + """
+
+ Context: {context}
+ User: {question}"""
+ + E_INST
+ )
+ prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
+ else:
+ # change this based on the model you have selected.
+ if history:
+ prompt_template = (
+ system_prompt
+ + """
+
+ Context: {history} \n {context}
+ User: {question}
+ Answer:"""
+ )
+ prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
+ else:
+ prompt_template = (
+ system_prompt
+ + """
+
+ Context: {context}
+ User: {question}
+ Answer:"""
+ )
+ prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
+
+ memory = ConversationBufferMemory(input_key="question", memory_key="history")
+
+ return (
+ prompt,
+ memory,
+ )
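+
+
+# A minimal usage sketch, assuming this module is run directly; the context and
+# question strings are illustrative placeholders.
+if __name__ == "__main__":
+    prompt, _memory = get_prompt_template(promptTemplate_type="llama", history=True)
+    print(prompt.format(history="", context="The answer is 42.", question="What is the answer?"))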