From d19846eeb5a0e124da4bf2fd133684dfb9b1db0c Mon Sep 17 00:00:00 2001
From: Bhavya sameera Meduri
Date: Mon, 6 Nov 2023 09:06:42 -0600
Subject: [PATCH] initial commit for conversational chatbot

---
 deep_thought.db           | Bin 0 -> 20480 bytes
 src/v1/database.py        |  34 +++++++++++++++
 src/v1/prompt_template.py |  88 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 122 insertions(+)
 create mode 100644 deep_thought.db
 create mode 100644 src/v1/database.py
 create mode 100644 src/v1/prompt_template.py

diff --git a/deep_thought.db b/deep_thought.db
new file mode 100644
index 0000000000000000000000000000000000000000..f4775b57360550e76d0888a6540b87281af03424
GIT binary patch
literal 20480
[20480-byte base85 binary payload omitted]

diff --git a/src/v1/database.py b/src/v1/database.py
new file mode 100644
index 0000000..31f494a
--- /dev/null
+++ b/src/v1/database.py
@@ -0,0 +1,34 @@
+import sqlite3
+
+dbase = sqlite3.connect('deep_thought.db')  # opens the SQLite file, creating it if absent
+print('Database opened')
+
+
+dbase.execute('''CREATE TABLE IF NOT EXISTS chat_history(
+    ID INT PRIMARY KEY NOT NULL,
+    MESSAGE TEXT NOT NULL,
+    TYPE TEXT NOT NULL,
+    TIMESTAMP TEXT NOT NULL,
+    GCHATID TEXT NOT NULL,
+    SLACKID INT NOT NULL)''')
+
+print('Table created')
+
+
+def insert_record(ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID):
+    dbase.execute('''INSERT OR IGNORE INTO chat_history(ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID)
+        VALUES(?, ?, ?, ?, ?, ?)
+''', (ID, MESSAGE, TYPE, TIMESTAMP, GCHATID, SLACKID))
+    dbase.commit()
+    print('Record inserted')
+
+
+def read_data():
+    data = dbase.execute('''SELECT * FROM chat_history''')
+    for record in data:
+        print('ID : ' + str(record[0]))
+        print('MESSAGE : ' + str(record[1]))
+        print('TYPE : ' + str(record[2]))
+        print('TIMESTAMP : ' + str(record[3]))
+        print('GCHATID : ' + str(record[4]))
+        print('SLACKID : ' + str(record[5]) + '\n')
diff --git a/src/v1/prompt_template.py b/src/v1/prompt_template.py
new file mode 100644
index 0000000..e0db1db
--- /dev/null
+++ b/src/v1/prompt_template.py
@@ -0,0 +1,88 @@
+"""
+This file implements prompt templates for Llama-based models.
+Modify the prompt template based on the model you select.
+This seems to have a significant impact on the output of the LLM.
+"""
+
+from langchain.memory import ConversationBufferMemory
+from langchain.prompts import PromptTemplate
+
+# This is specific to Llama-2.
+
+system_prompt = """You are a helpful assistant; you will use the provided context to answer user questions.
+Read the given context before answering questions and think step by step. If you cannot answer a user question based on
+the provided context, inform the user. Do not use any other information to answer the user. Provide a detailed answer to the question."""
+
+
+def get_prompt_template(system_prompt=system_prompt, promptTemplate_type=None, history=False):
+    if promptTemplate_type == "llama":
+        B_INST, E_INST = "[INST]", "[/INST]"
+        B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+        SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
+        if history:
+            instruction = """
+            Context: {history} \n {context}
+            User: {question}"""
+
+            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
+            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
+        else:
+            instruction = """
+            Context: {context}
+            User: {question}"""
+
+            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
+            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
+    elif promptTemplate_type == "mistral":
+        B_INST, E_INST = "[INST] ", " [/INST]"
+        if history:
+            prompt_template = (
+                B_INST
+                + system_prompt
+                + """
+
+            Context: {history} \n {context}
+            User: {question}"""
+                + E_INST
+            )
+            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
+        else:
+            prompt_template = (
+                B_INST
+                + system_prompt
+                + """
+
+            Context: {context}
+            User: {question}"""
+                + E_INST
+            )
+            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
+    else:
+        # Change this based on the model you have selected.
+        if history:
+            prompt_template = (
+                system_prompt
+                + """
+
+            Context: {history} \n {context}
+            User: {question}
+            Answer:"""
+            )
+            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
+        else:
+            prompt_template = (
+                system_prompt
+                + """
+
+            Context: {context}
+            User: {question}
+            Answer:"""
+            )
+            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
+
+    memory = ConversationBufferMemory(input_key="question", memory_key="history")
+
+    return (
+        prompt,
+        memory,
+    )
\ No newline at end of file
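
For reviewers, a minimal usage sketch of the new database helpers. It assumes
the repository root is on PYTHONPATH and that src/ and src/v1/ are importable
as packages (no __init__.py files are included in this patch, so the import
path is an assumption); the sample values are invented:

    from datetime import datetime, timezone

    from src.v1.database import insert_record, read_data  # hypothetical import path

    # Insert one sample row; INSERT OR IGNORE silently skips a duplicate ID.
    insert_record(
        1,                                       # ID (INT primary key)
        'What is the answer to everything?',     # MESSAGE
        'user',                                  # TYPE, e.g. 'user' or 'bot'
        datetime.now(timezone.utc).isoformat(),  # TIMESTAMP is stored as TEXT
        'spaces/AAAA1234',                       # GCHATID (made-up space ID)
        42,                                      # SLACKID (schema declares INT)
    )
    read_data()  # prints every column of every row in chat_history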
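
Similarly, a sketch of how get_prompt_template might be exercised; the context
and question strings are invented for illustration:

    from src.v1.prompt_template import get_prompt_template  # hypothetical import path

    # Build the Llama-2-style prompt without conversation history.
    prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False)

    # PromptTemplate.format substitutes the declared input variables.
    print(prompt.format(
        context="Deep Thought computed the answer over 7.5 million years.",
        question="What is the answer to life, the universe, and everything?",
    ))

With history=True the template also declares a {history} variable, which is
meant to be filled by the returned ConversationBufferMemory when prompt and
memory are handed to a chain together (for example via a RetrievalQA chain's
chain_type_kwargs={"prompt": prompt, "memory": memory}); calling prompt.format
directly on that variant would require passing history yourself.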