Commit

A Medical Assistance Chatbot (#795, issue closed)
This chatbot provides basic medical assistance: it answers questions about adverse drug reactions, helps locate pharmacies and hospitals, and supports blood pressure tracking, including looking up a patient's blood pressure history by patient ID and logging new readings. It also returns simple supportive responses for users expressing anxiety or low confidence.
ShraddhaSabde committed Jul 18, 2024
1 parent 1628615 commit 3064dc7
Showing 12 changed files with 425 additions and 0 deletions.
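As a quick illustration of how these features are exposed, intents.json maps free-text requests to tags and canned responses, and chat.py wraps the trained classifier in a chatbot_response(msg) helper. Assuming the committed artifacts (chatbot_model.h5, words.pkl, classes.pkl, intents.json) are in the working directory, a single routed query might look like the following sketch (illustrative only, not one of the committed files):

# illustrative: route a blood-pressure lookup through chat.py's chatbot_response
from chat import chatbot_response

reply = chatbot_response("Show blood pressure results for patient")
# the reply is drawn at random from the matched tag's responses,
# e.g. "Please provide Patient ID" or "Patient ID?" for the blood_pressure_search intent
print(reply)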
15 changes: 15 additions & 0 deletions launch.json (VS Code debug configuration)
@@ -0,0 +1,15 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: Current File",
            "type": "python",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal"
        }
    ]
}
Binary file not shown.
Binary file not shown.
64 changes: 64 additions & 0 deletions Medical_Assistance_Chatbot/Chatbot-main/Chatbot-main/chat.py
@@ -0,0 +1,64 @@
import json
import pickle
import random

import nltk
import numpy as np
from nltk.stem import WordNetLemmatizer
from keras.models import load_model

lemmatizer = WordNetLemmatizer()

# trained model and preprocessing artifacts produced by training
model = load_model('chatbot_model.h5')
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))


def clean_up_sentence(sentence):
    # tokenize the sentence and lemmatize each word
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words


# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words - matrix of N words, vocabulary matrix
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                # assign 1 if current word is in the vocabulary position
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)


def predict_class(sentence, model):
    # filter out predictions below a threshold
    p = bow(sentence, words, show_details=False)
    res = model.predict(np.array([p]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
    return return_list


def getResponse(ints, intents_json):
    tag = ints[0]['intent']
    list_of_intents = intents_json['intents']
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result


def chatbot_response(msg):
    ints = predict_class(msg, model)
    res = getResponse(ints, intents)
    return res


if __name__ == "__main__":
    # quick manual check
    print(chatbot_response('do i suck at everything'))
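chat.py assumes the NLTK tokenizer and WordNet data have already been downloaded. A short setup-and-chat sketch on top of it (the download calls and the console loop below are assumptions, not part of the committed files, and the exact NLTK corpora required can vary by NLTK version):

# one-time NLTK data setup (assumed prerequisite)
import nltk
nltk.download('punkt')    # used by nltk.word_tokenize
nltk.download('wordnet')  # used by WordNetLemmatizer

# simple console loop over chat.py's chatbot_response (illustrative)
from chat import chatbot_response

while True:
    msg = input("You: ")
    if msg.strip().lower() in ("quit", "exit"):
        break
    print("Bot:", chatbot_response(msg))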
Binary file not shown.
Binary file not shown.
79 changes: 79 additions & 0 deletions Medical_Assistance_Chatbot/Chatbot-main/Chatbot-main/intents.json
@@ -0,0 +1,79 @@
{"intents": [
{"tag": "greeting",
"patterns": ["Hi there", "How are you", "Is anyone there?","Hey","Hola", "Hello", "Good day"],
"responses": ["Hello, thanks for asking", "Good to see you again", "Hi there, how can I help?"],
"context": [""]
},
{"tag": "goodbye",
"patterns": ["Bye", "See you later", "Goodbye", "Nice chatting to you, bye", "Till next time"],
"responses": ["See you!", "Have a nice day", "Bye! Come back again soon."],
"context": [""]
},
{"tag": "thanks",
"patterns": ["Thanks", "Thank you", "That's helpful", "Awesome, thanks", "Thanks for helping me"],
"responses": ["Happy to help!", "Anytime!", "My pleasure"],
"context": [""]
},
{"tag": "noanswer",
"patterns": [],
"responses": ["Sorry, can't understand you", "Please give me more info", "Not sure I understand"],
"context": [""]
},
{"tag": "options",
"patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"],
"responses": ["I can guide you through Adverse drug reaction list, Blood pressure tracking, Hospitals and Pharmacies", "Offering support for Adverse drug reaction, Blood pressure, Hospitals and Pharmacies"],
"context": [""]
},
{"tag": "adverse_drug",
"patterns": ["How to check Adverse drug reaction?", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ],
"responses": ["Navigating to Adverse drug reaction module"],
"context": [""]
},
{"tag": "blood_pressure",
"patterns": ["Open blood pressure module", "Task related to blood pressure", "Blood pressure data entry", "I want to log blood pressure results", "Blood pressure data management" ],
"responses": ["Navigating to Blood Pressure module"],
"context": [""]
},
{"tag": "blood_pressure_search",
"patterns": ["I want to search for blood pressure result history", "Blood pressure for patient", "Load patient blood pressure result", "Show blood pressure results for patient", "Find blood pressure results by ID" ],
"responses": ["Please provide Patient ID", "Patient ID?"],
"context": ["search_blood_pressure_by_patient_id"]
},
{"tag": "search_blood_pressure_by_patient_id",
"patterns": [],
"responses": ["Loading Blood pressure result for Patient"],
"context": [""]
},
{"tag": "pharmacy_search",
"patterns": ["Find me a pharmacy", "Find pharmacy", "List of pharmacies nearby", "Locate pharmacy", "Search pharmacy" ],
"responses": ["Please provide pharmacy name"],
"context": ["search_pharmacy_by_name"]
},
{"tag": "search_pharmacy_by_name",
"patterns": [],
"responses": ["Loading pharmacy details"],
"context": [""]
},
{"tag": "hospital_search",
"patterns": ["Lookup for hospital", "Searching for hospital to transfer patient", "I want to search hospital data", "Hospital lookup for patient", "Looking up hospital details" ],
"responses": ["Please provide hospital name or location"],
"context": ["search_hospital_by_params"]
},
{"tag": "search_hospital_by_params",
"patterns": [],
"responses": ["Please provide hospital type"],
"context": ["search_hospital_by_type"]
},
{"tag": "search_hospital_by_type",
"patterns": [],
"responses": ["Loading hospital details"],
"context": [""]
},
{
"tag" : "Anxiety",
"patterns":["I am feeling left out","I fucking suck at everything.","I dont have any confidence left in myself anymore","Why i dont get what i want"],
"responses":["Hey! I am here to listen you out. Please feel free to share everything with me. Everything is gonna be great in you life from now onwards! :)","No, You dont suck at everything. I believe in you! :)","I understand. Everything will be perfectly fine. Just recollect memories where you felt proud of yourselves. :)","Because god thinks that this is not right for you!! The more you will think about it the more you will lose yourself, hence Calm down and take a deep breathe. Everything is gonna be Nice soon!"],
"context":[""]
}
]
}
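Extending the bot to a new topic only requires appending another intent object with the same tag/patterns/responses/context shape and retraining. A hedged sketch in Python (the appointment_booking tag and its texts are hypothetical examples, not part of this commit):

# illustrative: append a hypothetical intent to intents.json, then re-run training
import json

with open('intents.json', 'r') as f:
    data = json.load(f)

data['intents'].append({
    "tag": "appointment_booking",                       # hypothetical tag
    "patterns": ["Book an appointment", "I need to see a doctor"],
    "responses": ["Please provide a preferred date and time"],
    "context": [""]
})

with open('intents.json', 'w') as f:
    json.dump(data, f, indent=2)
# re-run train.py afterwards so the new tag and vocabulary are learned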
19 changes: 19 additions & 0 deletions Medical_Assistance_Chatbot/Chatbot-main/Chatbot-main/model.py
@@ -0,0 +1,19 @@
import torch
import torch.nn as nn


class NeuralNetModels(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNetModels, self).__init__()
        self.l1 = nn.Linear(input_size, hidden_size)   # 1st layer: input size in, hidden size out
        self.l2 = nn.Linear(hidden_size, hidden_size)  # 2nd layer: hidden size in, hidden size out
        self.l3 = nn.Linear(hidden_size, num_classes)  # 3rd layer: hidden size in, number of classes out
        self.relu = nn.ReLU()                          # activation for the hidden layers

    def forward(self, x):
        out = self.l1(x)
        out = self.relu(out)
        out = self.l2(out)
        out = self.l3(out)
        # no activation on the output: CrossEntropyLoss (used in train.py) expects raw logits
        return out
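For reference, NeuralNetModels consumes a bag-of-words vector and emits one raw logit per intent tag. A minimal smoke-test sketch (the layer sizes below are placeholders, not values from this commit):

# illustrative smoke test for NeuralNetModels (sizes are placeholders)
import torch
from model import NeuralNetModels

input_size, hidden_size, num_classes = 54, 8, 14    # placeholder dimensions
net = NeuralNetModels(input_size, hidden_size, num_classes)

dummy_bag = torch.rand(1, input_size)               # one fake bag-of-words row
logits = net(dummy_bag)
print(logits.shape)                                 # torch.Size([1, 14])
print(torch.argmax(logits, dim=1))                  # index of the highest-scoring tag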
42 changes: 42 additions & 0 deletions Medical_Assistance_Chatbot/Chatbot-main/Chatbot-main/nltk_utils.py
@@ -0,0 +1,42 @@
'''
Preprocessing strategy:
1. Tokenization
2. Lowercasing and stemming
3. Punctuation removal
4. Bag of words
'''
import numpy as np
import nltk

# nltk.download('punkt')
from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()


def tokenize(sentence):
    # tokenization happens here, e.g. "I am girl" is tokenized to ["I", "am", "girl"]
    return nltk.word_tokenize(sentence)


def stem(word):
    return stemmer.stem(word.lower())


def bag_of_words(tokenized_sentence, all_words):
    '''
    tokenized_sentence = ['I', 'am', 'saylee']
    all_words          = ['i', 'am', 'saylee', 'hello', 'code']   # already stemmed/lowercased
    bag                = [1, 1, 1, 0, 0]
    '''
    tokenized_sentence = [stem(w) for w in tokenized_sentence]
    bag = np.zeros(len(all_words), dtype=np.float32)
    for index, w in enumerate(all_words):
        if w in tokenized_sentence:
            bag[index] = 1.0
    return bag


if __name__ == "__main__":
    # quick manual check; guarded so importing this module (as train.py does) stays silent
    tokenized_sentence = ["I", "am", "Saylee", "you"]
    words = ["hi", "hello", "am", "i", "saylee", "thank", "cool"]
    bag = bag_of_words(tokenized_sentence, words)
    print(bag)
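As the bag_of_words docstring notes, the vocabulary passed in is expected to be stemmed and lowercased already (train.py prepares it that way). A small sketch of the intended call pattern:

# illustrative: bag_of_words with a pre-stemmed, lowercased vocabulary, as train.py prepares it
from nltk_utils import tokenize, stem, bag_of_words

all_words = sorted(set(stem(w) for w in ["Hi", "there", "How", "are", "you", "Goodbye"]))
tokens = tokenize("hi, how are you?")
print(bag_of_words(tokens, all_words))
# each vocabulary position is 1.0 when its stem appears in the tokenized sentence, else 0.0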
103 changes: 103 additions & 0 deletions Medical_Assistance_Chatbot/Chatbot-main/Chatbot-main/train.py
@@ -0,0 +1,103 @@
import json

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from nltk_utils import stem, bag_of_words, tokenize
from model import NeuralNetModels


with open('intents.json', 'r') as f:
    intents = json.load(f)

#print(intents)
all_words = []
tags = []
tag_pattern_words = []

for intent in intents['intents']:
    tag = intent['tag']
    tags.append(tag)
    for pattern in intent['patterns']:
        w = tokenize(pattern)
        all_words.extend(w)  # extend, not append: w is itself a list and we don't want a list of lists
        tag_pattern_words.append((w, tag))

ignore_words = ['?', '.', ',', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))  # set removes duplicate words
tags = sorted(set(tags))
#print(tags)

X_train = []
Y_train = []

for pattern, tag in tag_pattern_words:
    bag = bag_of_words(pattern, all_words)
    X_train.append(bag)

    label = tags.index(tag)
    Y_train.append(label)  # class indices, as expected by CrossEntropyLoss

X_train = np.array(X_train)
Y_train = np.array(Y_train)


class ChatDataset(Dataset):
    def __init__(self):
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = Y_train

    def __getitem__(self, idx):
        return self.x_data[idx], self.y_data[idx]

    def __len__(self):
        return self.n_samples


# Hyperparameters
batch_size = 32
hidden_size = 8
output_size = len(tags)
input_size = len(X_train[0])  # equivalently, len(all_words)
learning_rate = 0.001
num_epochs = 100000
#print(input_size, len(all_words))
#print(output_size, tags)


dataset = ChatDataset()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=0)
model = NeuralNetModels(input_size, hidden_size, output_size).to(device)

# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


for epoch in range(num_epochs):
    for words, labels in train_loader:
        words = words.to(device)
        labels = labels.to(dtype=torch.long)
        labels = labels.to(device)

        # forward pass
        outputs = model(words)
        loss = criterion(outputs, labels)

        # backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()  # backpropagation
        optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f'epoch {epoch+1}/{num_epochs}, loss={loss.item():.4f}')


print(f'final loss, loss={loss.item():.4f}')
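train.py stops after printing the final loss and does not persist anything. A hedged sketch of saving the weights and preprocessing metadata, appended at the end of train.py (the file name data.pth is an assumption; note that chat.py as committed loads a Keras chatbot_model.h5, so a matching PyTorch inference path would also be needed):

# illustrative: persist the trained model and preprocessing metadata (file name is an assumption)
data = {
    "model_state": model.state_dict(),
    "input_size": input_size,
    "hidden_size": hidden_size,
    "output_size": output_size,
    "all_words": all_words,
    "tags": tags,
}
torch.save(data, "data.pth")
print("training artifacts saved to data.pth")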