Add new features to the app
Adds an architecture selection feature, a class labels input, and classification score display and download.
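
For reference, a minimal sketch (not part of the diff) of how the new loader and softmax scores can be exercised outside Streamlit; the checkpoint path, class labels, and 224x224 input size below are hypothetical placeholders:

```python
# Hypothetical standalone use of the load_model() signature introduced here;
# the checkpoint path, labels, and input size are illustrative only.
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image

from app_model import load_model

class_labels = ["d_veliger", "pedi_veliger", "umbo_veliger"]  # alphabetical order

model = load_model(
    architecture="EfficientNet_V2_S",           # any option from the sidebar selectbox
    model_path="models/effnetv2s_224x224.pth",  # hypothetical checkpoint
    num_classes=len(class_labels),
)

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

image = transform(Image.open("sample.jpg").convert("RGB")).unsqueeze(0)
with torch.no_grad():
    scores = F.softmax(model(image), dim=1)  # same scores the app displays and offers for download
print(class_labels[int(scores.argmax())], scores.tolist()[0])
```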
W7CH committed Jul 12, 2024
1 parent 4432ff1 commit cf054d4
Showing 2 changed files with 88 additions and 50 deletions.
138 changes: 88 additions & 50 deletions app_model.py
@@ -14,13 +14,9 @@
 
 import torch
 import torch.nn as nn
+import torch.nn.functional as F
 import torchvision.transforms as transforms
-from torchvision.models import efficientnet_v2_m, efficientnet_v2_s
-
-model_types = {
-    "efficientnet_v2_m": efficientnet_v2_m,
-    "efficientnet_v2_s": efficientnet_v2_s,
-}
+import torchvision.models as models
 
 ############################################################################################
 # Functions/variables to be used in the Streamlit app
@@ -47,13 +43,30 @@ def set_theme(theme):
         st.markdown(light, unsafe_allow_html=True)
 
 # Define the model loading function
-def load_model(model_type, model_path):
+def load_model(architecture, model_path, num_classes=5):
     # Load the model checkpoint (remove map_location if you have a GPU)
     loaded_cpt = torch.load(model_path, map_location=torch.device('cpu'))
-    # Define the EfficientNet_V2_M model (by default, no pre-trained weights are used)
-    model = model_types[model_type]()
-    # Modify the classifier to match the number of classes in the dataset
-    model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, 5)
+    # Define the model according to the architecture and modify the number of output classes (by default, no pre-trained weights are used)
+    if architecture == "EfficientNet_V2_S":
+        model = models.efficientnet_v2_s()
+        model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)
+    elif architecture == "EfficientNet_V2_M":
+        model = models.efficientnet_v2_m()
+        model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)
+    elif architecture == "EfficientNet_B7":
+        model = models.efficientnet_b7()
+        model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)
+    elif architecture == "ResNet50":
+        model = models.resnet50()
+        model.fc = nn.Linear(model.fc.in_features, num_classes)
+    elif architecture == "DenseNet121":
+        model = models.densenet121()
+        model.classifier = nn.Linear(model.classifier.in_features, num_classes)
+    elif architecture == "VGG16":
+        model = models.vgg16()
+        model.classifier[-1] = nn.Linear(model.classifier[-1].in_features, num_classes)
+    else:
+        raise ValueError("Unsupported architecture")
     # Load the state_dict in order to load the trained parameters
     model.load_state_dict(loaded_cpt)
     # Set the model to evaluation mode
@@ -66,8 +79,15 @@ def predict_image(image_path, model, transform):
     image = transform(image).unsqueeze(0) # Add batch dimension
     with torch.no_grad():
         outputs = model(image)
+        classif_scores = F.softmax(outputs, dim=1)
         _, predicted = torch.max(outputs, 1)
-    return predicted
+    return predicted, classif_scores
 
+# Define the function to save the probabilities to a file
+def save_probabilities(probas, filename='classification_scores.txt'):
+    with open(filename, 'w') as f:
+        for prob in probas:
+            f.write(f'{prob[0]}: {prob[1].tolist()}\n')
+
 # Function to generate and display the graph of detected objects
 def display_distribution_plot(class_counts, sns_palette="pastel"):
@@ -76,7 +96,7 @@ def display_distribution_plot(class_counts, sns_palette="pastel"):
     # Convert seaborn colors to Plotly-compatible RGBA format
     plotly_colors = ['rgba' + str(tuple(int(255 * c) for c in color[:3]) + (1,)) for color in seaborn_palette]
     # Create a Plotly figure
-    fig = go.Figure(data=[go.Bar(y=list(class_counts.values()), x=list(class_counts.keys()), orientation='v', marker_color=plotly_colors)])
+    fig = go.Figure(data=[go.Bar(y=list(class_counts.values()), x=list(class_counts.keys()), orientation='v', marker_color=plotly_colors, text=list(class_counts.values()), textposition='auto')])
     fig.update_layout(
         title='Distribution of Detected Objects',
         title_font=dict(size=20),
@@ -103,6 +123,13 @@ def display_distribution_plot(class_counts, sns_palette="pastel"):
 # Body of the Streamlit app
 ############################################################################################
 def main():
+    # Set the page configuration
+    st.set_page_config(
+        page_title="Microorganism Classification",
+        page_icon="fairscope_favicon.png",
+        layout = 'wide',
+    )
+
     # Set the title of the Streamlit app
     st.title("Microorganism Classification")
 
@@ -119,42 +146,38 @@ def main():
         """, unsafe_allow_html=True
     )
 
-    # Create a toggle button
-    toggle = st.sidebar.button("Toggle theme", key="theme_toggle")
-
-    # Use a global variable to store the current theme
-    if "theme" not in st.session_state:
-        st.session_state.theme = "light"
-
-    # Change the theme based on the button state
-    if toggle:
-        if st.session_state.theme == "light":
-            st.session_state.theme = "dark"
-        else:
-            st.session_state.theme = "light"
-
-    # Apply the theme to the app
-    set_theme(st.session_state.theme)
+    # Initialize an empty list to store probabilities
+    if 'probabilities' not in st.session_state:
+        st.session_state.probabilities = []
 
+    with st.sidebar:
+        # Add text and link to the sidebar
+        st.markdown("""
+        ### :rocket: Try this easy-to-follow [notebook](https://colab.research.google.com/drive/1iyoA4jVSI0dErl7N3N-rPlx2mrBrV1ad?usp=drive_link) to train your task-specific classifier
+        """)
+        # Load the class labels
+        class_labels = st.text_input("Enter class labels (comma-separated in alphabetical order)", value="d_veliger, pedi_veliger, umbo_veliger").split(", ")
+        # Select the model architecture
+        architecture = st.selectbox(
+            "Select model architecture",
+            ("EfficientNet_V2_M", "EfficientNet_V2_S", "EfficientNet_B7", "ResNet50", "DenseNet121", "VGG16")
+        )
+
+        # Select a model to use for image classification
+        selected_model = st.file_uploader("Upload a model", type=["pth", "pt", "pb"])
+
+        # Wait for the user to select a model
+        if selected_model is not None:
+            # Extract the input image dimensions from the model name
+            pattern = r'(\d{3,4})x(\d{3,4})'
+            image_size = int(re.search(pattern, selected_model.name).group().split("x")[0])
+
+            # Load the selected model in pytorch
+            model = load_model(architecture, os.path.join("models", selected_model.name), num_classes=len(class_labels))
+            st.success('Model loaded successfully!')
 
+    # File uploader for image selection
+    uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg"], accept_multiple_files=True) #"png"
-
-    # List of available AI models
-    available_models = os.listdir("models")
-    selected_model = st.selectbox("Select a model", available_models)
-
-    # Extract the input image dimensions from the model name
-    pattern = r'(\d{3,4})x(\d{3,4})'
-    image_size = int(re.search(pattern, selected_model).group().split("x")[0])
-
-    # Load the selected model in pytorch
-    model = load_model(
-        os.getenv("TORCHVISION_MODEL_TYPE", "efficientnet_v2_m"),
-        os.path.join("models", selected_model),
-    )
-
-    # Load the class labels
-    class_labels = ["Acantharia", "Calanoida", "Neoceratium_petersii", "Ptychodiscus_noctiluca", "Undella"]
-    uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg"], accept_multiple_files=True)
 
     # List to store predicted class labels
     predicted_class_labels = []
@@ -168,7 +191,7 @@ def main():
 
     if uploaded_files is not None:
         # Iterate over uploaded images and predict their classes
-        for uploaded_file in uploaded_files:
+        for (i, uploaded_file) in enumerate(uploaded_files):
            # Read the uploaded image
            image = cv2.imdecode(np.fromstring(uploaded_file.read(), np.uint8), 1)
 
@@ -182,12 +205,18 @@ def main():
             ])
 
             # Perform image classification
-            predicted_class_index = predict_image(uploaded_file, model, transform)
+            predicted_class_index, predicted_classif_scores = predict_image(uploaded_file, model, transform)
+            file_name = f"{i}. {uploaded_file.name}"
+            st.session_state.probabilities.append((file_name, dict(zip(class_labels,predicted_classif_scores.tolist()[0]))))
+            print(predicted_classif_scores)
             predicted_class_label = class_labels[predicted_class_index]
             predicted_class_labels.append(predicted_class_label)
 
             # Display the uploaded image with the predicted class
-            next(cols).image(image, width=150, caption=f"{uploaded_file.name} ({predicted_class_label})", use_column_width=True)
+            next(cols).image(image, width=150, caption=f"{i}. {predicted_class_label} ({torch.max(predicted_classif_scores):.4f})", use_column_width=True)
 
+            # Save the updated probabilities to a text file
+            #save_probabilities(st.session_state.probabilities)
+
     # Determine the number of detected objects
     num_objects = len(predicted_class_labels)
@@ -197,9 +226,18 @@ def main():
 
     # Count the occurrences of each class label
     class_counts = {label: predicted_class_labels.count(label) for label in class_labels}
 
+    # Convert probabilities to string format
+    probabilities_str = '\n'.join([f"{name}: {scores}" for name, scores in st.session_state.probabilities])
+
     # Plot the distribution of detected objects
     if num_objects > 0:
+
+        # Download the classification scores file with streamlit
+        with st.sidebar:
+            st.download_button(label="Download classification scores", data=probabilities_str, file_name="classification_scores.txt", mime="text/plain")
+
+        # Display the distribution of detected objects
         display_distribution_plot(class_counts)
 
 ############################################################################################
Binary file added fairscope_favicon.png
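
As a footnote on the score download feature, a small sketch of the plain-text payload the new sidebar button serves, mirroring how probabilities_str is joined from st.session_state.probabilities in the diff above (the file names and scores are made up):

```python
# Illustrative reconstruction of the downloadable classification_scores.txt content;
# entries mimic the (file_name, {label: score}) tuples kept in st.session_state.probabilities.
probabilities = [
    ("0. img_001.jpg", {"d_veliger": 0.91, "pedi_veliger": 0.06, "umbo_veliger": 0.03}),
    ("1. img_002.jpg", {"d_veliger": 0.12, "pedi_veliger": 0.80, "umbo_veliger": 0.08}),
]
probabilities_str = "\n".join(f"{name}: {scores}" for name, scores in probabilities)
print(probabilities_str)
# In the app, this string is passed to st.download_button(label="Download classification scores",
# data=probabilities_str, file_name="classification_scores.txt", mime="text/plain").
```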