Commit 880a4c3: style tests

bw4sz committed Dec 2, 2023
1 parent 2ca6640 commit 880a4c3
Showing 7 changed files with 139 additions and 216 deletions.
18 changes: 12 additions & 6 deletions deepforest/callbacks.py
@@ -32,7 +32,13 @@ class images_callback(Callback):
         None: either prints validation scores or logs them to the pytorch-lightning logger
     """

-    def __init__(self, savedir, n=2, every_n_epochs=5, select_random=False, color=None, thickness=1):
+    def __init__(self,
+                 savedir,
+                 n=2,
+                 every_n_epochs=5,
+                 select_random=False,
+                 color=None,
+                 thickness=1):
         self.savedir = savedir
         self.n = n
         self.color = color
@@ -43,19 +49,19 @@ def __init__(self, savedir, n=2, every_n_epochs=5, select_random=False, color=None, thickness=1):
     def log_images(self, pl_module):
         # It is not clear if this is per device, or per batch. If per batch, then this will not work.
         df = pl_module.predictions[0]

         # limit to n images, potentially randomly selected
         if self.select_random:
             selected_images = np.random.choice(df.image_path.unique(), self.n)
         else:
             selected_images = df.image_path.unique()[:self.n]
         df = df[df.image_path.isin(selected_images)]

         visualize.plot_prediction_dataframe(
             df,
             root_dir=pl_module.config["validation"]["root_dir"],
             savedir=self.savedir,
             color=self.color,
             thickness=self.thickness)

         try:
@@ -73,4 +79,4 @@ def on_validation_epoch_end(self, trainer, pl_module):

         if trainer.current_epoch % self.every_n_epochs == 0:
             print("Running image callback")
             self.log_images(pl_module)
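For orientation, here is a minimal sketch, not part of this commit, of how the reformatted callback might be attached during training. The csv and directory paths are hypothetical placeholders; the config keys mirror those referenced in the diff, and create_trainer's callbacks argument appears in the main.py hunks below.

from deepforest import main as df_main
from deepforest.callbacks import images_callback

m = df_main.deepforest()
# Hypothetical paths; any annotation csv with box and image_path columns works.
m.config["train"]["csv_file"] = "annotations.csv"
m.config["train"]["root_dir"] = "images/"
m.config["validation"]["csv_file"] = "annotations.csv"
m.config["validation"]["root_dir"] = "images/"

# Save 2 randomly chosen validation images every 5 epochs.
im_callback = images_callback(savedir="callback_plots/",
                              n=2,
                              every_n_epochs=5,
                              select_random=True)
m.create_trainer(callbacks=[im_callback])
m.trainer.fit(m)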
55 changes: 31 additions & 24 deletions deepforest/evaluate.py
@@ -90,8 +96,14 @@ def compute_class_recall(results):

     return class_recall

-def __evaluate_wrapper__(predictions, ground_df, root_dir, iou_threshold, numeric_to_label_dict, savedir=None):
-    """Evaluate a set of predictions against a ground truth csv file
+
+def __evaluate_wrapper__(predictions,
+                         ground_df,
+                         root_dir,
+                         iou_threshold,
+                         numeric_to_label_dict,
+                         savedir=None):
+    """Evaluate a set of predictions against a ground truth csv file
     Args:
         predictions: a pandas dataframe, if supplied a root dir is needed to give the relative path of files in df.name. The labels in ground truth and predictions must match. If one is numeric, the other must be numeric.
         csv_file: a csv file with columns xmin, ymin, xmax, ymax, label, image_path
@@ -101,28 +107,29 @@ def __evaluate_wrapper__(predictions, ground_df, root_dir, iou_threshold, numeric_to_label_dict, savedir=None):
     Returns:
         results: a dictionary of results with keys, results, box_recall, box_precision, class_recall
     """
     # remove empty samples from ground truth
     ground_df = ground_df[~((ground_df.xmin == 0) & (ground_df.xmax == 0))]

     results = evaluate(predictions=predictions,
                        ground_df=ground_df,
                        root_dir=root_dir,
                        iou_threshold=iou_threshold,
                        savedir=savedir)

     # replace classes if not null
     if results is not None:
         results["results"]["predicted_label"] = results["results"][
             "predicted_label"].apply(lambda x: numeric_to_label_dict[x]
                                      if not pd.isnull(x) else x)
         results["results"]["true_label"] = results["results"]["true_label"].apply(
             lambda x: numeric_to_label_dict[x])
         results["predictions"] = predictions
         results["predictions"]["label"] = results["predictions"]["label"].apply(
             lambda x: numeric_to_label_dict[x])

     return results
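The remapping above is easiest to see on a toy frame. The sketch below is illustrative only and not part of the diff; the single-class mapping is hypothetical.

import pandas as pd

numeric_to_label_dict = {0: "Tree"}  # hypothetical one-class mapping
results = pd.DataFrame({"predicted_label": [0, None], "true_label": [0, 0]})

# NaN predictions (unmatched ground truth) are left as-is, as in the wrapper.
results["predicted_label"] = results["predicted_label"].apply(
    lambda x: numeric_to_label_dict[x] if not pd.isnull(x) else x)
results["true_label"] = results["true_label"].apply(
    lambda x: numeric_to_label_dict[x])
print(results)  # code 0 becomes "Tree"; the missing prediction stays NaN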


 def evaluate(predictions, ground_df, root_dir, iou_threshold=0.4, savedir=None):
     """Image annotated crown evaluation routine
     submission can be submitted as a .shp, existing pandas dataframe or .csv path
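A hedged usage sketch for the evaluate() routine shown above; it is not part of the commit, the csv paths are placeholders, and the column names follow the docstring.

import pandas as pd
from deepforest.evaluate import evaluate

# Both frames need xmin, ymin, xmax, ymax, label, image_path columns.
predictions = pd.read_csv("predictions.csv")  # placeholder path
ground_df = pd.read_csv("annotations.csv")    # placeholder path

results = evaluate(predictions=predictions,
                   ground_df=ground_df,
                   root_dir="images/",
                   iou_threshold=0.4)
print(results["box_recall"], results["box_precision"])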
45 changes: 22 additions & 23 deletions deepforest/main.py
Expand Up @@ -197,7 +197,7 @@ def create_trainer(self, logger=None, callbacks=[], **kwargs):
             limit_val_batches=limit_val_batches,
             num_sanity_val_steps=num_sanity_val_steps,
             **kwargs)

     def on_fit_start(self):
         if self.config["train"]["csv_file"] is None:
             raise AttributeError(
@@ -380,22 +380,21 @@ def predict_file(self, csv_file, root_dir, savedir=None, color=None, thickness=1):
"""
df = pd.read_csv(csv_file)
ds = dataset.TreeDataset(csv_file=csv_file,
root_dir=root_dir,
transforms=None,
train=False)
root_dir=root_dir,
transforms=None,
train=False)
dataloader = self.predict_dataloader(ds)

-        results = predict.predict_file(
-            model=self,
-            trainer=self.trainer,
-            annotations=df,
-            dataloader=dataloader,
-            root_dir=root_dir,
-            nms_thresh=self.config["nms_thresh"],
-            savedir=savedir,
-            color=color,
-            thickness=thickness)
+        results = predict.predict_file(model=self,
+                                       trainer=self.trainer,
+                                       annotations=df,
+                                       dataloader=dataloader,
+                                       root_dir=root_dir,
+                                       nms_thresh=self.config["nms_thresh"],
+                                       savedir=savedir,
+                                       color=color,
+                                       thickness=thickness)

         return results

     def predict_tile(self,
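Between these hunks, a usage sketch of predict_file as reformatted above; it is not from the commit, the paths are placeholders, and release weights are assumed via use_release().

from deepforest import main as df_main

m = df_main.deepforest()
m.use_release()  # prebuilt tree-detection checkpoint

# Run prediction over every image listed in the csv; placeholder paths.
results = m.predict_file(csv_file="annotations.csv",
                         root_dir="images/",
                         savedir="prediction_plots/",
                         thickness=2)
print(results.head())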
@@ -558,7 +557,7 @@ def on_validation_epoch_end(self):
         output = {key: value for key, value in output.items() if not key == "classes"}
         self.log_dict(output)
         self.mAP_metric.reset()

         # Evaluate on validation data predictions
         self.predictions_df = pd.concat(self.predictions)
         ground_df = pd.read_csv(self.config["validation"]["csv_file"])
@@ -572,17 +571,17 @@ def on_validation_epoch_end(self):
root_dir=self.config["validation"]["root_dir"],
iou_threshold=self.config["validation"]["iou_threshold"],
savedir=None,
numeric_to_label_dict=self.numeric_to_label_dict
)

numeric_to_label_dict=self.numeric_to_label_dict)

self.log("box_recall", results["box_recall"])
self.log("box_precision", results["box_precision"])
if isinstance(results, pd.DataFrame):
for index, row in results["class_recall"].iterrows():
self.log("{}_Recall".format(self.numeric_to_label_dict[row["label"]]),
row["recall"])
self.log(
"{}_Recall".format(self.numeric_to_label_dict[row["label"]]),row["recall"])
self.log(
"{}_Precision".format(self.numeric_to_label_dict[row["label"]]),row["precision"])
"{}_Precision".format(self.numeric_to_label_dict[row["label"]]),
row["precision"])

     def predict_step(self, batch, batch_idx):
         batch_results = self.model(batch)
8 changes: 6 additions & 2 deletions deepforest/predict.py
@@ -188,6 +188,10 @@ def predict_file(trainer,
     results = pd.concat(results, ignore_index=True)

     if savedir:
-        visualize.plot_prediction_dataframe(results,root_dir=root_dir, savedir=savedir, color=color, thickness=thickness)
+        visualize.plot_prediction_dataframe(results,
+                                            root_dir=root_dir,
+                                            savedir=savedir,
+                                            color=color,
+                                            thickness=thickness)

     return results
7 changes: 6 additions & 1 deletion deepforest/visualize.py
@@ -69,7 +69,12 @@ def plot_prediction_and_targets(image, predictions, targets, image_name, savedir):
     return figure_path


-def plot_prediction_dataframe(df, root_dir, savedir, color=None, thickness=1, ground_truth=None):
+def plot_prediction_dataframe(df,
+                              root_dir,
+                              savedir,
+                              color=None,
+                              thickness=1,
+                              ground_truth=None):
     """For each row in dataframe, call plot predictions and save plot files to disk.
     For multi-class labels, boxes will be colored by labels. Ground truth boxes will all be same color, regardless of class.
     Args:
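To close, a small sketch of calling the reformatted function; it is illustrative rather than from the commit, the paths are placeholders, and color is assumed to be an OpenCV-style BGR tuple.

import pandas as pd
from deepforest import visualize

# Placeholder csv with xmin, ymin, xmax, ymax, label, image_path columns.
df = pd.read_csv("predictions.csv")
visualize.plot_prediction_dataframe(df,
                                    root_dir="images/",
                                    savedir="plots/",
                                    color=(0, 165, 255),  # assumed BGR tuple
                                    thickness=2)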