diff --git a/quantus/metrics/faithfulness/monotonicity_correlation.py b/quantus/metrics/faithfulness/monotonicity_correlation.py
index 73049d9f2..4be495761 100644
--- a/quantus/metrics/faithfulness/monotonicity_correlation.py
+++ b/quantus/metrics/faithfulness/monotonicity_correlation.py
@@ -399,7 +399,7 @@ def evaluate_batch(
         Parameters
         ----------
         model: ModelInterface
-            A ModelInterface that is subject to explanation.
+            A model that is subject to explanation.
         x_batch: np.ndarray
             The input to be evaluated on a batch-basis.
         y_batch: np.ndarray
@@ -412,19 +412,14 @@
         List[float]
             The evaluation results.
         """
-        # Asserts.
-        asserts.assert_features_in_step(
-            features_in_step=self.features_in_step,
-            input_shape=x_batch.shape[2:],
-        )
 
         # Evaluate explanations.
         return [
             self.evaluate_instance(
-                model=model,
-                x=x,
-                y=y,
-                a=a,
+                model,
+                x,
+                y,
+                a,
             )
             for x, y, a in zip(x_batch, y_batch, a_batch)
         ]
diff --git a/quantus/metrics/faithfulness/sufficiency.py b/quantus/metrics/faithfulness/sufficiency.py
index 0af6ff069..a45ee33a4 100644
--- a/quantus/metrics/faithfulness/sufficiency.py
+++ b/quantus/metrics/faithfulness/sufficiency.py
@@ -309,7 +309,22 @@ def evaluate_batch(
         self, *, i_batch, a_sim_vector_batch, y_pred_classes, **_
     ) -> List[float]:
         """
+        TODO: write meaningful docstring about what it computes.
+
+        Parameters
+        ----------
+        i_batch:
+            The indices of the instances in the current batch.
+        a_sim_vector_batch:
+            The custom input to be evaluated on an instance-basis.
+        y_pred_classes:
+            The class predictions of the complete input dataset.
+        _:
+            unused.
+
+        Returns
+        -------
+
         """
         return [
diff --git a/quantus/metrics/localisation/auc.py b/quantus/metrics/localisation/auc.py
index 8e2ffd240..401962c8c 100644
--- a/quantus/metrics/localisation/auc.py
+++ b/quantus/metrics/localisation/auc.py
@@ -286,5 +286,20 @@ def custom_preprocess(
     def evaluate_batch(
         self, *, a_batch: np.ndarray, s_batch: np.ndarray, **_
     ) -> List[float]:
+        """
+
+        Parameters
+        ----------
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        s_batch:
+            A np.ndarray which contains segmentation masks that match the input.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         # TODO: for performance reasons replace for-loop with vectorized dispatch.
         return [self.evaluate_instance(a, s) for a, s in zip(a_batch, s_batch)]
diff --git a/quantus/metrics/localisation/focus.py b/quantus/metrics/localisation/focus.py
index 18354af91..e78d86f69 100644
--- a/quantus/metrics/localisation/focus.py
+++ b/quantus/metrics/localisation/focus.py
@@ -386,4 +386,18 @@ def quadrant_bottom_right(self, a: np.ndarray) -> np.ndarray:
     def evaluate_batch(
         self, *, a_batch: np.ndarray, c_batch: np.ndarray, **_
     ) -> List[float]:
+        """
+        Parameters
+        ----------
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        c_batch:
+            The custom input to be evaluated on a batch-basis.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         return [self.evaluate_instance(a, c) for a, c in zip(a_batch, c_batch)]
diff --git a/quantus/metrics/localisation/pointing_game.py b/quantus/metrics/localisation/pointing_game.py
index b39268ed8..cd9c3685e 100644
--- a/quantus/metrics/localisation/pointing_game.py
+++ b/quantus/metrics/localisation/pointing_game.py
@@ -309,4 +309,19 @@ def custom_preprocess(
     def evaluate_batch(
         self, *, a_batch: np.ndarray, s_batch: np.ndarray, **_
     ) -> List[float]:
+        """
+
+        Parameters
+        ----------
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        s_batch:
+            A np.ndarray which contains segmentation masks that match the input.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         return [self.evaluate_instance(a, s) for a, s in zip(a_batch, s_batch)]
diff --git a/quantus/metrics/localisation/relevance_mass_accuracy.py b/quantus/metrics/localisation/relevance_mass_accuracy.py
index 4016327e4..6cc7a3a67 100644
--- a/quantus/metrics/localisation/relevance_mass_accuracy.py
+++ b/quantus/metrics/localisation/relevance_mass_accuracy.py
@@ -299,4 +299,18 @@ def custom_preprocess(
     def evaluate_batch(
         self, *, a_batch: np.ndarray, s_batch: np.ndarray, **_
     ) -> List[float]:
+        """
+        Parameters
+        ----------
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        s_batch:
+            A np.ndarray which contains segmentation masks that match the input.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         return [self.evaluate_instance(a, s) for a, s in zip(a_batch, s_batch)]
diff --git a/quantus/metrics/localisation/relevance_rank_accuracy.py b/quantus/metrics/localisation/relevance_rank_accuracy.py
index c0323b425..b2d068e24 100644
--- a/quantus/metrics/localisation/relevance_rank_accuracy.py
+++ b/quantus/metrics/localisation/relevance_rank_accuracy.py
@@ -308,5 +308,20 @@ def custom_preprocess(
     def evaluate_batch(
         self, *, a_batch: np.ndarray, s_batch: np.ndarray, **_
     ) -> List[float]:
+        """
+
+        Parameters
+        ----------
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        s_batch:
+            A np.ndarray which contains segmentation masks that match the input.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         # TODO: for performance reasons, this method should be vectorized.
         return [self.evaluate_instance(a, s) for a, s in zip(a_batch, s_batch)]
diff --git a/quantus/metrics/localisation/top_k_intersection.py b/quantus/metrics/localisation/top_k_intersection.py
index f9af43d3b..1b184b5f0 100644
--- a/quantus/metrics/localisation/top_k_intersection.py
+++ b/quantus/metrics/localisation/top_k_intersection.py
@@ -318,4 +318,19 @@ def custom_preprocess(
     def evaluate_batch(
         self, *, a_batch: np.ndarray, s_batch: np.ndarray, **_
     ) -> List[float]:
+        """
+
+        Parameters
+        ----------
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        s_batch:
+            A np.ndarray which contains segmentation masks that match the input.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         return [self.evaluate_instance(a, s) for a, s in zip(a_batch, s_batch)]
diff --git a/quantus/metrics/randomisation/random_logit.py b/quantus/metrics/randomisation/random_logit.py
index 3986fc554..79f97e544 100644
--- a/quantus/metrics/randomisation/random_logit.py
+++ b/quantus/metrics/randomisation/random_logit.py
@@ -333,7 +333,25 @@ def evaluate_batch(
         a_batch: np.ndarray,
         **_,
     ) -> List[float]:
-        # TODO: for performance reasons vectorize this for-loop
+        """
+
+        Parameters
+        ----------
+        model:
+            A model that is subject to explanation.
+        x_batch:
+            A np.ndarray which contains the input data that are explained.
+        y_batch:
+            A np.ndarray which contains the output labels that are explained.
+        a_batch:
+            A np.ndarray which contains pre-computed attributions, i.e., explanations.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         return [
             self.evaluate_instance(model, x, y, a)
             for x, y, a in zip(x_batch, y_batch, a_batch)
diff --git a/quantus/metrics/robustness/continuity.py b/quantus/metrics/robustness/continuity.py
index 38fbd2ab9..f7f8abc08 100644
--- a/quantus/metrics/robustness/continuity.py
+++ b/quantus/metrics/robustness/continuity.py
@@ -445,4 +445,21 @@ def evaluate_batch(
         y_batch: np.ndarray,
         **_,
     ) -> List[Dict[str, int]]:
+        """
+
+        Parameters
+        ----------
+        model:
+            A model that is subject to explanation.
+        x_batch:
+            A np.ndarray which contains the input data that are explained.
+        y_batch:
+            A np.ndarray which contains the output labels that are explained.
+        _:
+            unused.
+
+        Returns
+        -------
+
+        """
         return [self.evaluate_instance(model, x, y) for x, y in zip(x_batch, y_batch)]
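
Usage context for the `evaluate_batch` docstrings above: the batched arrays they describe (`x_batch`, `y_batch`, `a_batch`, `s_batch`) are the same ones a user passes to a metric's public call, which dispatches to `evaluate_batch` and from there to `evaluate_instance`. The sketch below is illustrative only and not part of the patch: the tiny torch model, the random inputs, the stand-in attributions and masks, and the choice of `PointingGame` are all placeholder assumptions.

```python
import numpy as np
import torch
import quantus

# Tiny stand-in classifier so the sketch is self-contained; any torch or tf model works.
model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))

x_batch = np.random.rand(8, 1, 28, 28).astype(np.float32)   # inputs to be explained
y_batch = np.random.randint(0, 10, size=8)                  # labels to be explained
a_batch = np.random.rand(8, 1, 28, 28)                      # stand-in pre-computed attributions
s_batch = (np.random.rand(8, 1, 28, 28) > 0.9).astype(int)  # stand-in binary segmentation masks

# PointingGame is one of the localisation metrics touched above; calling it with the
# batched arrays documented in the new docstrings dispatches to evaluate_batch.
metric = quantus.PointingGame()
scores = metric(
    model=model,
    x_batch=x_batch,
    y_batch=y_batch,
    a_batch=a_batch,
    s_batch=s_batch,
)
print(scores)  # one score per instance, as returned by evaluate_batch
```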
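On the remaining vectorization TODOs (in `auc.py` and `relevance_rank_accuracy.py`): the per-instance ROC-AUC that the AUC metric computes, with attribution values scored against the binary segmentation mask, can be obtained for a whole batch at once through the rank-sum (Mann-Whitney U) identity. The snippet below is a rough sketch under stated assumptions rather than a drop-in replacement: it presumes flattened, strictly binary masks with at least one positive and one negative entry per instance, and it uses ordinal ranks, so tied attribution values would need average ranks (e.g. `scipy.stats.rankdata`) to reproduce the curve-based result exactly.

```python
import numpy as np


def batched_auc(a_batch: np.ndarray, s_batch: np.ndarray) -> list:
    """Per-instance ROC-AUC for a whole batch via the rank-sum (Mann-Whitney U) identity."""
    a_flat = a_batch.reshape(len(a_batch), -1)
    s_flat = s_batch.reshape(len(s_batch), -1).astype(bool)

    # Ordinal 1-based ranks of each attribution value within its own instance.
    order = np.argsort(a_flat, axis=1)
    ranks = np.empty_like(a_flat, dtype=float)
    rows = np.arange(len(a_flat))[:, None]
    ranks[rows, order] = np.arange(1, a_flat.shape[1] + 1)

    n_pos = s_flat.sum(axis=1)       # pixels inside the mask
    n_neg = s_flat.shape[1] - n_pos  # pixels outside the mask
    rank_sum_pos = np.where(s_flat, ranks, 0.0).sum(axis=1)

    # AUC = U / (n_pos * n_neg), with U the Mann-Whitney statistic of the positives.
    u_stat = rank_sum_pos - n_pos * (n_pos + 1) / 2.0
    return (u_stat / (n_pos * n_neg)).tolist()


a_batch = np.random.rand(4, 1, 8, 8)
s_batch = (np.random.rand(4, 1, 8, 8) > 0.7).astype(int)
print(batched_auc(a_batch, s_batch))
# Cross-check against the per-instance loop, if sklearn is installed:
# from sklearn.metrics import roc_auc_score
# print([roc_auc_score(s.flatten(), a.flatten()) for a, s in zip(a_batch, s_batch)])
```

A similar rank-based formulation could serve `relevance_rank_accuracy.py`, with the per-instance mask size playing the role of k.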