diff --git a/src/spikeinterface/core/sortinganalyzer.py b/src/spikeinterface/core/sortinganalyzer.py
index d3514a7d21..fc47955814 100644
--- a/src/spikeinterface/core/sortinganalyzer.py
+++ b/src/spikeinterface/core/sortinganalyzer.py
@@ -650,7 +650,7 @@ def _save_or_select_or_merge(
         from spikeinterface.core.sorting_tools import get_ids_after_merging

         new_unit_ids = get_ids_after_merging(self.sorting, units_to_merge, new_unit_ids=unit_ids)
-
+
         if self.has_recording():
             recording = self._recording
         elif self.has_temporary_recording():
diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index 2ace0945d5..469e76fadc 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -68,7 +68,9 @@ def _merge_extension_data(
         self, units_to_merge, new_unit_ids, new_sorting_analyzer, kept_indices=None, verbose=False, **job_kwargs
     ):
         num_shifts = int(self.params["max_lag_ms"] * self.sorting_analyzer.sampling_frequency / 1000)
-        templates_array = get_dense_templates_array(new_sorting_analyzer, return_scaled=self.sorting_analyzer.return_scaled)
+        templates_array = get_dense_templates_array(
+            new_sorting_analyzer, return_scaled=self.sorting_analyzer.return_scaled
+        )
         arr = self.data["similarity"]
         sparsity = new_sorting_analyzer.sparsity
         all_new_unit_ids = new_sorting_analyzer.unit_ids
@@ -213,7 +215,7 @@ def compute_similarity_with_templates_array(
                         distances[count, i, j] /= norm_i + norm_j
                     else:
                         distances[count, i, j] = sklearn.metrics.pairwise.pairwise_distances(src, tgt, metric="cosine")
-
+
                     distances[count, j, i] = distances[count, i, j]

             if num_shifts != 0: