From 5fd2627fbcf523dd8ac9c16706120e0e82930942 Mon Sep 17 00:00:00 2001
From: Matthias H Hennig
Date: Thu, 29 Jun 2023 12:44:35 +0100
Subject: [PATCH 001/322] Allow any integer type.

---
 src/spikeinterface/core/numpyextractors.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py
index 398ef18130..17c2849b6d 100644
--- a/src/spikeinterface/core/numpyextractors.py
+++ b/src/spikeinterface/core/numpyextractors.py
@@ -234,7 +234,7 @@ class NumpySortingSegment(BaseSortingSegment):
     def __init__(self, units_dict):
         BaseSortingSegment.__init__(self)
         for unit_id, times in units_dict.items():
-            assert times.dtype.kind == 'i', 'numpy array of spike times must be integer'
+            assert (times.dtype.kind == 'i') or (times.dtype.kind == 'u'), 'numpy array of spike times must be integer'
             assert np.all(np.diff(times) >= 0), 'unsorted times'
         self._units_dict = units_dict

From 077a7fe28932be5d8dbf81bb946529c4ca6e90f9 Mon Sep 17 00:00:00 2001
From: Matthias H Hennig
Date: Thu, 29 Jun 2023 12:45:48 +0100
Subject: [PATCH 002/322] Fix problem with non-numeric unit IDs.

---
 src/spikeinterface/extractors/mdaextractors.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py
index 5b97f5de07..2d4b98635e 100644
--- a/src/spikeinterface/extractors/mdaextractors.py
+++ b/src/spikeinterface/extractors/mdaextractors.py
@@ -197,10 +197,14 @@ def write_sorting(sorting, save_path, write_primary_channels=False):
         times_list = []
         labels_list = []
         primary_channels_list = []
-        for unit_id in unit_ids:
+        for unit_id_i, unit_id in enumerate(unit_ids):
             times = sorting.get_unit_spike_train(unit_id=unit_id)
             times_list.append(times)
-            labels_list.append(np.ones(times.shape) * unit_id)
+            # unit id may not be numeric
+            if unit_id.dtype.kind in 'biufc':
+                labels_list.append(np.ones(times.shape) * unit_id)
+            else:
+                labels_list.append(np.ones(times.shape) * unit_id_i)
             if write_primary_channels:
                 if 'max_channel' in sorting.get_unit_property_names(unit_id):
                     primary_channels_list.append([sorting.get_unit_property(unit_id, 'max_channel')] * times.shape[0])

From 9dc04f1fa68cf7202eed224394bb60b95a7b4e6d Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Fri, 14 Jul 2023 13:44:25 +0200
Subject: [PATCH 003/322] WIP

---
 .../sortingcomponents/matching/circus.py | 517 ++++++++----------
 1 file changed, 218 insertions(+), 299 deletions(-)

diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index 2196320378..8f08aac9c5 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -16,7 +16,8 @@
 except ImportError:
     HAVE_SKLEARN = False

-from spikeinterface.core import get_noise_levels, get_random_data_chunks
+
+from spikeinterface.core import get_noise_levels, get_random_data_chunks, compute_sparsity
 from spikeinterface.sortingcomponents.peak_detection import DetectPeakByChannel

 (potrs,) = scipy.linalg.get_lapack_funcs(("potrs",), dtype=np.float32)
@@ -130,6 +131,38 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True):
     return ret


+def compute_overlaps(templates, num_samples, num_channels, sparsities):
+
+    num_templates = len(templates)
+
+    dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32)
+    for i in range(num_templates):
dense_templates[i, :, sparsities[i]] = templates[i].T + + size = 2 * num_samples - 1 + + all_delays = list(range(0, num_samples+1)) + + overlaps = {} + + for delay in all_delays: + source = dense_templates[:, :delay, :].reshape(num_templates, -1) + target = dense_templates[:, num_samples-delay:, :].reshape(num_templates, -1) + + overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) + + if delay < num_samples: + overlaps[size - delay + 1] = overlaps[delay].T.tocsr() + + new_overlaps = [] + + for i in range(num_templates): + data = [overlaps[j][i, :].T for j in range(size)] + data = scipy.sparse.hstack(data) + new_overlaps += [data] + + return new_overlaps + class CircusOMPPeeler(BaseTemplateMatchingEngine): """ @@ -152,11 +185,6 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): (Minimal, Maximal) amplitudes allowed for every template omp_min_sps: float Stopping criteria of the OMP algorithm, in percentage of the norm - sparsify_threshold: float - Templates are sparsified in order to keep only the channels necessary - to explain. ptp limit for considering a channel as silent - smoothing_factor: float - Templates are smoothed via Spline Interpolation noise_levels: array The noise levels, for every channels. If None, they will be automatically computed @@ -175,133 +203,77 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - "smoothing_factor": 0.25, + 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1}, "ignored_ids": [], + "vicinity" : 0 } - @classmethod - def _sparsify_template(cls, template, sparsify_threshold): - is_silent = template.ptp(0) < sparsify_threshold - template[:, is_silent] = 0 - (active_channels,) = np.where(np.logical_not(is_silent)) - - return template, active_channels - - @classmethod - def _regularize_template(cls, template, smoothing_factor=0.25): - nb_channels = template.shape[1] - nb_timesteps = template.shape[0] - xaxis = np.arange(nb_timesteps) - for i in range(nb_channels): - z = scipy.interpolate.UnivariateSpline(xaxis, template[:, i]) - z.set_smoothing_factor(smoothing_factor) - template[:, i] = z(xaxis) - return template - @classmethod def _prepare_templates(cls, d): - waveform_extractor = d["waveform_extractor"] - num_samples = d["num_samples"] - num_channels = d["num_channels"] - num_templates = len(d["waveform_extractor"].sorting.unit_ids) + + waveform_extractor = d['waveform_extractor'] + num_templates = len(d['waveform_extractor'].sorting.unit_ids) - templates = waveform_extractor.get_all_templates(mode="median").copy() + if not waveform_extractor.is_sparse(): + sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + else: + sparsity = waveform_extractor.sparsity.mask + + templates = waveform_extractor.get_all_templates(mode='median').copy() - d["sparsities"] = {} - d["templates"] = {} - d["norms"] = np.zeros(num_templates, dtype=np.float32) + d['sparsities'] = {} + d['templates'] = {} + d['norms'] = np.zeros(num_templates, dtype=np.float32) for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): - if d["smoothing_factor"] > 0: - template = cls._regularize_template(templates[count], d["smoothing_factor"]) - else: - template = templates[count] - template, active_channels = cls._sparsify_template(template, d["sparsify_threshold"]) - d["sparsities"][count] = active_channels - d["norms"][count] = np.linalg.norm(template) - d["templates"][count] = template[:, active_channels] / d["norms"][count] - - return d - - @classmethod - def 
_prepare_overlaps(cls, d): - templates = d["templates"] - num_samples = d["num_samples"] - num_channels = d["num_channels"] - num_templates = d["num_templates"] - sparsities = d["sparsities"] - - dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) - for i in range(num_templates): - dense_templates[i, :, sparsities[i]] = templates[i].T - - size = 2 * num_samples - 1 - - all_delays = list(range(0, num_samples + 1)) - - overlaps = {} - - for delay in all_delays: - source = dense_templates[:, :delay, :].reshape(num_templates, -1) - target = dense_templates[:, num_samples - delay :, :].reshape(num_templates, -1) - - overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) - - if delay < num_samples: - overlaps[size - delay + 1] = overlaps[delay].T.tocsr() - - new_overlaps = [] - - for i in range(num_templates): - data = [overlaps[j][i, :].T for j in range(size)] - data = scipy.sparse.hstack(data) - new_overlaps += [data] - - d["overlaps"] = new_overlaps + template = templates[count] + d['sparsities'][count], = np.nonzero(sparsity[count]) + d['norms'][count] = np.linalg.norm(template) + d['templates'][count] = template[:, d['sparsities'][count]]/d['norms'][count] return d @classmethod def initialize_and_check_kwargs(cls, recording, kwargs): + d = cls._default_params.copy() d.update(kwargs) - # assert isinstance(d['waveform_extractor'], WaveformExtractor) - - for v in ["omp_min_sps"]: - assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" + #assert isinstance(d['waveform_extractor'], WaveformExtractor) - d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() - d["num_samples"] = d["waveform_extractor"].nsamples - d["nbefore"] = d["waveform_extractor"].nbefore - d["nafter"] = d["waveform_extractor"].nafter - d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() + for v in ['omp_min_sps']: + assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]' + + d['num_channels'] = d['waveform_extractor'].recording.get_num_channels() + d['num_samples'] = d['waveform_extractor'].nsamples + d['nbefore'] = d['waveform_extractor'].nbefore + d['nafter'] = d['waveform_extractor'].nafter + d['sampling_frequency'] = d['waveform_extractor'].recording.get_sampling_frequency() + d['vicinity'] *= d['num_samples'] - if d["noise_levels"] is None: - print("CircusOMPPeeler : noise should be computed outside") - d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) + if d['noise_levels'] is None: + print('CircusOMPPeeler : noise should be computed outside') + d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'], return_scaled=False) - if d["templates"] is None: + if d['templates'] is None: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities"]: - assert d[key] is not None, "If templates are provided, %d should also be there" % key + for key in ['norms', 'sparsities']: + assert d[key] is not None, "If templates are provided, %d should also be there" %key - d["num_templates"] = len(d["templates"]) + d['num_templates'] = len(d['templates']) - if d["overlaps"] is None: - d = cls._prepare_overlaps(d) + if d['overlaps'] is None: + d['overlaps'] = compute_overlaps(d['templates'], d['num_samples'], d['num_channels'], d['sparsities']) - d["ignored_ids"] = np.array(d["ignored_ids"]) + d['ignored_ids'] = np.array(d['ignored_ids']) - omp_min_sps = d["omp_min_sps"] - norms = d["norms"] - sparsities = d["sparsities"] + omp_min_sps = d['omp_min_sps'] + 
nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d['stop_criteria'] = omp_min_sps * np.sqrt(nb_active_channels * d['num_samples']) - nb_active_channels = np.array([len(sparsities[i]) for i in range(d["num_templates"])]) - d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + return d - return d @classmethod def serialize_method_kwargs(cls, kwargs): @@ -321,26 +293,27 @@ def get_margin(cls, recording, kwargs): @classmethod def main_function(cls, traces, d): - templates = d["templates"] - num_templates = d["num_templates"] - num_channels = d["num_channels"] - num_samples = d["num_samples"] - overlaps = d["overlaps"] - norms = d["norms"] - nbefore = d["nbefore"] - nafter = d["nafter"] + templates = d['templates'] + num_templates = d['num_templates'] + num_channels = d['num_channels'] + num_samples = d['num_samples'] + overlaps = d['overlaps'] + norms = d['norms'] + nbefore = d['nbefore'] + nafter = d['nafter'] omp_tol = np.finfo(np.float32).eps - num_samples = d["nafter"] + d["nbefore"] + num_samples = d['nafter'] + d['nbefore'] neighbor_window = num_samples - 1 - min_amplitude, max_amplitude = d["amplitudes"] - sparsities = d["sparsities"] - ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"] + min_amplitude, max_amplitude = d['amplitudes'] + sparsities = d['sparsities'] + ignored_ids = d['ignored_ids'] + stop_criteria = d['stop_criteria'][:, np.newaxis] + vicinity = d['vicinity'] - if "cached_fft_kernels" not in d: - d["cached_fft_kernels"] = {"fshape": 0} + if 'cached_fft_kernels' not in d: + d['cached_fft_kernels'] = {'fshape' : 0} - cached_fft_kernels = d["cached_fft_kernels"] + cached_fft_kernels = d['cached_fft_kernels'] num_timesteps = len(traces) @@ -352,22 +325,24 @@ def main_function(cls, traces, d): dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) - fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} + fft_cache = {'full' : sp_fft.rfftn(traces, fshape, axes=axes)} scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) - flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] + flagged_chunk = cached_fft_kernels['fshape'] != fshape[0] for i in range(num_templates): + if i not in ignored_ids: + if i not in cached_fft_kernels or flagged_chunk: kernel_filter = np.ascontiguousarray(templates[i][::-1].T) - cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) - cached_fft_kernels["fshape"] = fshape[0] + cached_fft_kernels.update({i : sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) + cached_fft_kernels['fshape'] = fshape[0] - fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) + fft_cache.update({'mask' : sparsities[i], 'template' : cached_fft_kernels[i]}) - convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") + convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode='valid') if len(convolution) > 0: scalar_products[i] = convolution.sum(0) else: @@ -381,7 +356,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((num_peaks, num_peaks), dtype=np.float32) + M = np.zeros((100, 100), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, 
dtype=np.float32) @@ -392,13 +367,17 @@ def main_function(cls, traces, d): neighbors = {} cached_overlaps = {} - is_valid = scalar_products > stop_criteria + is_valid = (scalar_products > stop_criteria) + all_amplitudes = np.zeros(0, dtype=np.float32) + is_in_vicinity = np.zeros(0, dtype=np.int32) while np.any(is_valid): + best_amplitude_ind = scalar_products[is_valid].argmax() best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) - + if num_selection > 0: + delta_t = selection[1] - peak_index idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] myline = num_samples + delta_t[idx] @@ -407,25 +386,42 @@ def main_function(cls, traces, d): cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() if num_selection == M.shape[0]: - Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) + Z = np.zeros((2*num_selection, 2*num_selection), dtype=np.float32) Z[:num_selection, :num_selection] = M M = Z M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] - scipy.linalg.solve_triangular( - M[:num_selection, :num_selection], - M[num_selection, :num_selection], - trans=0, - lower=1, - overwrite_b=True, - check_finite=False, - ) - - v = nrm2(M[num_selection, :num_selection]) ** 2 - Lkk = 1 - v - if Lkk <= omp_tol: # selected atoms are dependent - break - M[num_selection, num_selection] = np.sqrt(Lkk) + + if vicinity == 0: + scipy.linalg.solve_triangular(M[:num_selection, :num_selection], M[num_selection, :num_selection], trans=0, + lower=1, + overwrite_b=True, + check_finite=False) + + v = nrm2(M[num_selection, :num_selection]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] + + if len(is_in_vicinity) > 0: + + L = M[is_in_vicinity, :][:, is_in_vicinity] + + M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular(L, M[num_selection, is_in_vicinity], trans=0, + lower=1, + overwrite_b=True, + check_finite=False) + + v = nrm2(M[num_selection, is_in_vicinity]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + M[num_selection, num_selection] = 1.0 else: M[0, 0] = 1 @@ -435,45 +431,54 @@ def main_function(cls, traces, d): selection = all_selections[:, :num_selection] res_sps = full_sps[selection[0], selection[1]] - all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) - - all_amplitudes /= norms[selection[0]] - - diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] + if vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, + lower=True, overwrite_b=False) + all_amplitudes /= norms[selection[0]] + else: + is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) + all_amplitudes = np.append(all_amplitudes, np.float32(0)) + L = M[is_in_vicinity, :][:, is_in_vicinity] + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], + lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] + + diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]]) modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] final_amplitudes[selection[0], selection[1]] = all_amplitudes for i in modified: - tmp_best, tmp_peak = selection[:, i] - diff_amp = 
diff_amplitudes[i] * norms[tmp_best] + tmp_best, tmp_peak = selection[:, i] + diff_amp = diff_amplitudes[i]*norms[tmp_best] + if not tmp_best in cached_overlaps: cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] - neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} + neighbors[tmp_peak] = {'idx' : idx, 'tdx' : tdx} - idx = neighbors[tmp_peak]["idx"] - tdx = neighbors[tmp_peak]["tdx"] + idx = neighbors[tmp_peak]['idx'] + tdx = neighbors[tmp_peak]['tdx'] - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] - scalar_products[:, idx[0] : idx[1]] -= to_add + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]] + scalar_products[:, idx[0]:idx[1]] -= to_add - is_valid = scalar_products > stop_criteria + is_valid = (scalar_products > stop_criteria) - is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) + is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude) valid_indices = np.where(is_valid) num_spikes = len(valid_indices[0]) - spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] - spikes["channel_index"][:num_spikes] = 0 - spikes["cluster_index"][:num_spikes] = valid_indices[0] - spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] - + spikes['sample_index'][:num_spikes] = valid_indices[1] + d['nbefore'] + spikes['channel_index'][:num_spikes] = 0 + spikes['cluster_index'][:num_spikes] = valid_indices[0] + spikes['amplitude'][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + spikes = spikes[:num_spikes] - order = np.argsort(spikes["sample_index"]) + order = np.argsort(spikes['sample_index']) spikes = spikes[order] return spikes @@ -515,9 +520,6 @@ class CircusPeeler(BaseTemplateMatchingEngine): Maximal amplitude allowed for every template min_amplitude: float Minimal amplitude allowed for every template - sparsify_threshold: float - Templates are sparsified in order to keep only the channels necessary - to explain a given fraction of the total norm use_sparse_matrix_threshold: float If density of the templates is below a given threshold, sparse matrix are used (memory efficient) @@ -529,129 +531,57 @@ class CircusPeeler(BaseTemplateMatchingEngine): """ _default_params = { - "peak_sign": "neg", - "exclude_sweep_ms": 0.1, - "jitter_ms": 0.1, - "detect_threshold": 5, - "noise_levels": None, - "random_chunk_kwargs": {}, - "sparsify_threshold": 0.99, - "max_amplitude": 1.5, - "min_amplitude": 0.5, - "use_sparse_matrix_threshold": 0.25, - "progess_bar_steps": False, - "waveform_extractor": None, - "smoothing_factor": 0.25, + 'peak_sign': 'neg', + 'exclude_sweep_ms': 0.1, + 'jitter_ms' : 0.1, + 'detect_threshold': 5, + 'noise_levels': None, + 'random_chunk_kwargs': {}, + 'max_amplitude' : 1.5, + 'min_amplitude' : 0.5, + 'use_sparse_matrix_threshold' : 0.25, + 'progess_bar_steps' : False, + 'waveform_extractor': None, + 'sparse_kwargs' : {'method' : 'threshold', 'threshold' : 0.5, 'peak_sign' : 'both'} } - @classmethod - def _sparsify_template(cls, template, sparsify_threshold, noise_levels): - is_silent = template.std(0) < 0.1 * noise_levels - - template[:, is_silent] = 0 - - channel_norms = np.linalg.norm(template, axis=0) ** 2 - total_norm = np.linalg.norm(template) ** 2 - - idx = np.argsort(channel_norms)[::-1] - explained_norms = 
np.cumsum(channel_norms[idx] / total_norm) - channel = np.searchsorted(explained_norms, sparsify_threshold) - active_channels = np.sort(idx[:channel]) - template[:, idx[channel:]] = 0 - return template, active_channels - - @classmethod - def _regularize_template(cls, template, smoothing_factor=0.25): - nb_channels = template.shape[1] - nb_timesteps = template.shape[0] - xaxis = np.arange(nb_timesteps) - for i in range(nb_channels): - z = scipy.interpolate.UnivariateSpline(xaxis, template[:, i]) - z.set_smoothing_factor(smoothing_factor) - template[:, i] = z(xaxis) - return template - @classmethod def _prepare_templates(cls, d): - parameters = d - waveform_extractor = parameters["waveform_extractor"] - num_samples = parameters["num_samples"] - num_channels = parameters["num_channels"] - num_templates = parameters["num_templates"] - max_amplitude = parameters["max_amplitude"] - min_amplitude = parameters["min_amplitude"] - use_sparse_matrix_threshold = parameters["use_sparse_matrix_threshold"] + + waveform_extractor = d['waveform_extractor'] + num_samples = d['num_samples'] + num_channels = d['num_channels'] + num_templates = d['num_templates'] + use_sparse_matrix_threshold = d['use_sparse_matrix_threshold'] - parameters["norms"] = np.zeros(num_templates, dtype=np.float32) + d['norms'] = np.zeros(num_templates, dtype=np.float32) - all_units = list(parameters["waveform_extractor"].sorting.unit_ids) + all_units = list(d['waveform_extractor'].sorting.unit_ids) - templates = waveform_extractor.get_all_templates(mode="median").copy() + if not waveform_extractor.is_sparse(): + sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + templates = waveform_extractor.get_all_templates(mode='median').copy() + d['sparsities'] = {} + for count, unit_id in enumerate(all_units): - if parameters["smoothing_factor"] > 0: - templates[count] = cls._regularize_template(templates[count], parameters["smoothing_factor"]) - templates[count], _ = cls._sparsify_template( - templates[count], parameters["sparsify_threshold"], parameters["noise_levels"] - ) - parameters["norms"][count] = np.linalg.norm(templates[count]) - templates[count] /= parameters["norms"][count] + d['sparsities'][count], = np.nonzero(sparsity[count]) + templates[count][sparsity[count] == False] = 0 + d['norms'][count] = np.linalg.norm(templates[count]) + templates[count] /= d['norms'][count] templates = templates.reshape(num_templates, -1) - nnz = np.sum(templates != 0) / (num_templates * num_samples * num_channels) + nnz = np.sum(templates != 0)/(num_templates * num_samples * num_channels) if nnz <= use_sparse_matrix_threshold: templates = scipy.sparse.csr_matrix(templates) - print(f"Templates are automatically sparsified (sparsity level is {nnz})") - parameters["is_dense"] = False - else: - parameters["is_dense"] = True - - parameters["templates"] = templates - - return parameters - - @classmethod - def _prepare_overlaps(cls, d): - templates = d["templates"] - num_samples = d["num_samples"] - num_channels = d["num_channels"] - num_templates = d["num_templates"] - is_dense = d["is_dense"] - - if not is_dense: - dense_templates = templates.toarray() + print(f'Templates are automatically sparsified (sparsity level is {nnz})') + d['is_dense'] = False else: - dense_templates = templates - - dense_templates = dense_templates.reshape(num_templates, num_samples, num_channels) - - size = 2 * num_samples - 1 - - all_delays = list(range(0, num_samples + 1)) - if d["progess_bar_steps"]: - all_delays = tqdm(all_delays, desc="[1] compute 
overlaps") - - overlaps = {} - - for delay in all_delays: - source = dense_templates[:, :delay, :].reshape(num_templates, -1) - target = dense_templates[:, num_samples - delay :, :].reshape(num_templates, -1) - - overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) + d['is_dense'] = True - if delay < num_samples: - overlaps[size - delay] = overlaps[delay].T.tocsr() - - new_overlaps = [] - - for i in range(num_templates): - data = [overlaps[j][i, :].T for j in range(size)] - data = scipy.sparse.hstack(data) - new_overlaps += [data] - - d["overlaps"] = new_overlaps + d['templates'] = templates return d @@ -661,9 +591,9 @@ def _mcc_error(cls, bounds, good, bad): fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1])) tp = np.sum((bounds[0] <= good) & (good <= bounds[1])) tn = np.sum((bad < bounds[0]) | (bad > bounds[1])) - denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) + denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn) if denom > 0: - mcc = 1 - (tp * tn - fp * fn) / np.sqrt(denom) + mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom) else: mcc = 1 return mcc @@ -708,16 +638,6 @@ def _optimize_amplitudes(cls, noise_snippets, d): res = scipy.optimize.differential_evolution(cls._cost_function_mcc, bounds=cost_bounds, args=cost_kwargs) parameters["amplitudes"][count] = res.x - # import pylab as plt - # plt.hist(good, 100, alpha=0.5) - # plt.hist(bad, 100, alpha=0.5) - # plt.hist(noise[count], 100, alpha=0.5) - # ymin, ymax = plt.ylim() - # plt.plot([res.x[0], res.x[0]], [ymin, ymax], 'k--') - # plt.plot([res.x[1], res.x[1]], [ymin, ymax], 'k--') - # plt.savefig('test_%d.png' %count) - # plt.close() - return d @classmethod @@ -727,7 +647,6 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters.update(kwargs) # assert isinstance(d['waveform_extractor'], WaveformExtractor) - for v in ["sparsify_threshold", "use_sparse_matrix_threshold"]: assert (default_parameters[v] >= 0) and (default_parameters[v] <= 1), f"{v} should be in [0, 1]" @@ -817,31 +736,31 @@ def main_function(cls, traces, d): sym_patch = d["sym_patch"] peak_traces = traces[margin // 2 : -margin // 2, :] - peak_sample_ind, peak_chan_ind = DetectPeakByChannel.detect_peaks( + peak_sample_index, peak_chan_ind = DetectPeakByChannel.detect_peaks( peak_traces, peak_sign, abs_threholds, exclude_sweep_size ) if jitter > 0: - jittered_peaks = peak_sample_ind[:, np.newaxis] + np.arange(-jitter, jitter) + jittered_peaks = peak_sample_index[:, np.newaxis] + np.arange(-jitter, jitter) jittered_channels = peak_chan_ind[:, np.newaxis] + np.zeros(2 * jitter) mask = (jittered_peaks > 0) & (jittered_peaks < len(peak_traces)) jittered_peaks = jittered_peaks[mask] jittered_channels = jittered_channels[mask] - peak_sample_ind, unique_idx = np.unique(jittered_peaks, return_index=True) + peak_sample_index, unique_idx = np.unique(jittered_peaks, return_index=True) peak_chan_ind = jittered_channels[unique_idx] else: - peak_sample_ind, unique_idx = np.unique(peak_sample_ind, return_index=True) + peak_sample_index, unique_idx = np.unique(peak_sample_index, return_index=True) peak_chan_ind = peak_chan_ind[unique_idx] - num_peaks = len(peak_sample_ind) + num_peaks = len(peak_sample_index) if sym_patch: - snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_ind] - peak_sample_ind += margin // 2 + snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_index] + peak_sample_index += margin // 2 else: - peak_sample_ind += margin // 2 + peak_sample_index += margin // 2 snippet_window = np.arange(-d["nbefore"], d["nafter"]) - snippets = 
traces[peak_sample_ind[:, np.newaxis] + snippet_window] + snippets = traces[peak_sample_index[:, np.newaxis] + snippet_window] if num_peaks > 0: snippets = snippets.reshape(num_peaks, -1) @@ -865,10 +784,10 @@ def main_function(cls, traces, d): best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) best_amplitude = scalar_products[best_cluster_ind, peak_index] - best_peak_sample_ind = peak_sample_ind[peak_index] + best_peak_sample_index = peak_sample_index[peak_index] best_peak_chan_ind = peak_chan_ind[peak_index] - peak_data = peak_sample_ind - peak_sample_ind[peak_index] + peak_data = peak_sample_index - peak_sample_index[peak_index] is_valid_nn = np.searchsorted(peak_data, [-neighbor_window, neighbor_window + 1]) idx_neighbor = peak_data[is_valid_nn[0] : is_valid_nn[1]] + neighbor_window @@ -880,7 +799,7 @@ def main_function(cls, traces, d): scalar_products[:, is_valid_nn[0] : is_valid_nn[1]] += to_add scalar_products[best_cluster_ind, is_valid_nn[0] : is_valid_nn[1]] = -np.inf - spikes["sample_index"][num_spikes] = best_peak_sample_ind + spikes["sample_index"][num_spikes] = best_peak_sample_index spikes["channel_index"][num_spikes] = best_peak_chan_ind spikes["cluster_index"][num_spikes] = best_cluster_ind spikes["amplitude"][num_spikes] = best_amplitude From 0f9fee6fe788a0cdc44c18d19fd8b0f11f10ff4f Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 17 Jul 2023 10:30:33 +0200 Subject: [PATCH 004/322] WIP --- .../sorters/internal/spyking_circus2.py | 59 ++++++++++--------- .../clustering/clustering_tools.py | 2 +- .../clustering/random_projections.py | 3 +- 3 files changed, 35 insertions(+), 29 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 24c4a7ccfc..18db5f37c8 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -3,7 +3,7 @@ import os import shutil import numpy as np -import os +import psutil from spikeinterface.core import NumpySorting, load_extractor, BaseRecording, get_noise_levels, extract_waveforms from spikeinterface.core.job_tools import fix_job_kwargs @@ -18,23 +18,24 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): - sorter_name = "spykingcircus2" + sorter_name = 'spykingcircus2' _default_params = { - "general": {"ms_before": 2, "ms_after": 2, "local_radius_um": 100}, - "waveforms": {"max_spikes_per_unit": 200, "overwrite": True}, - "filtering": {"dtype": "float32"}, - "detection": {"peak_sign": "neg", "detect_threshold": 5}, - "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, - "localization": {}, - "clustering": {}, - "matching": {}, - "registration": {}, - "apply_preprocessing": True, - "shared_memory": False, - "job_kwargs": {}, + 'general' : {'ms_before' : 2, 'ms_after' : 2, 'local_radius_um' : 75}, + 'waveforms' : {'max_spikes_per_unit' : 200, 'overwrite' : True, 'sparse' : True, + 'method' : 'ptp', 'threshold' : 1}, + 'filtering' : {'dtype' : 'float32'}, + 'detection' : {'peak_sign': 'neg', 'detect_threshold': 5}, + 'selection' : {'n_peaks_per_channel' : 5000, 'min_n_peaks' : 20000}, + 'localization' : {}, + 'clustering': {}, + 'matching': {}, + 'apply_preprocessing': True, + 'shared_memory' : True, + 'job_kwargs' : {'n_jobs' : -1, 'chunk_memory' : "10M"} } + @classmethod def get_sorter_version(cls): return "2.0" @@ -63,8 +64,6 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## First, we are 
filtering the data filtering_params = params["filtering"].copy() if params["apply_preprocessing"]: - # if recording.is_filtered == True: - # print('Looks like the recording is already filtered, check preprocessing!') recording_f = bandpass_filter(recording, **filtering_params) recording_f = common_reference(recording_f) else: @@ -102,12 +101,15 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We launch a clustering (using hdbscan) relying on positions and features extracted on ## the fly from the snippets - clustering_params = params["clustering"].copy() - clustering_params.update(params["waveforms"]) - clustering_params.update(params["general"]) - clustering_params.update(dict(shared_memory=params["shared_memory"])) - clustering_params["job_kwargs"] = job_kwargs - clustering_params["tmp_folder"] = sorter_output_folder / "clustering" + clustering_params = params['clustering'].copy() + clustering_params['waveforms_kwargs'] = params['waveforms'] + + for k in ['ms_before', 'ms_after']: + clustering_params['waveforms_kwargs'][k] = params['general'][k] + + clustering_params.update(dict(shared_memory=params['shared_memory'])) + clustering_params['job_kwargs'] = job_kwargs + clustering_params['tmp_folder'] = sorter_output_folder / "clustering" labels, peak_labels = find_cluster_from_peaks( recording_f, selected_peaks, method="random_projections", method_kwargs=clustering_params @@ -122,15 +124,18 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): sorting = sorting.save(folder=clustering_folder) - ## We get the templates our of such a clustering - waveforms_params = params["waveforms"].copy() + ## We get the templates our of such a clustering + waveforms_params = params['waveforms'].copy() waveforms_params.update(job_kwargs) - if params["shared_memory"]: - mode = "memory" + for k in ['ms_before', 'ms_after']: + waveforms_params[k] = params['general'][k] + + if params['shared_memory']: + mode = 'memory' waveforms_folder = None else: - mode = "folder" + mode = 'folder' waveforms_folder = sorter_output_folder / "waveforms" we = extract_waveforms( diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 53833b01a2..6edf5af16b 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -579,7 +579,7 @@ def remove_duplicates_via_matching( f.write(blanck) f.close() - recording = BinaryRecordingExtractor(tmp_filename, num_chan=num_chans, sampling_frequency=fs, dtype="float32") + recording = BinaryRecordingExtractor(tmp_filename, num_channels=num_chans, sampling_frequency=fs, dtype="float32") recording.annotate(is_filtered=True) margin = 2 * max(waveform_extractor.nbefore, waveform_extractor.nafter) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 02247dd288..1450ba91db 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -238,7 +238,8 @@ def main_function(cls, recording, peaks, params): if params["tmp_folder"] is None: shutil.rmtree(tmp_folder) else: - shutil.rmtree(tmp_folder / "waveforms") + if not params["shared_memory"]: + shutil.rmtree(tmp_folder / "waveforms") shutil.rmtree(tmp_folder / "sorting") if verbose: From 
7a3d4c2181da06c4106d6c17a015839a0cc55f4f Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 17 Jul 2023 14:06:10 +0200 Subject: [PATCH 005/322] WIP --- .../sortingcomponents/matching/circus.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 8f08aac9c5..d86dac97e2 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -194,7 +194,6 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): """ _default_params = { - "sparsify_threshold": 1, "amplitudes": [0.6, 2], "omp_min_sps": 0.1, "waveform_extractor": None, @@ -219,6 +218,7 @@ def _prepare_templates(cls, d): else: sparsity = waveform_extractor.sparsity.mask + print(sparsity.mean()) templates = waveform_extractor.get_all_templates(mode='median').copy() d['sparsities'] = {} @@ -226,10 +226,10 @@ def _prepare_templates(cls, d): d['norms'] = np.zeros(num_templates, dtype=np.float32) for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): - template = templates[count] + template = templates[count][:, sparsity[count]] d['sparsities'][count], = np.nonzero(sparsity[count]) d['norms'][count] = np.linalg.norm(template) - d['templates'][count] = template[:, d['sparsities'][count]]/d['norms'][count] + d['templates'][count] = template/d['norms'][count] return d @@ -269,8 +269,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d['ignored_ids'] = np.array(d['ignored_ids']) omp_min_sps = d['omp_min_sps'] - nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d['stop_criteria'] = omp_min_sps * np.sqrt(nb_active_channels * d['num_samples']) + #nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d['stop_criteria'] = omp_min_sps * np.sqrt(d['noise_levels'].sum() * d['num_samples']) return d @@ -307,7 +307,7 @@ def main_function(cls, traces, d): min_amplitude, max_amplitude = d['amplitudes'] sparsities = d['sparsities'] ignored_ids = d['ignored_ids'] - stop_criteria = d['stop_criteria'][:, np.newaxis] + stop_criteria = d['stop_criteria'] vicinity = d['vicinity'] if 'cached_fft_kernels' not in d: @@ -356,7 +356,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((100, 100), dtype=np.float32) + M = np.zeros((num_peaks, num_peaks), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -647,7 +647,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters.update(kwargs) # assert isinstance(d['waveform_extractor'], WaveformExtractor) - for v in ["sparsify_threshold", "use_sparse_matrix_threshold"]: + for v in ["use_sparse_matrix_threshold"]: assert (default_parameters[v] >= 0) and (default_parameters[v] <= 1), f"{v} should be in [0, 1]" default_parameters["num_channels"] = default_parameters["waveform_extractor"].recording.get_num_channels() From 892305bef89b97454fcda956f39b81e3b7673d55 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 24 Jul 2023 12:01:54 +0200 Subject: [PATCH 006/322] WIP --- src/spikeinterface/sortingcomponents/matching/circus.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index d86dac97e2..d3d2c39836 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -218,7 +218,6 @@ def _prepare_templates(cls, d):
         else:
             sparsity = waveform_extractor.sparsity.mask

-        print(sparsity.mean())
         templates = waveform_extractor.get_all_templates(mode='median').copy()

         d['sparsities'] = {}
@@ -542,7 +541,7 @@ class CircusPeeler(BaseTemplateMatchingEngine):
         'use_sparse_matrix_threshold' : 0.25,
         'progess_bar_steps' : False,
         'waveform_extractor': None,
-        'sparse_kwargs' : {'method' : 'threshold', 'threshold' : 0.5, 'peak_sign' : 'both'}
+        'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1}
     }

     @classmethod

From 9b86b485cd6861469e1bd6ed7fd26bd18c59391d Mon Sep 17 00:00:00 2001
From: Matthias H Hennig
Date: Mon, 31 Jul 2023 21:21:09 +0100
Subject: [PATCH 007/322] Fixed docstring

---
 src/spikeinterface/core/base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py
index 9b300e4787..817cb95d66 100644
--- a/src/spikeinterface/core/base.py
+++ b/src/spikeinterface/core/base.py
@@ -555,12 +555,12 @@ def dump_to_pickle(
     ):
         """
         Dump recording extractor to a pickle file.
-        The extractor can be re-loaded with load_extractor_from_json(json_file)
+        The extractor can be re-loaded with load_extractor_from_pickle(pickle_file)

         Parameters
         ----------
         file_path: str
-            Path of the json file
+            Path of the pickle file
         include_properties: bool
             If True, all properties are dumped
         relative_to: str, Path, or None

From 1cb122c040b256bd0073e798e96880e19bff6d59 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Mon, 28 Aug 2023 13:35:59 +0200
Subject: [PATCH 008/322] WIP for circus2

---
 .../sortingcomponents/clustering/clustering_tools.py | 1 +
 src/spikeinterface/sortingcomponents/matching/circus.py | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py
index 6edf5af16b..06e0b8ea96 100644
--- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py
+++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py
@@ -581,6 +581,7 @@ def remove_duplicates_via_matching(

     recording = BinaryRecordingExtractor(tmp_filename, num_channels=num_chans, sampling_frequency=fs, dtype="float32")
     recording.annotate(is_filtered=True)
+    recording = recording.set_probe(waveform_extractor.recording.get_probe())
     margin = 2 * max(waveform_extractor.nbefore, waveform_extractor.nafter)
     half_marging = margin // 2

diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py
index d3d2c39836..ef823316a2 100644
--- a/src/spikeinterface/sortingcomponents/matching/circus.py
+++ b/src/spikeinterface/sortingcomponents/matching/circus.py
@@ -559,6 +559,8 @@ def _prepare_templates(cls, d):

         if not waveform_extractor.is_sparse():
             sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask
+        else:
+            sparsity = waveform_extractor.sparsity.mask

         templates = waveform_extractor.get_all_templates(mode='median').copy()
         d['sparsities'] = {}

From ef204dd83e9f6fe627b849619932c44c331e2306 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Mon, 28 Aug 2023 13:58:00 +0200
Subject: [PATCH 009/322] WIP

---
 .../clustering/clustering_tools.py | 13 +-
.../clustering/random_projections.py | 131 +++++++----------- 2 files changed, 58 insertions(+), 86 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 06e0b8ea96..f93142152f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -536,7 +536,6 @@ def remove_duplicates_via_matching( waveform_extractor, noise_levels, peak_labels, - sparsify_threshold=1, method_kwargs={}, job_kwargs={}, tmp_folder=None, @@ -552,6 +551,10 @@ def remove_duplicates_via_matching( from pathlib import Path job_kwargs = fix_job_kwargs(job_kwargs) + + if waveform_extractor.is_sparse(): + sparsity = waveform_extractor.sparsity.mask + templates = waveform_extractor.get_all_templates(mode="median").copy() nb_templates = len(templates) duration = waveform_extractor.nbefore + waveform_extractor.nafter @@ -559,9 +562,10 @@ def remove_duplicates_via_matching( fs = waveform_extractor.recording.get_sampling_frequency() num_chans = waveform_extractor.recording.get_num_channels() - for t in range(nb_templates): - is_silent = templates[t].ptp(0) < sparsify_threshold - templates[t, :, is_silent] = 0 + if waveform_extractor.is_sparse(): + for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): + templates[count][:, ~sparsity[count]] = 0 + zdata = templates.reshape(nb_templates, -1) @@ -598,7 +602,6 @@ def remove_duplicates_via_matching( "waveform_extractor": waveform_extractor, "noise_levels": noise_levels, "amplitudes": [0.95, 1.05], - "sparsify_threshold": sparsify_threshold, "omp_min_sps": 0.1, "templates": None, "overlaps": None, diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 0803763573..5e14fa4736 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -41,7 +41,6 @@ class RandomProjectionClustering: "ms_before": 1.5, "ms_after": 1.5, "random_seed": 42, - "cleaning_method": "matching", "shared_memory": False, "min_values": {"ptp": 0, "energy": 0}, "tmp_folder": None, @@ -160,87 +159,57 @@ def main_function(cls, recording, peaks, params): spikes["segment_index"] = peaks[mask]["segment_index"] spikes["unit_index"] = peak_labels[mask] - cleaning_method = params["cleaning_method"] - if verbose: - print("We found %d raw clusters, starting to clean with %s..." 
% (len(labels), cleaning_method)) - - if cleaning_method == "cosine": - wfs_arrays = extract_waveforms_to_buffers( - recording, - spikes, - labels, - nbefore, - nafter, - mode="shared_memory", - return_scaled=False, - folder=None, - dtype=recording.get_dtype(), - sparsity_mask=None, - copy=True, - **params["job_kwargs"], - ) - - labels, peak_labels = remove_duplicates( - wfs_arrays, noise_levels, peak_labels, num_samples, num_chans, **params["cleaning_kwargs"] - ) - - elif cleaning_method == "dip": - wfs_arrays = {} - for label in labels: - mask = label == peak_labels - wfs_arrays[label] = hdbscan_data[mask] - - labels, peak_labels = remove_duplicates_via_dip(wfs_arrays, peak_labels, **params["cleaning_kwargs"]) - - elif cleaning_method == "matching": - # create a tmp folder - if params["tmp_folder"] is None: - name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) - tmp_folder = get_global_tmp_folder() / name - else: - tmp_folder = Path(params["tmp_folder"]) - - if params["shared_memory"]: - waveform_folder = None - mode = "memory" - else: - waveform_folder = tmp_folder / "waveforms" - mode = "folder" - - sorting_folder = tmp_folder / "sorting" - sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs) - sorting = sorting.save(folder=sorting_folder) - we = extract_waveforms( - recording, - sorting, - waveform_folder, - ms_before=params["ms_before"], - ms_after=params["ms_after"], - **params["job_kwargs"], - return_scaled=False, - mode=mode, - ) - - cleaning_matching_params = params["job_kwargs"].copy() - cleaning_matching_params["chunk_duration"] = "100ms" - cleaning_matching_params["n_jobs"] = 1 - cleaning_matching_params["verbose"] = False - cleaning_matching_params["progress_bar"] = False - - cleaning_params = params["cleaning_kwargs"].copy() - cleaning_params["tmp_folder"] = tmp_folder - - labels, peak_labels = remove_duplicates_via_matching( - we, noise_levels, peak_labels, job_kwargs=cleaning_matching_params, **cleaning_params - ) - - if params["tmp_folder"] is None: - shutil.rmtree(tmp_folder) - else: - if not params["shared_memory"]: - shutil.rmtree(tmp_folder / "waveforms") - shutil.rmtree(tmp_folder / "sorting") + print("We found %d raw clusters, starting to clean with matching..." 
% (len(labels))) + + + # create a tmp folder + if params["tmp_folder"] is None: + name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) + tmp_folder = get_global_tmp_folder() / name + else: + tmp_folder = Path(params["tmp_folder"]) + + if params["shared_memory"]: + waveform_folder = None + mode = "memory" + else: + waveform_folder = tmp_folder / "waveforms" + mode = "folder" + + sorting_folder = tmp_folder / "sorting" + sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs) + sorting = sorting.save(folder=sorting_folder) + we = extract_waveforms( + recording, + sorting, + waveform_folder, + ms_before=params["ms_before"], + ms_after=params["ms_after"], + **params["job_kwargs"], + return_scaled=False, + mode=mode, + ) + + cleaning_matching_params = params["job_kwargs"].copy() + cleaning_matching_params["chunk_duration"] = "100ms" + cleaning_matching_params["n_jobs"] = 1 + cleaning_matching_params["verbose"] = False + cleaning_matching_params["progress_bar"] = False + + cleaning_params = params["cleaning_kwargs"].copy() + cleaning_params["tmp_folder"] = tmp_folder + + labels, peak_labels = remove_duplicates_via_matching( + we, noise_levels, peak_labels, job_kwargs=cleaning_matching_params, **cleaning_params + ) + + if params["tmp_folder"] is None: + shutil.rmtree(tmp_folder) + else: + if not params["shared_memory"]: + shutil.rmtree(tmp_folder / "waveforms") + shutil.rmtree(tmp_folder / "sorting") if verbose: print("We kept %d non-duplicated clusters..." % len(labels)) From 242799ff582d886ad8438b9344eea594e07324af Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 28 Aug 2023 14:02:05 +0200 Subject: [PATCH 010/322] Docs --- .../sortingcomponents/matching/circus.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index ef823316a2..50058ab39e 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -5,7 +5,6 @@ import scipy.spatial -from tqdm import tqdm import scipy try: @@ -190,6 +189,9 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): computed random_chunk_kwargs: dict Parameters for computing noise levels, if not provided (sub optimal) + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. ----- """ @@ -522,8 +524,9 @@ class CircusPeeler(BaseTemplateMatchingEngine): use_sparse_matrix_threshold: float If density of the templates is below a given threshold, sparse matrix are used (memory efficient) - progress_bar_steps: bool - In order to display or not steps from the algorithm + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. 
----- @@ -539,7 +542,6 @@ class CircusPeeler(BaseTemplateMatchingEngine): 'max_amplitude' : 1.5, 'min_amplitude' : 0.5, 'use_sparse_matrix_threshold' : 0.25, - 'progess_bar_steps' : False, 'waveform_extractor': None, 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1} } @@ -618,8 +620,6 @@ def _optimize_amplitudes(cls, noise_snippets, d): alpha = 0.5 norms = parameters["norms"] all_units = list(waveform_extractor.sorting.unit_ids) - if parameters["progess_bar_steps"]: - all_units = tqdm(all_units, desc="[2] compute amplitudes") parameters["amplitudes"] = np.zeros((num_templates, 2), dtype=np.float32) noise = templates.dot(noise_snippets) / norms[:, np.newaxis] From 5566c917ddbd32feda022e4293ba0bc93bdd3139 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 08:46:28 +0200 Subject: [PATCH 011/322] Fix for circus --- .../sortingcomponents/matching/circus.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 50058ab39e..f79cf60a31 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -357,7 +357,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((num_peaks, num_peaks), dtype=np.float32) + M = np.zeros((100, 100), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -570,7 +570,7 @@ def _prepare_templates(cls, d): for count, unit_id in enumerate(all_units): d['sparsities'][count], = np.nonzero(sparsity[count]) - templates[count][sparsity[count] == False] = 0 + templates[count][:, ~sparsity[count]] = 0 d['norms'][count] = np.linalg.norm(templates[count]) templates[count] /= d['norms'][count] @@ -666,7 +666,15 @@ def initialize_and_check_kwargs(cls, recording, kwargs): ) default_parameters = cls._prepare_templates(default_parameters) - default_parameters = cls._prepare_overlaps(default_parameters) + + templates = default_parameters['templates'].reshape(len(default_parameters['templates']), + default_parameters['num_samples'], + default_parameters['num_channels']) + + default_parameters['overlaps'] = compute_overlaps(templates, + default_parameters['num_samples'], + default_parameters['num_channels'], + default_parameters['sparsities']) default_parameters["exclude_sweep_size"] = int( default_parameters["exclude_sweep_ms"] * recording.get_sampling_frequency() / 1000.0 From 75c97937c1f5f66714076dba237574eddbb9782c Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 09:12:16 +0200 Subject: [PATCH 012/322] WIP --- src/spikeinterface/sortingcomponents/matching/circus.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index f79cf60a31..baf7494002 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -432,13 +432,14 @@ def main_function(cls, traces, d): selection = all_selections[:, :num_selection] res_sps = full_sps[selection[0], selection[1]] - if vicinity == 0: + if True: #vicinity == 0: all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) 
all_amplitudes /= norms[selection[0]] else: + # This is not working, need to figure out why is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) - all_amplitudes = np.append(all_amplitudes, np.float32(0)) + all_amplitudes = np.append(all_amplitudes, np.float32(1)) L = M[is_in_vicinity, :][:, is_in_vicinity] all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) From d7e9ac1c803121b7e0fb0d8c4af539340fb82bbe Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 07:14:41 +0000 Subject: [PATCH 013/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sorters/internal/spyking_circus2.py | 56 ++-- .../clustering/clustering_tools.py | 1 - .../clustering/random_projections.py | 1 - .../sortingcomponents/matching/circus.py | 286 +++++++++--------- 4 files changed, 166 insertions(+), 178 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 6635bbfca1..4ccaef8e29 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -18,24 +18,22 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): - sorter_name = 'spykingcircus2' + sorter_name = "spykingcircus2" _default_params = { - 'general' : {'ms_before' : 2, 'ms_after' : 2, 'radius_um' : 75}, - 'waveforms' : {'max_spikes_per_unit' : 200, 'overwrite' : True, 'sparse' : True, - 'method' : 'ptp', 'threshold' : 1}, - 'filtering' : {'dtype' : 'float32'}, - 'detection' : {'peak_sign': 'neg', 'detect_threshold': 5}, - 'selection' : {'n_peaks_per_channel' : 5000, 'min_n_peaks' : 20000}, - 'localization' : {}, - 'clustering': {}, - 'matching': {}, - 'apply_preprocessing': True, - 'shared_memory' : True, - 'job_kwargs' : {'n_jobs' : -1, 'chunk_memory' : "10M"} + "general": {"ms_before": 2, "ms_after": 2, "radius_um": 75}, + "waveforms": {"max_spikes_per_unit": 200, "overwrite": True, "sparse": True, "method": "ptp", "threshold": 1}, + "filtering": {"dtype": "float32"}, + "detection": {"peak_sign": "neg", "detect_threshold": 5}, + "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, + "localization": {}, + "clustering": {}, + "matching": {}, + "apply_preprocessing": True, + "shared_memory": True, + "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M"}, } - @classmethod def get_sorter_version(cls): return "2.0" @@ -101,15 +99,15 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We launch a clustering (using hdbscan) relying on positions and features extracted on ## the fly from the snippets - clustering_params = params['clustering'].copy() - clustering_params['waveforms_kwargs'] = params['waveforms'] - - for k in ['ms_before', 'ms_after']: - clustering_params['waveforms_kwargs'][k] = params['general'][k] + clustering_params = params["clustering"].copy() + clustering_params["waveforms_kwargs"] = params["waveforms"] + + for k in ["ms_before", "ms_after"]: + clustering_params["waveforms_kwargs"][k] = params["general"][k] - clustering_params.update(dict(shared_memory=params['shared_memory'])) - clustering_params['job_kwargs'] = job_kwargs - clustering_params['tmp_folder'] = sorter_output_folder / "clustering" + clustering_params.update(dict(shared_memory=params["shared_memory"])) + clustering_params["job_kwargs"] = job_kwargs + clustering_params["tmp_folder"] = sorter_output_folder / 
"clustering" labels, peak_labels = find_cluster_from_peaks( recording_f, selected_peaks, method="random_projections", method_kwargs=clustering_params @@ -124,18 +122,18 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): sorting = sorting.save(folder=clustering_folder) - ## We get the templates our of such a clustering - waveforms_params = params['waveforms'].copy() + ## We get the templates our of such a clustering + waveforms_params = params["waveforms"].copy() waveforms_params.update(job_kwargs) - for k in ['ms_before', 'ms_after']: - waveforms_params[k] = params['general'][k] + for k in ["ms_before", "ms_after"]: + waveforms_params[k] = params["general"][k] - if params['shared_memory']: - mode = 'memory' + if params["shared_memory"]: + mode = "memory" waveforms_folder = None else: - mode = 'folder' + mode = "folder" waveforms_folder = sorter_output_folder / "waveforms" we = extract_waveforms( diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index f93142152f..b11af55d35 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -565,7 +565,6 @@ def remove_duplicates_via_matching( if waveform_extractor.is_sparse(): for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): templates[count][:, ~sparsity[count]] = 0 - zdata = templates.reshape(nb_templates, -1) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 5e14fa4736..ac564bda9a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -162,7 +162,6 @@ def main_function(cls, recording, peaks, params): if verbose: print("We found %d raw clusters, starting to clean with matching..." 
% (len(labels))) - # create a tmp folder if params["tmp_folder"] is None: name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index baf7494002..b0f132e94d 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -130,8 +130,8 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret -def compute_overlaps(templates, num_samples, num_channels, sparsities): +def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) @@ -140,13 +140,13 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): size = 2 * num_samples - 1 - all_delays = list(range(0, num_samples+1)) + all_delays = list(range(0, num_samples + 1)) overlaps = {} - + for delay in all_delays: source = dense_templates[:, :delay, :].reshape(num_templates, -1) - target = dense_templates[:, num_samples-delay:, :].reshape(num_templates, -1) + target = dense_templates[:, num_samples - delay :, :].reshape(num_templates, -1) overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T)) @@ -161,7 +161,7 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): new_overlaps += [data] return new_overlaps - + class CircusOMPPeeler(BaseTemplateMatchingEngine): """ @@ -204,77 +204,74 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1}, + "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], - "vicinity" : 0 + "vicinity": 0, } @classmethod def _prepare_templates(cls, d): - - waveform_extractor = d['waveform_extractor'] - num_templates = len(d['waveform_extractor'].sorting.unit_ids) + waveform_extractor = d["waveform_extractor"] + num_templates = len(d["waveform_extractor"].sorting.unit_ids) if not waveform_extractor.is_sparse(): - sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask else: sparsity = waveform_extractor.sparsity.mask - - templates = waveform_extractor.get_all_templates(mode='median').copy() - d['sparsities'] = {} - d['templates'] = {} - d['norms'] = np.zeros(num_templates, dtype=np.float32) + templates = waveform_extractor.get_all_templates(mode="median").copy() + + d["sparsities"] = {} + d["templates"] = {} + d["norms"] = np.zeros(num_templates, dtype=np.float32) for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): template = templates[count][:, sparsity[count]] - d['sparsities'][count], = np.nonzero(sparsity[count]) - d['norms'][count] = np.linalg.norm(template) - d['templates'][count] = template/d['norms'][count] + (d["sparsities"][count],) = np.nonzero(sparsity[count]) + d["norms"][count] = np.linalg.norm(template) + d["templates"][count] = template / d["norms"][count] return d @classmethod def initialize_and_check_kwargs(cls, recording, kwargs): - d = cls._default_params.copy() d.update(kwargs) - #assert isinstance(d['waveform_extractor'], WaveformExtractor) + # assert isinstance(d['waveform_extractor'], WaveformExtractor) + + for v in ["omp_min_sps"]: + assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" - for v in ['omp_min_sps']: - assert (d[v] >= 
0) and (d[v] <= 1), f'{v} should be in [0, 1]' - - d['num_channels'] = d['waveform_extractor'].recording.get_num_channels() - d['num_samples'] = d['waveform_extractor'].nsamples - d['nbefore'] = d['waveform_extractor'].nbefore - d['nafter'] = d['waveform_extractor'].nafter - d['sampling_frequency'] = d['waveform_extractor'].recording.get_sampling_frequency() - d['vicinity'] *= d['num_samples'] + d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() + d["num_samples"] = d["waveform_extractor"].nsamples + d["nbefore"] = d["waveform_extractor"].nbefore + d["nafter"] = d["waveform_extractor"].nafter + d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() + d["vicinity"] *= d["num_samples"] - if d['noise_levels'] is None: - print('CircusOMPPeeler : noise should be computed outside') - d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'], return_scaled=False) + if d["noise_levels"] is None: + print("CircusOMPPeeler : noise should be computed outside") + d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) - if d['templates'] is None: + if d["templates"] is None: d = cls._prepare_templates(d) else: - for key in ['norms', 'sparsities']: - assert d[key] is not None, "If templates are provided, %d should also be there" %key - - d['num_templates'] = len(d['templates']) + for key in ["norms", "sparsities"]: + assert d[key] is not None, "If templates are provided, %d should also be there" % key - if d['overlaps'] is None: - d['overlaps'] = compute_overlaps(d['templates'], d['num_samples'], d['num_channels'], d['sparsities']) + d["num_templates"] = len(d["templates"]) - d['ignored_ids'] = np.array(d['ignored_ids']) + if d["overlaps"] is None: + d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) - omp_min_sps = d['omp_min_sps'] - #nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d['stop_criteria'] = omp_min_sps * np.sqrt(d['noise_levels'].sum() * d['num_samples']) + d["ignored_ids"] = np.array(d["ignored_ids"]) - return d + omp_min_sps = d["omp_min_sps"] + # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + return d @classmethod def serialize_method_kwargs(cls, kwargs): @@ -294,27 +291,27 @@ def get_margin(cls, recording, kwargs): @classmethod def main_function(cls, traces, d): - templates = d['templates'] - num_templates = d['num_templates'] - num_channels = d['num_channels'] - num_samples = d['num_samples'] - overlaps = d['overlaps'] - norms = d['norms'] - nbefore = d['nbefore'] - nafter = d['nafter'] + templates = d["templates"] + num_templates = d["num_templates"] + num_channels = d["num_channels"] + num_samples = d["num_samples"] + overlaps = d["overlaps"] + norms = d["norms"] + nbefore = d["nbefore"] + nafter = d["nafter"] omp_tol = np.finfo(np.float32).eps - num_samples = d['nafter'] + d['nbefore'] + num_samples = d["nafter"] + d["nbefore"] neighbor_window = num_samples - 1 - min_amplitude, max_amplitude = d['amplitudes'] - sparsities = d['sparsities'] - ignored_ids = d['ignored_ids'] - stop_criteria = d['stop_criteria'] - vicinity = d['vicinity'] + min_amplitude, max_amplitude = d["amplitudes"] + sparsities = d["sparsities"] + ignored_ids = d["ignored_ids"] + stop_criteria = d["stop_criteria"] + vicinity = d["vicinity"] - if 
'cached_fft_kernels' not in d: - d['cached_fft_kernels'] = {'fshape' : 0} + if "cached_fft_kernels" not in d: + d["cached_fft_kernels"] = {"fshape": 0} - cached_fft_kernels = d['cached_fft_kernels'] + cached_fft_kernels = d["cached_fft_kernels"] num_timesteps = len(traces) @@ -326,24 +323,22 @@ def main_function(cls, traces, d): dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) - fft_cache = {'full' : sp_fft.rfftn(traces, fshape, axes=axes)} + fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) - flagged_chunk = cached_fft_kernels['fshape'] != fshape[0] + flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] for i in range(num_templates): - if i not in ignored_ids: - if i not in cached_fft_kernels or flagged_chunk: kernel_filter = np.ascontiguousarray(templates[i][::-1].T) - cached_fft_kernels.update({i : sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) - cached_fft_kernels['fshape'] = fshape[0] + cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) + cached_fft_kernels["fshape"] = fshape[0] - fft_cache.update({'mask' : sparsities[i], 'template' : cached_fft_kernels[i]}) + fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) - convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode='valid') + convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") if len(convolution) > 0: scalar_products[i] = convolution.sum(0) else: @@ -368,17 +363,15 @@ def main_function(cls, traces, d): neighbors = {} cached_overlaps = {} - is_valid = (scalar_products > stop_criteria) + is_valid = scalar_products > stop_criteria all_amplitudes = np.zeros(0, dtype=np.float32) is_in_vicinity = np.zeros(0, dtype=np.int32) while np.any(is_valid): - best_amplitude_ind = scalar_products[is_valid].argmax() best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) - - if num_selection > 0: + if num_selection > 0: delta_t = selection[1] - peak_index idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] myline = num_samples + delta_t[idx] @@ -387,17 +380,21 @@ def main_function(cls, traces, d): cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() if num_selection == M.shape[0]: - Z = np.zeros((2*num_selection, 2*num_selection), dtype=np.float32) + Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) Z[:num_selection, :num_selection] = M M = Z M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] if vicinity == 0: - scipy.linalg.solve_triangular(M[:num_selection, :num_selection], M[num_selection, :num_selection], trans=0, - lower=1, - overwrite_b=True, - check_finite=False) + scipy.linalg.solve_triangular( + M[:num_selection, :num_selection], + M[num_selection, :num_selection], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) v = nrm2(M[num_selection, :num_selection]) ** 2 Lkk = 1 - v @@ -408,13 +405,11 @@ def main_function(cls, traces, d): is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] if len(is_in_vicinity) > 0: - L = M[is_in_vicinity, :][:, is_in_vicinity] - M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular(L, M[num_selection, is_in_vicinity], trans=0, - lower=1, - overwrite_b=True, - check_finite=False) + M[num_selection, is_in_vicinity] = 
scipy.linalg.solve_triangular( + L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False + ) v = nrm2(M[num_selection, is_in_vicinity]) ** 2 Lkk = 1 - v @@ -432,55 +427,52 @@ def main_function(cls, traces, d): selection = all_selections[:, :num_selection] res_sps = full_sps[selection[0], selection[1]] - if True: #vicinity == 0: - all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, - lower=True, overwrite_b=False) + if True: # vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) all_amplitudes /= norms[selection[0]] else: # This is not working, need to figure out why is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) all_amplitudes = np.append(all_amplitudes, np.float32(1)) L = M[is_in_vicinity, :][:, is_in_vicinity] - all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], - lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] - diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]]) + diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] final_amplitudes[selection[0], selection[1]] = all_amplitudes for i in modified: - tmp_best, tmp_peak = selection[:, i] - diff_amp = diff_amplitudes[i]*norms[tmp_best] - + diff_amp = diff_amplitudes[i] * norms[tmp_best] + if not tmp_best in cached_overlaps: cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] - neighbors[tmp_peak] = {'idx' : idx, 'tdx' : tdx} + neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} - idx = neighbors[tmp_peak]['idx'] - tdx = neighbors[tmp_peak]['tdx'] + idx = neighbors[tmp_peak]["idx"] + tdx = neighbors[tmp_peak]["tdx"] - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]] - scalar_products[:, idx[0]:idx[1]] -= to_add + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] + scalar_products[:, idx[0] : idx[1]] -= to_add - is_valid = (scalar_products > stop_criteria) + is_valid = scalar_products > stop_criteria - is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude) + is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) valid_indices = np.where(is_valid) num_spikes = len(valid_indices[0]) - spikes['sample_index'][:num_spikes] = valid_indices[1] + d['nbefore'] - spikes['channel_index'][:num_spikes] = 0 - spikes['cluster_index'][:num_spikes] = valid_indices[0] - spikes['amplitude'][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] - + spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] + spikes["channel_index"][:num_spikes] = 0 + spikes["cluster_index"][:num_spikes] = valid_indices[0] + spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + spikes = spikes[:num_spikes] - order = np.argsort(spikes['sample_index']) + order = np.argsort(spikes["sample_index"]) spikes = spikes[order] return spikes @@ -534,58 +526,56 @@ class CircusPeeler(BaseTemplateMatchingEngine): """ _default_params = { - 'peak_sign': 'neg', - 'exclude_sweep_ms': 0.1, - 'jitter_ms' : 0.1, - 'detect_threshold': 5, - 
'noise_levels': None, - 'random_chunk_kwargs': {}, - 'max_amplitude' : 1.5, - 'min_amplitude' : 0.5, - 'use_sparse_matrix_threshold' : 0.25, - 'waveform_extractor': None, - 'sparse_kwargs' : {'method' : 'ptp', 'threshold' : 1} + "peak_sign": "neg", + "exclude_sweep_ms": 0.1, + "jitter_ms": 0.1, + "detect_threshold": 5, + "noise_levels": None, + "random_chunk_kwargs": {}, + "max_amplitude": 1.5, + "min_amplitude": 0.5, + "use_sparse_matrix_threshold": 0.25, + "waveform_extractor": None, + "sparse_kwargs": {"method": "ptp", "threshold": 1}, } @classmethod def _prepare_templates(cls, d): - - waveform_extractor = d['waveform_extractor'] - num_samples = d['num_samples'] - num_channels = d['num_channels'] - num_templates = d['num_templates'] - use_sparse_matrix_threshold = d['use_sparse_matrix_threshold'] + waveform_extractor = d["waveform_extractor"] + num_samples = d["num_samples"] + num_channels = d["num_channels"] + num_templates = d["num_templates"] + use_sparse_matrix_threshold = d["use_sparse_matrix_threshold"] - d['norms'] = np.zeros(num_templates, dtype=np.float32) + d["norms"] = np.zeros(num_templates, dtype=np.float32) - all_units = list(d['waveform_extractor'].sorting.unit_ids) + all_units = list(d["waveform_extractor"].sorting.unit_ids) if not waveform_extractor.is_sparse(): - sparsity = compute_sparsity(waveform_extractor, **d['sparse_kwargs']).mask + sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask else: sparsity = waveform_extractor.sparsity.mask - templates = waveform_extractor.get_all_templates(mode='median').copy() - d['sparsities'] = {} - - for count, unit_id in enumerate(all_units): + templates = waveform_extractor.get_all_templates(mode="median").copy() + d["sparsities"] = {} - d['sparsities'][count], = np.nonzero(sparsity[count]) + for count, unit_id in enumerate(all_units): + (d["sparsities"][count],) = np.nonzero(sparsity[count]) templates[count][:, ~sparsity[count]] = 0 - d['norms'][count] = np.linalg.norm(templates[count]) - templates[count] /= d['norms'][count] + d["norms"][count] = np.linalg.norm(templates[count]) + templates[count] /= d["norms"][count] templates = templates.reshape(num_templates, -1) - nnz = np.sum(templates != 0)/(num_templates * num_samples * num_channels) + nnz = np.sum(templates != 0) / (num_templates * num_samples * num_channels) if nnz <= use_sparse_matrix_threshold: templates = scipy.sparse.csr_matrix(templates) - print(f'Templates are automatically sparsified (sparsity level is {nnz})') - d['is_dense'] = False + print(f"Templates are automatically sparsified (sparsity level is {nnz})") + d["is_dense"] = False else: - d['is_dense'] = True + d["is_dense"] = True - d['templates'] = templates + d["templates"] = templates return d @@ -595,9 +585,9 @@ def _mcc_error(cls, bounds, good, bad): fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1])) tp = np.sum((bounds[0] <= good) & (good <= bounds[1])) tn = np.sum((bad < bounds[0]) | (bad > bounds[1])) - denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn) + denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) if denom > 0: - mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom) + mcc = 1 - (tp * tn - fp * fn) / np.sqrt(denom) else: mcc = 1 return mcc @@ -668,14 +658,16 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters = cls._prepare_templates(default_parameters) - templates = default_parameters['templates'].reshape(len(default_parameters['templates']), - default_parameters['num_samples'], - default_parameters['num_channels']) + templates = 
default_parameters["templates"].reshape( + len(default_parameters["templates"]), default_parameters["num_samples"], default_parameters["num_channels"] + ) - default_parameters['overlaps'] = compute_overlaps(templates, - default_parameters['num_samples'], - default_parameters['num_channels'], - default_parameters['sparsities']) + default_parameters["overlaps"] = compute_overlaps( + templates, + default_parameters["num_samples"], + default_parameters["num_channels"], + default_parameters["sparsities"], + ) default_parameters["exclude_sweep_size"] = int( default_parameters["exclude_sweep_ms"] * recording.get_sampling_frequency() / 1000.0 From 14c8f58571fefc60eaa544da476c0210d45d2b92 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 11:09:02 +0200 Subject: [PATCH 014/322] useless dependency --- src/spikeinterface/sorters/internal/spyking_circus2.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 4ccaef8e29..ec2a74b6bb 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -3,7 +3,6 @@ import os import shutil import numpy as np -import psutil from spikeinterface.core import NumpySorting, load_extractor, BaseRecording, get_noise_levels, extract_waveforms from spikeinterface.core.job_tools import fix_job_kwargs From e455da3f46cc5529986f60c56cb7868391f12af5 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 13:51:38 +0200 Subject: [PATCH 015/322] Fix for classical circus with sparsity --- .../sortingcomponents/matching/circus.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index b0f132e94d..cdacfe1304 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -136,6 +136,7 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) for i in range(num_templates): + print(templates[i].shape, len(sparsities[i])) dense_templates[i, :, sparsities[i]] = templates[i].T size = 2 * num_samples - 1 @@ -558,12 +559,14 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() d["sparsities"] = {} + d["circus_templates"] = {} for count, unit_id in enumerate(all_units): (d["sparsities"][count],) = np.nonzero(sparsity[count]) templates[count][:, ~sparsity[count]] = 0 d["norms"][count] = np.linalg.norm(templates[count]) templates[count] /= d["norms"][count] + d['circus_templates'][count] = templates[count][:, sparsity[count]] templates = templates.reshape(num_templates, -1) @@ -617,7 +620,7 @@ def _optimize_amplitudes(cls, noise_snippets, d): all_amps = {} for count, unit_id in enumerate(all_units): - waveform = waveform_extractor.get_waveforms(unit_id) + waveform = waveform_extractor.get_waveforms(unit_id, force_dense=True) snippets = waveform.reshape(waveform.shape[0], -1).T amps = templates.dot(snippets) / norms[:, np.newaxis] good = amps[count, :].flatten() @@ -658,12 +661,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters = cls._prepare_templates(default_parameters) - templates = default_parameters["templates"].reshape( - len(default_parameters["templates"]), 
default_parameters["num_samples"], default_parameters["num_channels"] - ) - default_parameters["overlaps"] = compute_overlaps( - templates, + default_parameters['circus_templates'], default_parameters["num_samples"], default_parameters["num_channels"], default_parameters["sparsities"], From 2f84c6b632cd17391ba1eff0b89578b87f2fb892 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 11:51:59 +0000 Subject: [PATCH 016/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/matching/circus.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index cdacfe1304..e92e7929f6 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -566,7 +566,7 @@ def _prepare_templates(cls, d): templates[count][:, ~sparsity[count]] = 0 d["norms"][count] = np.linalg.norm(templates[count]) templates[count] /= d["norms"][count] - d['circus_templates'][count] = templates[count][:, sparsity[count]] + d["circus_templates"][count] = templates[count][:, sparsity[count]] templates = templates.reshape(num_templates, -1) @@ -662,7 +662,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): default_parameters = cls._prepare_templates(default_parameters) default_parameters["overlaps"] = compute_overlaps( - default_parameters['circus_templates'], + default_parameters["circus_templates"], default_parameters["num_samples"], default_parameters["num_channels"], default_parameters["sparsities"], From 3d849fb91680f05c27c52dc240f61e65490c4a16 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 13:52:34 +0200 Subject: [PATCH 017/322] Fix for classical circus with sparsity --- src/spikeinterface/sortingcomponents/matching/circus.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index cdacfe1304..06cd99d92a 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -136,7 +136,6 @@ def compute_overlaps(templates, num_samples, num_channels, sparsities): dense_templates = np.zeros((num_templates, num_samples, num_channels), dtype=np.float32) for i in range(num_templates): - print(templates[i].shape, len(sparsities[i])) dense_templates[i, :, sparsities[i]] = templates[i].T size = 2 * num_samples - 1 From 7dcfdb0b325ffefb980c54ac5070339a490f8b49 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 14:56:23 +0200 Subject: [PATCH 018/322] Fixing slow tests with SC2 --- src/spikeinterface/sorters/internal/spyking_circus2.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index ec2a74b6bb..628ea991c1 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -30,7 +30,7 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): "matching": {}, "apply_preprocessing": True, "shared_memory": True, - "job_kwargs": {"n_jobs": -1, "chunk_memory": "10M"}, + "job_kwargs": {"n_jobs": -1}, } @classmethod @@ -145,6 +145,9 @@ def _run_from_folder(cls, 
sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() + if 'chunk_memory' in matching_job_params: + matching_job_params.pop('chunk_memory') + matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( From 9f196b58acf4a5d2cc1ebc45a0ee969c03451d83 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 12:58:57 +0000 Subject: [PATCH 019/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/spyking_circus2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 628ea991c1..8a7b353bd1 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -145,8 +145,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() - if 'chunk_memory' in matching_job_params: - matching_job_params.pop('chunk_memory') + if "chunk_memory" in matching_job_params: + matching_job_params.pop("chunk_memory") matching_job_params["chunk_duration"] = "100ms" From 1c7c8020147e24997e3c34e374c76df8a72bc684 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 15:25:58 +0200 Subject: [PATCH 020/322] WIP for cleaning --- .../sortingcomponents/clustering/random_projections.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index ac564bda9a..d9a317ca06 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,6 +191,8 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() + if 'chunk_memory' in cleaning_matching_params: + cleaning_matching_params.pop('chunk_memory') cleaning_matching_params["chunk_duration"] = "100ms" cleaning_matching_params["n_jobs"] = 1 cleaning_matching_params["verbose"] = False From af4f1877aa800ff0277bd40a2aa83fc408b1ef08 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 13:31:36 +0000 Subject: [PATCH 021/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/random_projections.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index d9a317ca06..d82f9a7808 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,8 +191,8 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() - if 'chunk_memory' in cleaning_matching_params: - cleaning_matching_params.pop('chunk_memory') + if "chunk_memory" in cleaning_matching_params: + cleaning_matching_params.pop("chunk_memory") cleaning_matching_params["chunk_duration"] = "100ms" 
cleaning_matching_params["n_jobs"] = 1 cleaning_matching_params["verbose"] = False From 8c2af8fcfa4c0ab4aa058e4778545b4cee64fa08 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 29 Aug 2023 18:09:23 +0200 Subject: [PATCH 022/322] WIP --- .../benchmark/benchmark_matching.py | 51 +++++++++++-------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 07c7db155c..8ce8efe25f 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -600,29 +600,38 @@ def plot_comparison_matching( else: ax = axs[j] comp1, comp2 = comp_per_method[method1], comp_per_method[method2] - for performance, color in zip(performance_names, colors): - perf1 = comp1.get_performance()[performance] - perf2 = comp2.get_performance()[performance] - ax.plot(perf2, perf1, ".", label=performance, color=color) - ax.plot([0, 1], [0, 1], "k--", alpha=0.5) - ax.set_ylim(ylim) - ax.set_xlim(ylim) - ax.spines[["right", "top"]].set_visible(False) - ax.set_aspect("equal") - - if j == 0: - ax.set_ylabel(f"{method1}") - else: - ax.set_yticks([]) - if i == num_methods - 1: - ax.set_xlabel(f"{method2}") + if i <= j: + for performance, color in zip(performance_names, colors): + perf1 = comp1.get_performance()[performance] + perf2 = comp2.get_performance()[performance] + ax.plot(perf2, perf1, ".", label=performance, color=color) + + ax.plot([0, 1], [0, 1], "k--", alpha=0.5) + ax.set_ylim(ylim) + ax.set_xlim(ylim) + ax.spines[["right", "top"]].set_visible(False) + ax.set_aspect("equal") + + if j == i: + ax.set_ylabel(f"{method1}") + else: + ax.set_yticks([]) + if i == j: + ax.set_xlabel(f"{method2}") + else: + ax.set_xticks([]) + if i == num_methods - 1 and j == num_methods - 1: + patches = [] + for color, name in zip(colors, performance_names): + patches.append(mpatches.Patch(color=color, label=name)) + ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) else: + ax.spines['bottom'].set_visible(False) + ax.spines['left'].set_visible(False) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) ax.set_xticks([]) - if i == num_methods - 1 and j == num_methods - 1: - patches = [] - for color, name in zip(colors, performance_names): - patches.append(mpatches.Patch(color=color, label=name)) - ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) + ax.set_yticks([]) plt.tight_layout(h_pad=0, w_pad=0) return fig, axs From 30d1ecce4249a3e645ca09be39799277186e11c6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 4 Sep 2023 11:47:37 +0200 Subject: [PATCH 023/322] Allow to postprocess on read-only waveform folders --- src/spikeinterface/core/waveform_extractor.py | 55 ++++++++++--------- .../tests/common_extension_tests.py | 23 +++++++- .../postprocessing/unit_localization.py | 4 +- 3 files changed, 54 insertions(+), 28 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 877c9fb00c..e404e74be4 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -4,6 +4,7 @@ import shutil from typing import Iterable, Literal, Optional import json +import os import numpy as np from copy import deepcopy @@ -87,6 +88,7 @@ def __init__( self._template_cache = {} self._params = {} 
self._loaded_extensions = dict() + self._is_read_only = False self.sparsity = sparsity self.folder = folder @@ -103,6 +105,8 @@ def __init__( if (self.folder / "params.json").is_file(): with open(str(self.folder / "params.json"), "r") as f: self._params = json.load(f) + if not os.access(self.folder, os.W_OK): + self._is_read_only = True else: # this is in case of in-memory self.format = "memory" @@ -399,6 +403,9 @@ def return_scaled(self) -> bool: def dtype(self): return self._params["dtype"] + def is_read_only(self) -> bool: + return self._is_read_only + def has_recording(self) -> bool: return self._recording is not None @@ -514,18 +521,8 @@ def is_extension(self, extension_name) -> bool: exists: bool Whether the extension exists or not """ - if self.folder is None: - return extension_name in self._loaded_extensions - else: - if self.format == "binary": - return (self.folder / extension_name).is_dir() and ( - self.folder / extension_name / "params.json" - ).is_file() - elif self.format == "zarr": - return ( - extension_name in self._waveforms_root.keys() - and "params" in self._waveforms_root[extension_name].attrs.keys() - ) + # Extensions are always loaded in memory + return extension_name in self._loaded_extensions def load_extension(self, extension_name): """ @@ -1735,20 +1732,28 @@ def __init__(self, waveform_extractor): self.waveform_extractor = waveform_extractor if self.waveform_extractor.folder is not None: - self.folder = self.waveform_extractor.folder - self.format = self.waveform_extractor.format - if self.format == "binary": - self.extension_folder = self.folder / self.extension_name - if not self.extension_folder.is_dir(): - self.extension_folder.mkdir() - else: - import zarr - - zarr_root = zarr.open(self.folder, mode="r+") - if self.extension_name not in zarr_root.keys(): - self.extension_group = zarr_root.create_group(self.extension_name) + if not self.waveform_extractor.is_read_only(): + self.folder = self.waveform_extractor.folder + self.format = self.waveform_extractor.format + if self.format == "binary": + self.extension_folder = self.folder / self.extension_name + if not self.extension_folder.is_dir(): + self.extension_folder.mkdir() else: - self.extension_group = zarr_root[self.extension_name] + import zarr + + zarr_root = zarr.open(self.folder, mode="r+") + if self.extension_name not in zarr_root.keys(): + self.extension_group = zarr_root.create_group(self.extension_name) + else: + self.extension_group = zarr_root[self.extension_name] + else: + warn( + "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." 
+ ) + self.format = "memory" + self.extension_folder = None + self.folder = None else: self.format = "memory" self.extension_folder = None diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py index b9c72f9b99..f44d58470c 100644 --- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py +++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py @@ -4,7 +4,7 @@ import shutil from pathlib import Path -from spikeinterface import extract_waveforms, load_extractor, compute_sparsity +from spikeinterface import extract_waveforms, load_extractor, load_waveforms, compute_sparsity from spikeinterface.extractors import toy_example if hasattr(pytest, "global_test_folder"): @@ -76,6 +76,15 @@ def setUp(self): overwrite=True, ) self.we2 = we2 + + # make we read-only + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + if not we_ro_folder.is_dir(): + shutil.copytree(we2.folder, we_ro_folder) + # change permissions (R+X) + we_ro_folder.chmod(0o555) + self.we_ro = load_waveforms(we_ro_folder) + self.sparsity2 = compute_sparsity(we2, method="radius", radius_um=30) we_memory = extract_waveforms( recording, @@ -97,6 +106,11 @@ def setUp(self): folder=cache_folder / "toy_sorting_2seg_sparse", format="binary", sparsity=sparsity, overwrite=True ) + def tearDown(self): + # allow pytest to delete RO folder + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + we_ro_folder.chmod(0o777) + def _test_extension_folder(self, we, in_memory=False): if self.extension_function_kwargs_list is None: extension_function_kwargs_list = [dict()] @@ -177,3 +191,10 @@ def test_extension(self): assert ext_data_mem.equals(ext_data_zarr) else: print(f"{ext_data_name} of type {type(ext_data_mem)} not tested.") + + # read-only - Extension is memory only + _ = self.extension_class.get_extension_function()(self.we_ro, load_if_exists=False) + assert self.extension_class.extension_name in self.we_ro.get_available_extension_names() + ext_ro = self.we_ro.load_extension(self.extension_class.extension_name) + assert ext_ro.format == "memory" + assert ext_ro.extension_folder is None diff --git a/src/spikeinterface/postprocessing/unit_localization.py b/src/spikeinterface/postprocessing/unit_localization.py index 740fdd234b..d2739f69dd 100644 --- a/src/spikeinterface/postprocessing/unit_localization.py +++ b/src/spikeinterface/postprocessing/unit_localization.py @@ -570,6 +570,8 @@ def enforce_decrease_shells_data(wf_data, maxchan, radial_parents, in_place=Fals def get_grid_convolution_templates_and_weights( contact_locations, radius_um=50, upsampling_um=5, sigma_um=np.linspace(10, 50.0, 5), margin_um=50 ): + import sklearn.metrics + x_min, x_max = contact_locations[:, 0].min(), contact_locations[:, 0].max() y_min, y_max = contact_locations[:, 1].min(), contact_locations[:, 1].max() @@ -593,8 +595,6 @@ def get_grid_convolution_templates_and_weights( template_positions[:, 0] = all_x.flatten() template_positions[:, 1] = all_y.flatten() - import sklearn - # mask to get nearest template given a channel dist = sklearn.metrics.pairwise_distances(contact_locations, template_positions) nearest_template_mask = dist < radius_um From b8ee13c208cf928573595d941803b11e38278eb0 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 4 Sep 2023 15:02:13 +0200 Subject: [PATCH 024/322] Restore extension loading --- src/spikeinterface/core/waveform_extractor.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 
deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index e404e74be4..6083732c11 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -521,8 +521,22 @@ def is_extension(self, extension_name) -> bool: exists: bool Whether the extension exists or not """ - # Extensions are always loaded in memory - return extension_name in self._loaded_extensions + if self.folder is None: + return extension_name in self._loaded_extensions + else: + # Extensions already loaded in memory + if extension_name in self._loaded_extensions: + return True + else: + if self.format == "binary": + return (self.folder / extension_name).is_dir() and ( + self.folder / extension_name / "params.json" + ).is_file() + elif self.format == "zarr": + return ( + extension_name in self._waveforms_root.keys() + and "params" in self._waveforms_root[extension_name].attrs.keys() + ) def load_extension(self, extension_name): """ From def525c20a463b625c2f014fd5a84be4f79a00ef Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 5 Sep 2023 15:38:06 +0200 Subject: [PATCH 025/322] handle re-loading correctly --- src/spikeinterface/core/waveform_extractor.py | 140 ++++++++++-------- 1 file changed, 77 insertions(+), 63 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6083732c11..39d115e22c 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1746,28 +1746,39 @@ def __init__(self, waveform_extractor): self.waveform_extractor = waveform_extractor if self.waveform_extractor.folder is not None: - if not self.waveform_extractor.is_read_only(): - self.folder = self.waveform_extractor.folder - self.format = self.waveform_extractor.format - if self.format == "binary": - self.extension_folder = self.folder / self.extension_name - if not self.extension_folder.is_dir(): + self.folder = self.waveform_extractor.folder + self.format = self.waveform_extractor.format + if self.format == "binary": + self.extension_folder = self.folder / self.extension_name + if not self.extension_folder.is_dir(): + if not self.waveform_extractor.is_read_only(): self.extension_folder.mkdir() - else: - import zarr + else: + raise Exception( + "WaveformExtractor: cannot save extension in read-only mode. " + "Extension will be saved in memory." + ) + self.format = "memory" + self.extension_folder = None + self.folder = None + else: + import zarr - zarr_root = zarr.open(self.folder, mode="r+") - if self.extension_name not in zarr_root.keys(): + mode = "r+" if not self.waveform_extractor.is_read_only() else "r" + zarr_root = zarr.open(self.folder, mode=mode) + if self.extension_name not in zarr_root.keys(): + if not self.waveform_extractor.is_read_only(): self.extension_group = zarr_root.create_group(self.extension_name) else: - self.extension_group = zarr_root[self.extension_name] - else: - warn( - "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." - ) - self.format = "memory" - self.extension_folder = None - self.folder = None + raise Exception( + "WaveformExtractor: cannot save extension in read-only mode. " + "Extension will be saved in memory." 
+ ) + self.format = "memory" + self.extension_folder = None + self.folder = None + else: + self.extension_group = zarr_root[self.extension_name] else: self.format = "memory" self.extension_folder = None @@ -1882,53 +1893,56 @@ def save(self, **kwargs): self._save(**kwargs) def _save(self, **kwargs): - if self.format == "binary": - import pandas as pd - - for ext_data_name, ext_data in self._extension_data.items(): - if isinstance(ext_data, dict): - with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: - json.dump(ext_data, f) - elif isinstance(ext_data, np.ndarray): - np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) - else: - try: - with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: - pickle.dump(ext_data, f) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") - elif self.format == "zarr": - from .zarrrecordingextractor import get_default_zarr_compressor - import pandas as pd - import numcodecs - - compressor = kwargs.get("compressor", None) - if compressor is None: - compressor = get_default_zarr_compressor() - for ext_data_name, ext_data in self._extension_data.items(): - if ext_data_name in self.extension_group: - del self.extension_group[ext_data_name] - if isinstance(ext_data, dict): - self.extension_group.create_dataset( - name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() - ) - self.extension_group[ext_data_name].attrs["dict"] = True - elif isinstance(ext_data, np.ndarray): - self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_xarray().to_zarr( - store=self.extension_group.store, group=f"{self.extension_group.name}/{ext_data_name}", mode="a" - ) - self.extension_group[ext_data_name].attrs["dataframe"] = True - else: - try: + if not self.waveform_extractor.is_read_only(): + if self.format == "binary": + import pandas as pd + + for ext_data_name, ext_data in self._extension_data.items(): + if isinstance(ext_data, dict): + with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: + json.dump(ext_data, f) + elif isinstance(ext_data, np.ndarray): + np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) + elif isinstance(ext_data, pd.DataFrame): + ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) + else: + try: + with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: + pickle.dump(ext_data, f) + except: + raise Exception(f"Could not save {ext_data_name} as extension data") + elif self.format == "zarr": + from .zarrrecordingextractor import get_default_zarr_compressor + import pandas as pd + import numcodecs + + compressor = kwargs.get("compressor", None) + if compressor is None: + compressor = get_default_zarr_compressor() + for ext_data_name, ext_data in self._extension_data.items(): + if ext_data_name in self.extension_group: + del self.extension_group[ext_data_name] + if isinstance(ext_data, dict): self.extension_group.create_dataset( - name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() + name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() + ) + self.extension_group[ext_data_name].attrs["dict"] = True + elif isinstance(ext_data, np.ndarray): + self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) + elif isinstance(ext_data, pd.DataFrame): 
+ ext_data.to_xarray().to_zarr( + store=self.extension_group.store, + group=f"{self.extension_group.name}/{ext_data_name}", + mode="a", ) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") + self.extension_group[ext_data_name].attrs["dataframe"] = True + else: + try: + self.extension_group.create_dataset( + name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() + ) + except: + raise Exception(f"Could not save {ext_data_name} as extension data") def reset(self): """ From dfa67e681afec0ef741b16e61417c70123c97ef5 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Wed, 6 Sep 2023 12:08:01 +0200 Subject: [PATCH 026/322] warn instead of raise --- src/spikeinterface/core/waveform_extractor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 39d115e22c..431440c846 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1754,7 +1754,7 @@ def __init__(self, waveform_extractor): if not self.waveform_extractor.is_read_only(): self.extension_folder.mkdir() else: - raise Exception( + warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." ) @@ -1770,7 +1770,7 @@ def __init__(self, waveform_extractor): if not self.waveform_extractor.is_read_only(): self.extension_group = zarr_root.create_group(self.extension_name) else: - raise Exception( + warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." ) From f60024b0c52e17edfebe02b8170f9ac3d78b053f Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Wed, 6 Sep 2023 12:24:41 +0200 Subject: [PATCH 027/322] Do not overwrite similarity in Phy if available --- src/spikeinterface/exporters/to_phy.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 5615402fdb..c92861a8bf 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -178,7 +178,11 @@ def export_to_phy( templates[unit_ind, :, :][:, : len(chan_inds)] = template templates_ind[unit_ind, : len(chan_inds)] = chan_inds - template_similarity = compute_template_similarity(waveform_extractor, method="cosine_similarity") + if waveform_extractor.is_extension("similarity"): + tmc = waveform_extractor.load_extension("similarity") + template_similarity = tmc.get_data() + else: + template_similarity = compute_template_similarity(waveform_extractor, method="cosine_similarity") np.save(str(output_folder / "templates.npy"), templates) np.save(str(output_folder / "template_ind.npy"), templates_ind) From e64b8b4e99aabae273738e5f2985a651f321aa08 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 6 Sep 2023 15:00:52 +0200 Subject: [PATCH 028/322] Recafor sorter launcher. Deorecated run_sorters() and add run_sorter_jobs() --- doc/api.rst | 1 + doc/modules/sorters.rst | 37 +- src/spikeinterface/comparison/studytools.py | 38 +- src/spikeinterface/sorters/__init__.py | 9 +- src/spikeinterface/sorters/basesorter.py | 11 + src/spikeinterface/sorters/launcher.py | 450 ++++++++---------- .../sorters/tests/test_launcher.py | 287 ++++++----- 7 files changed, 406 insertions(+), 427 deletions(-) diff --git a/doc/api.rst b/doc/api.rst index 2e9fc1567a..1e8d6d62b1 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -212,6 +212,7 @@ spikeinterface.sorters .. 
autofunction:: print_sorter_versions .. autofunction:: get_sorter_description .. autofunction:: run_sorter + .. autofunction:: run_sorter_jobs .. autofunction:: run_sorters .. autofunction:: run_sorter_by_property diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 26f2365202..ad50f9e411 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -285,27 +285,26 @@ Running several sorters in parallel The :py:mod:`~spikeinterface.sorters` module also includes tools to run several spike sorting jobs sequentially or in parallel. This can be done with the -:py:func:`~spikeinterface.sorters.run_sorters()` function by specifying +:py:func:`~spikeinterface.sorters.run_sorter_jobs()` function by specifying an :code:`engine` that supports parallel processing (such as :code:`joblib` or :code:`slurm`). .. code-block:: python - recordings = {'rec1' : recording, 'rec2': another_recording} - sorter_list = ['herdingspikes', 'tridesclous'] - sorter_params = { - 'herdingspikes': {'clustering_bandwidth' : 8}, - 'tridesclous': {'detect_threshold' : 5.}, - } - sorting_output = run_sorters(sorter_list, recordings, working_folder='tmp_some_sorters', - mode_if_folder_exists='overwrite', sorter_params=sorter_params) + # here we run 2 sorters on 2 diffrents recording = 4 jobs + recording = ... + another_recording = ... + + job_list = [ + {'sorter_name': 'tridesclous', 'recording': recording, 'output_folder': '/folder1','detect_threshold': 5.}, + {'sorter_name': 'tridesclous', 'recording': another_recording, 'output_folder': '/folder2', 'detect_threshold': 5.}, + {'sorter_name': 'herdingspikes', 'recording': recording, 'output_folder': '/folder3', 'clustering_bandwidth': 8., 'docker_image': True}, + {'sorter_name': 'herdingspikes', 'recording': another_recording, 'output_folder': '/folder4', 'clustering_bandwidth': 8., 'docker_image': True}, + ] + + # run in loop + sortings = run_sorter_jobs(job_list, engine='loop') - # the output is a dict with (rec_name, sorter_name) as keys - for (rec_name, sorter_name), sorting in sorting_output.items(): - print(rec_name, sorter_name, ':', sorting.get_unit_ids()) -After the jobs are run, the :code:`sorting_outputs` is a dictionary with :code:`(rec_name, sorter_name)` as a key (e.g. -:code:`('rec1', 'tridesclous')` in this example), and the corresponding :py:class:`~spikeinterface.core.BaseSorting` -as a value. :py:func:`~spikeinterface.sorters.run_sorters` has several "engines" available to launch the computation: @@ -315,13 +314,11 @@ as a value. .. 
code-block:: python - run_sorters(sorter_list, recordings, engine='loop') + run_sorter_jobs(job_list, engine='loop') - run_sorters(sorter_list, recordings, engine='joblib', - engine_kwargs={'n_jobs': 2}) + run_sorter_jobs(job_list, engine='joblib', engine_kwargs={'n_jobs': 2}) - run_sorters(sorter_list, recordings, engine='slurm', - engine_kwargs={'cpus_per_task': 10, 'mem', '5G'}) + run_sorter_jobs(job_list, engine='slurm', engine_kwargs={'cpus_per_task': 10, 'mem', '5G'}) Spike sorting by group diff --git a/src/spikeinterface/comparison/studytools.py b/src/spikeinterface/comparison/studytools.py index 79227c865f..00119c1586 100644 --- a/src/spikeinterface/comparison/studytools.py +++ b/src/spikeinterface/comparison/studytools.py @@ -22,12 +22,48 @@ from spikeinterface.core.job_tools import fix_job_kwargs from spikeinterface.extractors import NpzSortingExtractor from spikeinterface.sorters import sorter_dict -from spikeinterface.sorters.launcher import iter_working_folder, iter_sorting_output +from spikeinterface.sorters.basesorter import is_log_ok + from .comparisontools import _perf_keys from .paircomparisons import compare_sorter_to_ground_truth + + + +# This is deprecated and will be removed +def iter_working_folder(working_folder): + working_folder = Path(working_folder) + for rec_folder in working_folder.iterdir(): + if not rec_folder.is_dir(): + continue + for output_folder in rec_folder.iterdir(): + if (output_folder / "spikeinterface_job.json").is_file(): + with open(output_folder / "spikeinterface_job.json", "r") as f: + job_dict = json.load(f) + rec_name = job_dict["rec_name"] + sorter_name = job_dict["sorter_name"] + yield rec_name, sorter_name, output_folder + else: + rec_name = rec_folder.name + sorter_name = output_folder.name + if not output_folder.is_dir(): + continue + if not is_log_ok(output_folder): + continue + yield rec_name, sorter_name, output_folder + +# This is deprecated and will be removed +def iter_sorting_output(working_folder): + """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" + for rec_name, sorter_name, output_folder in iter_working_folder(working_folder): + SorterClass = sorter_dict[sorter_name] + sorting = SorterClass.get_result_from_folder(output_folder) + yield rec_name, sorter_name, sorting + + + def setup_comparison_study(study_folder, gt_dict, **job_kwargs): """ Based on a dict of (recording, sorting) create the study folder. 
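A minimal sketch of how the deprecated iterators added above might be driven; the working-folder path and the printed fields are illustrative assumptions, not part of this patch:

.. code-block:: python

    from spikeinterface.comparison.studytools import iter_sorting_output

    # walk a working folder produced by the old run_sorters() layout;
    # each item is a (recording name, sorter name, BaseSorting) triplet
    for rec_name, sorter_name, sorting in iter_sorting_output("tmp_some_sorters"):
        print(rec_name, sorter_name, sorting.get_num_units())
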
diff --git a/src/spikeinterface/sorters/__init__.py b/src/spikeinterface/sorters/__init__.py index a0d437559d..ba663327e8 100644 --- a/src/spikeinterface/sorters/__init__.py +++ b/src/spikeinterface/sorters/__init__.py @@ -1,11 +1,4 @@ from .basesorter import BaseSorter from .sorterlist import * from .runsorter import * - -from .launcher import ( - run_sorters, - run_sorter_by_property, - collect_sorting_outputs, - iter_working_folder, - iter_sorting_output, -) +from .launcher import run_sorter_jobs, run_sorters, run_sorter_by_property diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index ff559cc78d..aa76809b58 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -411,3 +411,14 @@ def get_job_kwargs(params, verbose): if not verbose: job_kwargs["progress_bar"] = False return job_kwargs + + +def is_log_ok(output_folder): + # log is OK when run_time is not None + if (output_folder / "spikeinterface_log.json").is_file(): + with open(output_folder / "spikeinterface_log.json", mode="r", encoding="utf8") as logfile: + log = json.load(logfile) + run_time = log.get("run_time", None) + ok = run_time is not None + return ok + return False \ No newline at end of file diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 52098f45cd..138b4c5848 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -10,55 +10,148 @@ import stat import subprocess import sys +import warnings from spikeinterface.core import load_extractor, aggregate_units from spikeinterface.core.core_tools import check_json from .sorterlist import sorter_dict -from .runsorter import run_sorter, run_sorter - - -def _run_one(arg_list): - # the multiprocessing python module force to have one unique tuple argument - ( - sorter_name, - recording, - output_folder, - verbose, - sorter_params, - docker_image, - singularity_image, - with_output, - ) = arg_list - - if isinstance(recording, dict): - recording = load_extractor(recording) +from .runsorter import run_sorter +from .basesorter import is_log_ok + +_implemented_engine = ("loop", "joblib", "dask", "slurm") + +def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): + """ + Run several :py:func:`run_sorter()` sequencially or in parralel given a list of job. + + For **engine="loop"** this is equivalent to: + + ..code:: + + for job in job_list: + run_sorter(**job) + + For some engines, this function is blocking until the results ("loop", "joblib", "multiprocessing", "dask"). + For some other engine ("slurm") the function return almost immediatly (akak non blocking) and the results + must be retrieve by hand when finished with :py:func:`read_sorter_folder()`. + + Parameters + ---------- + job_list: list of dict + A list a dict that are propagated to run_sorter(...) + engine: str "loop", "joblib", "dask", "slurm" + The engine to run the list. + * "loop": a simple loop. This engine is + engine_kwargs: dict + + return_output: bool, dfault False + Return a sorting or None. + + Returns + ------- + sortings: None or list of sorting + With engine="loop" or "joblib" you can optional get directly the list of sorting result if return_output=True. 
+ """ + + assert engine in _implemented_engine, f"engine must be in {_implemented_engine}" + + if return_output: + assert engine in ("loop", "joblib", "multiprocessing") + out = [] else: - recording = recording - - # because this is checks in run_sorters before this call - remove_existing_folder = False - # result is retrieve later - delete_output_folder = False - # because we won't want the loop/worker to break - raise_error = False - - run_sorter( - sorter_name, - recording, - output_folder=output_folder, - remove_existing_folder=remove_existing_folder, - delete_output_folder=delete_output_folder, - verbose=verbose, - raise_error=raise_error, - docker_image=docker_image, - singularity_image=singularity_image, - with_output=with_output, - **sorter_params, - ) + out = None + + if engine == "loop": + # simple loop in main process + for kwargs in job_list: + sorting = run_sorter(**kwargs) + if return_output: + out.append(sorting) + + elif engine == "joblib": + from joblib import Parallel, delayed + + n_jobs = engine_kwargs.get("n_jobs", -1) + backend = engine_kwargs.get("backend", "loky") + sortings = Parallel(n_jobs=n_jobs, backend=backend)(delayed(run_sorter)(**kwargs) for kwargs in job_list) + if return_output: + out.extend(sortings) + + elif engine == "multiprocessing": + raise NotImplementedError() + + elif engine == "dask": + client = engine_kwargs.get("client", None) + assert client is not None, "For dask engine you have to provide : client = dask.distributed.Client(...)" + + tasks = [] + for kwargs in job_list: + task = client.submit(run_sorter, **kwargs) + tasks.append(task) + + for task in tasks: + task.result() + + elif engine == "slurm": + # generate python script for slurm + tmp_script_folder = engine_kwargs.get("tmp_script_folder", None) + if tmp_script_folder is None: + tmp_script_folder = tempfile.mkdtemp(prefix="spikeinterface_slurm_") + tmp_script_folder = Path(tmp_script_folder) + cpus_per_task = engine_kwargs.get("cpus_per_task", 1) + mem = engine_kwargs.get("mem", "1G") + + tmp_script_folder.mkdir(exist_ok=True, parents=True) + + # for i, task_args in enumerate(task_args_list): + for i, kwargs in enumerate(job_list): + script_name = tmp_script_folder / f"si_script_{i}.py" + with open(script_name, "w") as f: + kwargs_txt = "" + for k, v in kwargs.items(): + print(k, v) + kwargs_txt += " " + if k == "recording": + # put None temporally + kwargs_txt += "recording=None" + else: + if isinstance(v, str): + kwargs_txt += f'{k}="{v}"' + elif isinstance(v, Path): + kwargs_txt += f'{k}="{str(v.absolute())}"' + else: + kwargs_txt += f"{k}={v}" + kwargs_txt += ",\n" + + # recording_dict = task_args[1] + recording_dict = kwargs["recording"].to_dict() + slurm_script = _slurm_script.format( + python=sys.executable, recording_dict=recording_dict, kwargs_txt=kwargs_txt + ) + print(slurm_script) + f.write(slurm_script) + os.fchmod(f.fileno(), mode=stat.S_IRWXU) + + # subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) + + return out + +_slurm_script = """#! 
{python} +from numpy import array +from spikeinterface.sorters import run_sorter + +rec_dict = {recording_dict} + +kwargs = dict( +{kwargs_txt} +) +kwargs['recording'] = load_extactor(rec_dict) + +run_sorter(**kwargs) +""" -_implemented_engine = ("loop", "joblib", "dask", "slurm") def run_sorter_by_property( @@ -66,7 +159,7 @@ def run_sorter_by_property( recording, grouping_property, working_folder, - mode_if_folder_exists="raise", + mode_if_folder_exists=None, engine="loop", engine_kwargs={}, verbose=False, @@ -93,11 +186,10 @@ def run_sorter_by_property( Property to split by before sorting working_folder: str The working directory. - mode_if_folder_exists: {'raise', 'overwrite', 'keep'} - The mode when the subfolder of recording/sorter already exists. - * 'raise' : raise error if subfolder exists - * 'overwrite' : delete and force recompute - * 'keep' : do not compute again if f=subfolder exists and log is OK + mode_if_folder_exists: None + Must be None. This is deprecated. + If not None then a warning is raise. + Will be removed in next release. engine: {'loop', 'joblib', 'dask'} Which engine to use to run sorter. engine_kwargs: dict @@ -127,46 +219,50 @@ def run_sorter_by_property( engine_kwargs={"n_jobs": 4}) """ + if mode_if_folder_exists is not None: + warnings.warn( + "run_sorter_by_property(): mode_if_folder_exists is not used anymore", + DeprecationWarning, + stacklevel=2, + ) + + working_folder = Path(working_folder).absolute() assert grouping_property in recording.get_property_keys(), ( f"The 'grouping_property' {grouping_property} is not " f"a recording property!" ) recording_dict = recording.split_by(grouping_property) - sorting_output = run_sorters( - [sorter_name], - recording_dict, - working_folder, - mode_if_folder_exists=mode_if_folder_exists, - engine=engine, - engine_kwargs=engine_kwargs, - verbose=verbose, - with_output=True, - docker_images={sorter_name: docker_image}, - singularity_images={sorter_name: singularity_image}, - sorter_params={sorter_name: sorter_params}, - ) - - grouping_property_values = None - sorting_list = [] - for output_name, sorting in sorting_output.items(): - prop_name, sorter_name = output_name - sorting_list.append(sorting) - if grouping_property_values is None: - grouping_property_values = np.array( - [prop_name] * len(sorting.get_unit_ids()), dtype=np.dtype(type(prop_name)) - ) - else: - grouping_property_values = np.concatenate( - (grouping_property_values, [prop_name] * len(sorting.get_unit_ids())) - ) + + job_list = [] + for k, rec in recording_dict.items(): + job = dict( + sorter_name=sorter_name, + recording=rec, + output_folder=working_folder / str(k), + verbose=verbose, + docker_image=docker_image, + singularity_image=singularity_image, + **sorter_params + ) + job_list.append(job) + + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=True) + + unit_groups = [] + for sorting, group in zip(sorting_list, recording_dict.keys()): + num_units = sorting.get_unit_ids().size + unit_groups.extend([group] * num_units) + unit_groups = np.array(unit_groups) aggregate_sorting = aggregate_units(sorting_list) - aggregate_sorting.set_property(key=grouping_property, values=grouping_property_values) + aggregate_sorting.set_property(key=grouping_property, values=unit_groups) aggregate_sorting.register_recording(recording) return aggregate_sorting + +# This is deprecated and will be removed def run_sorters( sorter_list, recording_dict_or_list, @@ -180,8 +276,10 @@ def run_sorters( docker_images={}, 
singularity_images={}, ): - """Run several sorter on several recordings. - + """ + This function is deprecated and will be removed. + Please use run_sorter_jobs() instead. + Parameters ---------- sorter_list: list of str @@ -221,6 +319,13 @@ def run_sorters( results : dict The output is nested dict[(rec_name, sorter_name)] of SortingExtractor. """ + + warnings.warn( + "run_sorters()is deprecated please use run_sorter_jobs() instead", + DeprecationWarning, + stacklevel=2, + ) + working_folder = Path(working_folder) mode_if_folder_exists in ("raise", "keep", "overwrite") @@ -247,8 +352,7 @@ def run_sorters( dtype_rec_name = np.dtype(type(list(recording_dict.keys())[0])) assert dtype_rec_name.kind in ("i", "u", "S", "U"), "Dict keys can only be integers or strings!" - need_dump = engine != "loop" - task_args_list = [] + job_list = [] for rec_name, recording in recording_dict.items(): for sorter_name in sorter_list: output_folder = working_folder / str(rec_name) / sorter_name @@ -260,6 +364,7 @@ def run_sorters( elif mode_if_folder_exists == "overwrite": shutil.rmtree(str(output_folder)) elif mode_if_folder_exists == "keep": + if is_log_ok(output_folder): continue else: @@ -268,181 +373,22 @@ def run_sorters( params = sorter_params.get(sorter_name, {}) docker_image = docker_images.get(sorter_name, None) singularity_image = singularity_images.get(sorter_name, None) - _check_container_images(docker_image, singularity_image, sorter_name) - - if need_dump: - if not recording.check_if_dumpable(): - raise Exception("recording not dumpable call recording.save() before") - recording_arg = recording.to_dict(recursive=True) - else: - recording_arg = recording - - task_args = ( - sorter_name, - recording_arg, - output_folder, - verbose, - params, - docker_image, - singularity_image, - with_output, - ) - task_args_list.append(task_args) - if engine == "loop": - # simple loop in main process - for task_args in task_args_list: - _run_one(task_args) - - elif engine == "joblib": - from joblib import Parallel, delayed - - n_jobs = engine_kwargs.get("n_jobs", -1) - backend = engine_kwargs.get("backend", "loky") - Parallel(n_jobs=n_jobs, backend=backend)(delayed(_run_one)(task_args) for task_args in task_args_list) - - elif engine == "dask": - client = engine_kwargs.get("client", None) - assert client is not None, "For dask engine you have to provide : client = dask.distributed.Client(...)" - - tasks = [] - for task_args in task_args_list: - task = client.submit(_run_one, task_args) - tasks.append(task) - - for task in tasks: - task.result() - - elif engine == "slurm": - # generate python script for slurm - tmp_script_folder = engine_kwargs.get("tmp_script_folder", None) - if tmp_script_folder is None: - tmp_script_folder = tempfile.mkdtemp(prefix="spikeinterface_slurm_") - tmp_script_folder = Path(tmp_script_folder) - cpus_per_task = engine_kwargs.get("cpus_per_task", 1) - mem = engine_kwargs.get("mem", "1G") - - for i, task_args in enumerate(task_args_list): - script_name = tmp_script_folder / f"si_script_{i}.py" - with open(script_name, "w") as f: - arg_list_txt = "(\n" - for j, arg in enumerate(task_args): - arg_list_txt += "\t" - if j != 1: - if isinstance(arg, str): - arg_list_txt += f'"{arg}"' - elif isinstance(arg, Path): - arg_list_txt += f'"{str(arg.absolute())}"' - else: - arg_list_txt += f"{arg}" - else: - arg_list_txt += "recording" - arg_list_txt += ",\r" - arg_list_txt += ")" - - recording_dict = task_args[1] - slurm_script = _slurm_script.format( - python=sys.executable, 
recording_dict=recording_dict, arg_list_txt=arg_list_txt - ) - f.write(slurm_script) - os.fchmod(f.fileno(), mode=stat.S_IRWXU) - - print(slurm_script) - - subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) - - non_blocking_engine = ("loop", "joblib") - if engine in non_blocking_engine: - # dump spikeinterface_job.json - # only for non blocking engine - for rec_name, recording in recording_dict.items(): - for sorter_name in sorter_list: - output_folder = working_folder / str(rec_name) / sorter_name - with open(output_folder / "spikeinterface_job.json", "w") as f: - dump_dict = {"rec_name": rec_name, "sorter_name": sorter_name, "engine": engine} - if engine != "dask": - dump_dict.update({"engine_kwargs": engine_kwargs}) - json.dump(check_json(dump_dict), f) - - if with_output: - if engine not in non_blocking_engine: - print( - f'Warning!! With engine="{engine}" you cannot have directly output results\n' - "Use : run_sorters(..., with_output=False)\n" - "And then: results = collect_sorting_outputs(output_folders)" + job = dict( + sorter_name=sorter_name, + recording=recording, + output_folder=output_folder, + verbose=verbose, + docker_image=docker_image, + singularity_image=singularity_image, + **params ) - return + job_list.append(job) + + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=with_output) - results = collect_sorting_outputs(working_folder) + if with_output: + keys = [(rec_name, sorter_name) for rec_name in recording_dict for sorter_name in sorter_list ] + results = dict(zip(keys, sorting_list)) return results - -_slurm_script = """#! {python} -from numpy import array -from spikeinterface.sorters.launcher import _run_one - -recording = {recording_dict} - -arg_list = {arg_list_txt} - -_run_one(arg_list) -""" - - -def is_log_ok(output_folder): - # log is OK when run_time is not None - if (output_folder / "spikeinterface_log.json").is_file(): - with open(output_folder / "spikeinterface_log.json", mode="r", encoding="utf8") as logfile: - log = json.load(logfile) - run_time = log.get("run_time", None) - ok = run_time is not None - return ok - return False - - -def iter_working_folder(working_folder): - working_folder = Path(working_folder) - for rec_folder in working_folder.iterdir(): - if not rec_folder.is_dir(): - continue - for output_folder in rec_folder.iterdir(): - if (output_folder / "spikeinterface_job.json").is_file(): - with open(output_folder / "spikeinterface_job.json", "r") as f: - job_dict = json.load(f) - rec_name = job_dict["rec_name"] - sorter_name = job_dict["sorter_name"] - yield rec_name, sorter_name, output_folder - else: - rec_name = rec_folder.name - sorter_name = output_folder.name - if not output_folder.is_dir(): - continue - if not is_log_ok(output_folder): - continue - yield rec_name, sorter_name, output_folder - - -def iter_sorting_output(working_folder): - """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" - for rec_name, sorter_name, output_folder in iter_working_folder(working_folder): - SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder) - yield rec_name, sorter_name, sorting - - -def collect_sorting_outputs(working_folder): - """Collect results in a working_folder. - - The output is a dict with double key access results[(rec_name, sorter_name)] of SortingExtractor. 
- """ - results = {} - for rec_name, sorter_name, sorting in iter_sorting_output(working_folder): - results[(rec_name, sorter_name)] = sorting - return results - - -def _check_container_images(docker_image, singularity_image, sorter_name): - if docker_image is not None: - assert singularity_image is None, f"Provide either a docker or a singularity image " f"for sorter {sorter_name}" - if singularity_image is not None: - assert docker_image is None, f"Provide either a docker or a singularity image " f"for sorter {sorter_name}" diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index cd8bc0fa5d..0d84dc0bdb 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -1,4 +1,5 @@ import os +import sys import shutil import time @@ -6,8 +7,9 @@ from pathlib import Path from spikeinterface.core import load_extractor -from spikeinterface.extractors import toy_example -from spikeinterface.sorters import run_sorters, run_sorter_by_property, collect_sorting_outputs +# from spikeinterface.extractors import toy_example +from spikeinterface import generate_ground_truth_recording +from spikeinterface.sorters import run_sorter_jobs, run_sorters, run_sorter_by_property if hasattr(pytest, "global_test_folder"): @@ -15,10 +17,16 @@ else: cache_folder = Path("cache_folder") / "sorters" +base_output = cache_folder / 'sorter_output' + +# no need to have many +num_recordings = 2 +sorters = ["tridesclous2"] def setup_module(): - rec, _ = toy_example(num_channels=8, duration=30, seed=0, num_segments=1) - for i in range(4): + base_seed = 42 + for i in range(num_recordings): + rec, _ = generate_ground_truth_recording(num_channels=8, durations=[10.0], seed=base_seed + i) rec_folder = cache_folder / f"toy_rec_{i}" if rec_folder.is_dir(): shutil.rmtree(rec_folder) @@ -31,19 +39,101 @@ def setup_module(): rec.save(folder=rec_folder) -def test_run_sorters_with_list(): - working_folder = cache_folder / "test_run_sorters_list" +def get_job_list(): + jobs = [] + for i in range(num_recordings): + for sorter_name in sorters: + recording = load_extractor(cache_folder / f"toy_rec_{i}") + kwargs = dict(sorter_name=sorter_name, + recording=recording, + output_folder=base_output / f"{sorter_name}_rec{i}", + verbose=True, + raise_error=False, + ) + jobs.append(kwargs) + + return jobs + +@pytest.fixture(scope="module") +def job_list(): + return get_job_list() + + + + + + + +################################ + + +def test_run_sorter_jobs_loop(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + sortings = run_sorter_jobs(job_list, engine="loop", return_output=True) + print(sortings) + + +def test_run_sorter_jobs_joblib(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True) + print(sortings) + +def test_run_sorter_jobs_multiprocessing(job_list): + pass + +@pytest.mark.skipif(True, reason="This is tested locally") +def test_run_sorter_jobs_dask(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + + # create a dask Client for a slurm queue + from dask.distributed import Client + + test_mode = "local" + # test_mode = "client_slurm" + + if test_mode == "local": + client = Client() + elif test_mode == "client_slurm": + from dask_jobqueue import SLURMCluster + cluster = SLURMCluster( + processes=1, + cores=1, + memory="12GB", + python=sys.executable, + 
walltime="12:00:00", + ) + cluster.scale(2) + client = Client(cluster) + + # dask + t0 = time.perf_counter() + run_sorter_jobs(job_list, engine="dask", engine_kwargs=dict(client=client)) + t1 = time.perf_counter() + print(t1 - t0) + + +def test_run_sorter_jobs_slurm(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + + working_folder = cache_folder / "test_run_sorters_slurm" if working_folder.is_dir(): shutil.rmtree(working_folder) - # make dumpable - rec0 = load_extractor(cache_folder / "toy_rec_0") - rec1 = load_extractor(cache_folder / "toy_rec_1") - - recording_list = [rec0, rec1] - sorter_list = ["tridesclous"] + tmp_script_folder = working_folder / "slurm_scripts" - run_sorters(sorter_list, recording_list, working_folder, engine="loop", verbose=False, with_output=False) + run_sorter_jobs( + job_list, + engine="slurm", + engine_kwargs=dict( + tmp_script_folder=tmp_script_folder, + cpus_per_task=32, + mem="32G", + ) + ) def test_run_sorter_by_property(): @@ -59,7 +149,7 @@ def test_run_sorter_by_property(): rec0_by = rec0.split_by("group") group_names0 = list(rec0_by.keys()) - sorter_name = "tridesclous" + sorter_name = "tridesclous2" sorting0 = run_sorter_by_property(sorter_name, rec0, "group", working_folder1, engine="loop", verbose=False) assert "group" in sorting0.get_property_keys() assert all([g in group_names0 for g in sorting0.get_property("group")]) @@ -68,13 +158,38 @@ def test_run_sorter_by_property(): rec1_by = rec1.split_by("group") group_names1 = list(rec1_by.keys()) - sorter_name = "tridesclous" + sorter_name = "tridesclous2" sorting1 = run_sorter_by_property(sorter_name, rec1, "group", working_folder2, engine="loop", verbose=False) assert "group" in sorting1.get_property_keys() assert all([g in group_names1 for g in sorting1.get_property("group")]) + +# run_sorters is deprecated +# This will test will be removed in next release +def test_run_sorters_with_list(): + + + working_folder = cache_folder / "test_run_sorters_list" + if working_folder.is_dir(): + shutil.rmtree(working_folder) + + # make dumpable + rec0 = load_extractor(cache_folder / "toy_rec_0") + rec1 = load_extractor(cache_folder / "toy_rec_1") + + recording_list = [rec0, rec1] + sorter_list = ["tridesclous2"] + + run_sorters(sorter_list, recording_list, working_folder, engine="loop", verbose=False, with_output=False) + + + + +# run_sorters is deprecated +# This will test will be removed in next release def test_run_sorters_with_dict(): + working_folder = cache_folder / "test_run_sorters_dict" if working_folder.is_dir(): shutil.rmtree(working_folder) @@ -84,9 +199,9 @@ def test_run_sorters_with_dict(): recording_dict = {"toy_tetrode": rec0, "toy_octotrode": rec1} - sorter_list = ["tridesclous", "tridesclous2"] + sorter_list = ["tridesclous2"] - sorter_params = {"tridesclous": dict(detect_threshold=5.6), "tridesclous2": dict()} + sorter_params = {"tridesclous2": dict()} # simple loop t0 = time.perf_counter() @@ -116,143 +231,23 @@ def test_run_sorters_with_dict(): ) -@pytest.mark.skipif(True, reason="This is tested locally") -def test_run_sorters_joblib(): - working_folder = cache_folder / "test_run_sorters_joblib" - if working_folder.is_dir(): - shutil.rmtree(working_folder) - - recording_dict = {} - for i in range(4): - rec = load_extractor(cache_folder / f"toy_rec_{i}") - recording_dict[f"rec_{i}"] = rec - - sorter_list = [ - "tridesclous", - ] - - # joblib - t0 = time.perf_counter() - run_sorters( - sorter_list, - recording_dict, - working_folder / "with_joblib", - engine="joblib", - 
engine_kwargs={"n_jobs": 4}, - with_output=False, - mode_if_folder_exists="keep", - ) - t1 = time.perf_counter() - print(t1 - t0) - - -@pytest.mark.skipif(True, reason="This is tested locally") -def test_run_sorters_dask(): - working_folder = cache_folder / "test_run_sorters_dask" - if working_folder.is_dir(): - shutil.rmtree(working_folder) - - recording_dict = {} - for i in range(4): - rec = load_extractor(cache_folder / f"toy_rec_{i}") - recording_dict[f"rec_{i}"] = rec - sorter_list = [ - "tridesclous", - ] - - # create a dask Client for a slurm queue - from dask.distributed import Client - from dask_jobqueue import SLURMCluster - - python = "/home/samuel.garcia/.virtualenvs/py36/bin/python3.6" - cluster = SLURMCluster( - processes=1, - cores=1, - memory="12GB", - python=python, - walltime="12:00:00", - ) - cluster.scale(5) - client = Client(cluster) - - # dask - t0 = time.perf_counter() - run_sorters( - sorter_list, - recording_dict, - working_folder, - engine="dask", - engine_kwargs={"client": client}, - with_output=False, - mode_if_folder_exists="keep", - ) - t1 = time.perf_counter() - print(t1 - t0) - - -@pytest.mark.skipif(True, reason="This is tested locally") -def test_run_sorters_slurm(): - working_folder = cache_folder / "test_run_sorters_slurm" - if working_folder.is_dir(): - shutil.rmtree(working_folder) - - # create recording - recording_dict = {} - for i in range(4): - rec = load_extractor(cache_folder / f"toy_rec_{i}") - recording_dict[f"rec_{i}"] = rec - - sorter_list = [ - "spykingcircus2", - "tridesclous2", - ] - - tmp_script_folder = working_folder / "slurm_scripts" - tmp_script_folder.mkdir(parents=True) - - run_sorters( - sorter_list, - recording_dict, - working_folder, - engine="slurm", - engine_kwargs={ - "tmp_script_folder": tmp_script_folder, - "cpus_per_task": 32, - "mem": "32G", - }, - with_output=False, - mode_if_folder_exists="keep", - verbose=True, - ) - - -def test_collect_sorting_outputs(): - working_folder = cache_folder / "test_run_sorters_dict" - results = collect_sorting_outputs(working_folder) - print(results) - - -def test_sorter_installation(): - # This import is to get error on github when import fails - import tridesclous - - # import circus if __name__ == "__main__": setup_module() - # pass - # test_run_sorters_with_list() + job_list = get_job_list() + + # test_run_sorter_jobs_loop(job_list) + # test_run_sorter_jobs_joblib(job_list) + # test_run_sorter_jobs_multiprocessing(job_list) + # test_run_sorter_jobs_dask(job_list) + # test_run_sorter_jobs_slurm(job_list) # test_run_sorter_by_property() + # this deprecated + test_run_sorters_with_list() test_run_sorters_with_dict() - # test_run_sorters_joblib() - - # test_run_sorters_dask() - - # test_run_sorters_slurm() - # test_collect_sorting_outputs() From 67dc176ec3305154adc7e0ce21b38b466c0fcd0b Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Wed, 6 Sep 2023 15:12:06 +0200 Subject: [PATCH 029/322] Update doc/modules/sorters.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/sorters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index ad50f9e411..1843e80b8c 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -290,7 +290,7 @@ an :code:`engine` that supports parallel processing (such as :code:`joblib` or : .. code-block:: python - # here we run 2 sorters on 2 diffrents recording = 4 jobs + # here we run 2 sorters on 2 different recordings = 4 jobs recording = ... 
another_recording = ... From fe5052818fa4ddaed3f0e21fd657c9fe4151f988 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 6 Sep 2023 15:32:30 +0200 Subject: [PATCH 030/322] add engine="processpoolexecutor" --- src/spikeinterface/sorters/launcher.py | 56 ++++++++++++++----- .../sorters/tests/test_launcher.py | 27 ++++----- 2 files changed, 55 insertions(+), 28 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 138b4c5848..60be6e1286 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -4,7 +4,6 @@ from pathlib import Path import shutil import numpy as np -import json import tempfile import os import stat @@ -12,14 +11,22 @@ import sys import warnings -from spikeinterface.core import load_extractor, aggregate_units -from spikeinterface.core.core_tools import check_json +from spikeinterface.core import aggregate_units from .sorterlist import sorter_dict from .runsorter import run_sorter from .basesorter import is_log_ok -_implemented_engine = ("loop", "joblib", "dask", "slurm") +_default_engine_kwargs = dict( + loop=dict(), + joblib=dict(n_jobs=-1, backend="loky"), + processpoolexecutor=dict(max_workers=2, mp_context=None), + dask=dict(client=None), + slurm=dict(tmp_script_folder=None, cpus_per_task=1, mem="1G"), +) + + +_implemented_engine = list(_default_engine_kwargs.keys()) def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): """ @@ -56,8 +63,15 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal assert engine in _implemented_engine, f"engine must be in {_implemented_engine}" + engine_kwargs_ = dict() + engine_kwargs_.update(_default_engine_kwargs[engine]) + engine_kwargs_.update(engine_kwargs) + engine_kwargs = engine_kwargs_ + + + if return_output: - assert engine in ("loop", "joblib", "multiprocessing") + assert engine in ("loop", "joblib", "processpoolexecutor") out = [] else: out = None @@ -72,17 +86,30 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal elif engine == "joblib": from joblib import Parallel, delayed - n_jobs = engine_kwargs.get("n_jobs", -1) - backend = engine_kwargs.get("backend", "loky") + n_jobs = engine_kwargs["n_jobs"] + backend = engine_kwargs["backend"] sortings = Parallel(n_jobs=n_jobs, backend=backend)(delayed(run_sorter)(**kwargs) for kwargs in job_list) if return_output: out.extend(sortings) - elif engine == "multiprocessing": - raise NotImplementedError() + elif engine == "processpoolexecutor": + from concurrent.futures import ProcessPoolExecutor + + max_workers = engine_kwargs["max_workers"] + mp_context = engine_kwargs["mp_context"] + + with ProcessPoolExecutor(max_workers=max_workers, mp_context=mp_context) as executor: + futures = [] + for kwargs in job_list: + res = executor.submit(run_sorter, **kwargs) + futures.append(res) + for futur in futures: + sorting = futur.result() + if return_output: + out.append(sorting) elif engine == "dask": - client = engine_kwargs.get("client", None) + client = engine_kwargs["client"] assert client is not None, "For dask engine you have to provide : client = dask.distributed.Client(...)" tasks = [] @@ -95,16 +122,15 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal elif engine == "slurm": # generate python script for slurm - tmp_script_folder = engine_kwargs.get("tmp_script_folder", None) + tmp_script_folder = engine_kwargs["tmp_script_folder"] if tmp_script_folder is None: 
tmp_script_folder = tempfile.mkdtemp(prefix="spikeinterface_slurm_") tmp_script_folder = Path(tmp_script_folder) - cpus_per_task = engine_kwargs.get("cpus_per_task", 1) - mem = engine_kwargs.get("mem", "1G") + cpus_per_task = engine_kwargs["cpus_per_task"] + mem = engine_kwargs["mem"] tmp_script_folder.mkdir(exist_ok=True, parents=True) - # for i, task_args in enumerate(task_args_list): for i, kwargs in enumerate(job_list): script_name = tmp_script_folder / f"si_script_{i}.py" with open(script_name, "w") as f: @@ -133,7 +159,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal f.write(slurm_script) os.fchmod(f.fileno(), mode=stat.S_IRWXU) - # subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) + subprocess.Popen(["sbatch", str(script_name.absolute()), f"-cpus-per-task={cpus_per_task}", f"-mem={mem}"]) return out diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index 0d84dc0bdb..c1f8b6e0bb 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -59,14 +59,6 @@ def job_list(): return get_job_list() - - - - - -################################ - - def test_run_sorter_jobs_loop(job_list): if base_output.is_dir(): shutil.rmtree(base_output) @@ -74,14 +66,22 @@ def test_run_sorter_jobs_loop(job_list): print(sortings) + + def test_run_sorter_jobs_joblib(job_list): if base_output.is_dir(): shutil.rmtree(base_output) sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True) print(sortings) -def test_run_sorter_jobs_multiprocessing(job_list): - pass +def test_run_sorter_jobs_processpoolexecutor(job_list): + if base_output.is_dir(): + shutil.rmtree(base_output) + sortings = run_sorter_jobs(job_list, engine="processpoolexecutor", engine_kwargs=dict(max_workers=2), return_output=True) + print(sortings) + + + @pytest.mark.skipif(True, reason="This is tested locally") def test_run_sorter_jobs_dask(job_list): @@ -235,11 +235,12 @@ def test_run_sorters_with_dict(): if __name__ == "__main__": - setup_module() + # setup_module() job_list = get_job_list() # test_run_sorter_jobs_loop(job_list) # test_run_sorter_jobs_joblib(job_list) + test_run_sorter_jobs_processpoolexecutor(job_list) # test_run_sorter_jobs_multiprocessing(job_list) # test_run_sorter_jobs_dask(job_list) # test_run_sorter_jobs_slurm(job_list) @@ -247,7 +248,7 @@ def test_run_sorters_with_dict(): # test_run_sorter_by_property() # this deprecated - test_run_sorters_with_list() - test_run_sorters_with_dict() + # test_run_sorters_with_list() + # test_run_sorters_with_dict() From f4b7c3caad2011606bf19a70c69d098a3922f277 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 6 Sep 2023 16:29:17 +0200 Subject: [PATCH 031/322] debug slurm launcher --- src/spikeinterface/sorters/launcher.py | 5 ++--- src/spikeinterface/sorters/tests/test_launcher.py | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 60be6e1286..6f3b972fdd 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -136,7 +136,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal with open(script_name, "w") as f: kwargs_txt = "" for k, v in kwargs.items(): - print(k, v) kwargs_txt += " " if k == "recording": # put None temporally @@ 
-155,7 +154,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal slurm_script = _slurm_script.format( python=sys.executable, recording_dict=recording_dict, kwargs_txt=kwargs_txt ) - print(slurm_script) f.write(slurm_script) os.fchmod(f.fileno(), mode=stat.S_IRWXU) @@ -165,6 +163,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal _slurm_script = """#! {python} from numpy import array +from spikeinterface import load_extractor from spikeinterface.sorters import run_sorter rec_dict = {recording_dict} @@ -172,7 +171,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal kwargs = dict( {kwargs_txt} ) -kwargs['recording'] = load_extactor(rec_dict) +kwargs['recording'] = load_extractor(rec_dict) run_sorter(**kwargs) """ diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index c1f8b6e0bb..2d8e6f3d3c 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -240,10 +240,10 @@ def test_run_sorters_with_dict(): # test_run_sorter_jobs_loop(job_list) # test_run_sorter_jobs_joblib(job_list) - test_run_sorter_jobs_processpoolexecutor(job_list) + # test_run_sorter_jobs_processpoolexecutor(job_list) # test_run_sorter_jobs_multiprocessing(job_list) # test_run_sorter_jobs_dask(job_list) - # test_run_sorter_jobs_slurm(job_list) + test_run_sorter_jobs_slurm(job_list) # test_run_sorter_by_property() From 93de4db5596a7c4ff3cc2925f6c702a9cabf7703 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Wed, 6 Sep 2023 16:25:42 +0200 Subject: [PATCH 032/322] Update doc/modules/sorters.rst Co-authored-by: Alessio Buccino --- doc/modules/sorters.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 1843e80b8c..d17927cc42 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -295,10 +295,10 @@ an :code:`engine` that supports parallel processing (such as :code:`joblib` or : another_recording = ... 
job_list = [ - {'sorter_name': 'tridesclous', 'recording': recording, 'output_folder': '/folder1','detect_threshold': 5.}, - {'sorter_name': 'tridesclous', 'recording': another_recording, 'output_folder': '/folder2', 'detect_threshold': 5.}, - {'sorter_name': 'herdingspikes', 'recording': recording, 'output_folder': '/folder3', 'clustering_bandwidth': 8., 'docker_image': True}, - {'sorter_name': 'herdingspikes', 'recording': another_recording, 'output_folder': '/folder4', 'clustering_bandwidth': 8., 'docker_image': True}, + {'sorter_name': 'tridesclous', 'recording': recording, 'output_folder': 'folder1','detect_threshold': 5.}, + {'sorter_name': 'tridesclous', 'recording': another_recording, 'output_folder': 'folder2', 'detect_threshold': 5.}, + {'sorter_name': 'herdingspikes', 'recording': recording, 'output_folder': 'folder3', 'clustering_bandwidth': 8., 'docker_image': True}, + {'sorter_name': 'herdingspikes', 'recording': another_recording, 'output_folder': 'folder4', 'clustering_bandwidth': 8., 'docker_image': True}, ] # run in loop From fe2d7c532611add92ea46d877f051a396ead6ced Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Thu, 7 Sep 2023 19:08:32 +0200 Subject: [PATCH 033/322] Suggestions from Zach Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- src/spikeinterface/sorters/launcher.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 6f3b972fdd..103f30dac5 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -30,7 +30,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): """ - Run several :py:func:`run_sorter()` sequencially or in parralel given a list of job. + Run several :py:func:`run_sorter()` sequentially or in parallel given a list of jobs. For **engine="loop"** this is equivalent to: @@ -39,9 +39,9 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal for job in job_list: run_sorter(**job) - For some engines, this function is blocking until the results ("loop", "joblib", "multiprocessing", "dask"). - For some other engine ("slurm") the function return almost immediatly (akak non blocking) and the results - must be retrieve by hand when finished with :py:func:`read_sorter_folder()`. + For some engines ("loop", "joblib", "multiprocessing", "dask"), this function is blocking until the results . + For other engines ("slurm") the function returns almost immediately (aka non-blocking) and the results + must be retrieved by hand when finished with :py:func:`read_sorter_folder()`. Parameters ---------- From 0acc125e1688a83c66542f19519045ee2f6eadf6 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 10:28:21 +0200 Subject: [PATCH 034/322] Start GroundTruthStudy refactoring. --- .../comparison/groundtruthstudy.py | 66 ++++++++- .../comparison/tests/test_groundtruthstudy.py | 128 ++++++++++++------ 2 files changed, 152 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 7b146f07bc..12588019ba 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -22,8 +22,72 @@ collect_run_times, ) - class GroundTruthStudy: + """ + This class is an helper function to run any comparison on several "cases" for several ground truth dataset. 
+ + "cases" can be: + * several sorter for comparisons + * same sorter with differents parameters + * parameters of comparisons + * any combination of theses + + For enough flexibility cases key can be a tuple so that we can varify complexity along several + "axis" (paremeters or sorter) + + Ground truth dataset need recording+sorting. This can be from meraec file or from the internal generator + :py:fun:`generate_ground_truth_recording()` + + This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. + Folders structures are not backward compatible. + + + + """ + def __init__(self, study_folder=None): + # import pandas as pd + + self.study_folder = Path(study_folder) + + # self.computed_names = None + # self.recording_names = None + # self.cases_names = None + + self.datasets = {} + self.cases = {} + + # self.rec_names = None + # self.sorter_names = None + + self.scan_folder() + + # self.comparisons = None + # self.exhaustive_gt = None + + @classmethod + def create(cls, study_folder, datasets={}, cases={}): + pass + + def __repr__(self): + t = f"GroundTruthStudy {self.study_folder.stem} \n" + t += f" recordings: {len(self.rec_names)} {self.rec_names}\n" + if len(self.sorter_names): + t += " cases: {} {}\n".format(len(self.sorter_names), self.sorter_names) + + return t + + def scan_folder(self): + self.rec_names = get_rec_names(self.study_folder) + # scan computed names + self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name) + self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist() + self._is_scanned = True + + + + + +class OLDGroundTruthStudy: def __init__(self, study_folder=None): import pandas as pd diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 70f8a63c8c..f28d901075 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -3,16 +3,18 @@ import pytest from pathlib import Path -from spikeinterface.extractors import toy_example +# from spikeinterface.extractors import toy_example +from spikeinterface import generate_ground_truth_recording +from spikeinterface.preprocessing import bandpass_filter from spikeinterface.sorters import installed_sorters from spikeinterface.comparison import GroundTruthStudy -try: - import tridesclous +# try: +# import tridesclous - HAVE_TDC = True -except ImportError: - HAVE_TDC = False +# HAVE_TDC = True +# except ImportError: +# HAVE_TDC = False if hasattr(pytest, "global_test_folder"): @@ -27,61 +29,105 @@ def setup_module(): if study_folder.is_dir(): shutil.rmtree(study_folder) - _setup_comparison_study() + create_study(study_folder) -def _setup_comparison_study(): - rec0, gt_sorting0 = toy_example(num_channels=4, duration=30, seed=0, num_segments=1) - rec1, gt_sorting1 = toy_example(num_channels=32, duration=30, seed=0, num_segments=1) +def simple_preprocess(rec): + return bandpass_filter(rec) - gt_dict = { + +def create_study(study_folder): + rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) + rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) + + datasets = { "toy_tetrode": (rec0, gt_sorting0), "toy_probe32": (rec1, gt_sorting1), + "toy_probe32_preprocess": (simple_preprocess(rec1), gt_sorting1), } - study = GroundTruthStudy.create(study_folder, gt_dict) + 
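+    # A possible alternative (illustrative only, names and parameters are assumptions):
+    # the explicit `cases` dict below could also be built in a loop, with one tuple key
+    # per (sorter, dataset) combination, e.g.:
+    #
+    #     cases = {}
+    #     for sorter_name in ("tridesclous2", "spykingcircus2"):
+    #         for dataset_key in datasets:
+    #             cases[(sorter_name, "no-preprocess", dataset_key)] = {
+    #                 "label": f"{sorter_name} on {dataset_key}",
+    #                 "dataset": dataset_key,
+    #                 "run_sorter_params": {"sorter_name": sorter_name},
+    #                 "comparison_params": {},
+    #             }
+    #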
+ # cases can also be generated via simple loops + cases = { + # + ("tdc2", "no-preprocess", "tetrode"): { + "label": "tridesclous2 without preprocessing and standard params", + "dataset": "toy_tetrode", + "run_sorter_params": { + + }, + "comparison_params": { + + }, + }, + # + ("tdc2", "with-preprocess", "probe32"): { + "label": "tridesclous2 with preprocessing standar params", + "dataset": "toy_probe32_preprocess", + "run_sorter_params": { + + }, + "comparison_params": { + + }, + }, + # + ("sc2", "no-preprocess", "tetrode"): { + "label": "spykingcircus2 without preprocessing standar params", + "dataset": "toy_tetrode", + "run_sorter_params": { + + }, + "comparison_params": { + + }, + }, + } + + study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) + print(study) -@pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -def test_run_study_sorters(): - study = GroundTruthStudy(study_folder) - sorter_list = [ - "tridesclous", - ] - print( - f"\n#################################\nINSTALLED SORTERS\n#################################\n" - f"{installed_sorters()}" - ) - study.run_sorters(sorter_list) +# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") +# def test_run_study_sorters(): +# study = GroundTruthStudy(study_folder) +# sorter_list = [ +# "tridesclous", +# ] +# print( +# f"\n#################################\nINSTALLED SORTERS\n#################################\n" +# f"{installed_sorters()}" +# ) +# study.run_sorters(sorter_list) -@pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -def test_extract_sortings(): - study = GroundTruthStudy(study_folder) +# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") +# def test_extract_sortings(): +# study = GroundTruthStudy(study_folder) - study.copy_sortings() +# study.copy_sortings() - for rec_name in study.rec_names: - gt_sorting = study.get_ground_truth(rec_name) +# for rec_name in study.rec_names: +# gt_sorting = study.get_ground_truth(rec_name) - for rec_name in study.rec_names: - metrics = study.get_metrics(rec_name=rec_name) +# for rec_name in study.rec_names: +# metrics = study.get_metrics(rec_name=rec_name) - snr = study.get_units_snr(rec_name=rec_name) +# snr = study.get_units_snr(rec_name=rec_name) - study.copy_sortings() +# study.copy_sortings() - run_times = study.aggregate_run_times() +# run_times = study.aggregate_run_times() - study.run_comparisons(exhaustive_gt=True) +# study.run_comparisons(exhaustive_gt=True) - perf = study.aggregate_performance_by_unit() +# perf = study.aggregate_performance_by_unit() - count_units = study.aggregate_count_units() - dataframes = study.aggregate_dataframes() - print(dataframes) +# count_units = study.aggregate_count_units() +# dataframes = study.aggregate_dataframes() +# print(dataframes) if __name__ == "__main__": - # setup_module() + setup_module() # test_run_study_sorters() - test_extract_sortings() + # test_extract_sortings() From 462961ff8321c1a060705f27005f38dfd6ef3a66 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 13:44:05 +0200 Subject: [PATCH 035/322] new GroundTruthStudy wip --- .../comparison/groundtruthstudy.py | 153 +++++++++++++++--- .../comparison/tests/test_groundtruthstudy.py | 23 ++- 2 files changed, 146 insertions(+), 30 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 12588019ba..fc4de5a18d 100644 --- 
a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -1,26 +1,32 @@ from pathlib import Path import shutil +import json +import pickle import numpy as np from spikeinterface.core import load_extractor -from spikeinterface.extractors import NpzSortingExtractor -from spikeinterface.sorters import sorter_dict, run_sorters +from spikeinterface.core.core_tools import SIJsonEncoder + +from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder from spikeinterface import WaveformExtractor from spikeinterface.qualitymetrics import compute_quality_metrics from .paircomparisons import compare_sorter_to_ground_truth -from .studytools import ( - setup_comparison_study, - get_rec_names, - get_recordings, - iter_working_folder, - iter_computed_names, - iter_computed_sorting, - collect_run_times, -) +# from .studytools import ( +# setup_comparison_study, +# get_rec_names, +# get_recordings, +# iter_working_folder, +# iter_computed_names, +# iter_computed_sorting, +# collect_run_times, +# ) + + +_key_separator = " ## " class GroundTruthStudy: """ @@ -44,10 +50,10 @@ class GroundTruthStudy: """ - def __init__(self, study_folder=None): + def __init__(self, study_folder): # import pandas as pd - self.study_folder = Path(study_folder) + self.folder = Path(study_folder) # self.computed_names = None # self.recording_names = None @@ -66,22 +72,121 @@ def __init__(self, study_folder=None): @classmethod def create(cls, study_folder, datasets={}, cases={}): - pass + study_folder = Path(study_folder) + study_folder.mkdir(exist_ok=False, parents=True) + + (study_folder / "datasets").mkdir() + (study_folder / "datasets/recordings").mkdir() + (study_folder / "datasets/gt_sortings").mkdir() + (study_folder / "sorters").mkdir() + (study_folder / "sortings").mkdir() + + for key, (rec, gt_sorting) in datasets.items(): + assert "/" not in key + assert "\\" not in key + + # rec are pickle + rec.dump_to_pickle(study_folder / f"datasets/recordings/{key}.pickle") + + # sorting are pickle + saved as NumpyFolderSorting + gt_sorting.dump_to_pickle(study_folder / f"datasets/gt_sortings/{key}.pickle") + gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") + + + # (study_folder / "cases.jon").write_text( + # json.dumps(cases, indent=4, cls=SIJsonEncoder), + # encoding="utf8", + # ) + # cases is dump to a pickle file, json is not possible because of tuple key + (study_folder / "cases.pickle").write_bytes(pickle.dumps(cases)) + + return cls(study_folder) + + + def scan_folder(self): + if not (self.folder / "datasets").exists(): + raise ValueError(f"This is folder is not a {self.folder} GroundTruthStudy") + + for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): + key = rec_file.stem + rec = load_extractor(rec_file) + gt_sorting = load_extractor(self.folder / f"datasets/gt_sortings/{key}") + self.datasets[key] = (rec, gt_sorting) + + with open(self.folder / "cases.pickle", "rb") as f: + self.cases = pickle.load(f) def __repr__(self): - t = f"GroundTruthStudy {self.study_folder.stem} \n" - t += f" recordings: {len(self.rec_names)} {self.rec_names}\n" - if len(self.sorter_names): - t += " cases: {} {}\n".format(len(self.sorter_names), self.sorter_names) + t = f"GroundTruthStudy {self.folder.stem} \n" + t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" + t += f" cases: {len(self.cases)} {list(self.cases.keys())}\n" return t - def scan_folder(self): - self.rec_names = 
get_rec_names(self.study_folder) - # scan computed names - self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name) - self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist() - self._is_scanned = True + def key_to_str(self, key): + if isinstance(key, str): + return key + elif isinstance(key, tuple): + return _key_separator.join(key) + else: + raise ValueError("Keys for cases must str or tuple") + + def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True, verbose=False): + """ + + """ + if case_keys is None: + case_keys = self.cases.keys() + + job_list = [] + for key in case_keys: + sorting_folder = self.folder / "sortings" / self.key_to_str(key) + sorting_exists = sorting_folder.exists() + + sorter_folder = self.folder / "sorters" / self.key_to_str(key) + sorter_folder_exists = sorting_folder.exists() + + if keep: + if sorting_exists: + continue + if sorter_folder_exists: + # the sorter folder exists but havent been copied to sortings folder + sorting = read_sorter_folder(sorter_folder, raise_error=False) + if sorting is not None: + # save and skip + sorting.save(format="numpy_folder", folder=sorting_folder) + continue + + params = self.cases[key]["run_sorter_params"].copy() + # this ensure that sorter_name is given + recording, _ = self.datasets[self.cases[key]["dataset"]] + sorter_name = params.pop("sorter_name") + job = dict(sorter_name=sorter_name, + recording=recording, + output_folder=sorter_folder) + job.update(params) + job_list.append(job) + + run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) + + # TODO create a list in laucher for engine blocking and non-blocking + if engine not in ("slurm", ): + self.copy_sortings(case_keys) + + def copy_sortings(self, case_keys=None): + if case_keys is None: + case_keys = self.cases.keys() + + for key in case_keys: + sorting_folder = self.folder / "sortings" / self.key_to_str(key) + sorter_folder = self.folder / "sorters" / self.key_to_str(key) + + sorting = read_sorter_folder(sorter_folder, raise_error=False) + if sorting is not None: + sorting.save(format="numpy_folder", folder=sorting_folder) + + def run_comparisons(self): + pass diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index f28d901075..15ba7db2ab 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -25,18 +25,19 @@ study_folder = cache_folder / "test_groundtruthstudy/" +print(study_folder.absolute()) def setup_module(): if study_folder.is_dir(): shutil.rmtree(study_folder) - create_study(study_folder) + create_a_study(study_folder) def simple_preprocess(rec): return bandpass_filter(rec) -def create_study(study_folder): +def create_a_study(study_folder): rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) @@ -53,7 +54,7 @@ def create_study(study_folder): "label": "tridesclous2 without preprocessing and standard params", "dataset": "toy_tetrode", "run_sorter_params": { - + "sorter_name": "tridesclous2", }, "comparison_params": { @@ -64,7 +65,7 @@ def create_study(study_folder): "label": "tridesclous2 with preprocessing standar params", "dataset": "toy_probe32_preprocess", "run_sorter_params": { - + "sorter_name": 
"tridesclous2", }, "comparison_params": { @@ -75,7 +76,7 @@ def create_study(study_folder): "label": "spykingcircus2 without preprocessing standar params", "dataset": "toy_tetrode", "run_sorter_params": { - + "sorter_name": "spykingcircus2", }, "comparison_params": { @@ -87,6 +88,13 @@ def create_study(study_folder): print(study) + +def test_GroundTruthStudy(): + study = GroundTruthStudy(study_folder) + print(study) + + study.run_sorters(verbose=True) + # @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") # def test_run_study_sorters(): # study = GroundTruthStudy(study_folder) @@ -128,6 +136,9 @@ def create_study(study_folder): if __name__ == "__main__": - setup_module() + # setup_module() + test_GroundTruthStudy() + + # test_run_study_sorters() # test_extract_sortings() From e0af88dbae3593a62372706ed842cde3b1736464 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 20:32:11 +0200 Subject: [PATCH 036/322] Make internal sorters able to be run with none dumpable to json recording. --- src/spikeinterface/comparison/groundtruthstudy.py | 2 +- .../comparison/tests/test_groundtruthstudy.py | 4 ++-- src/spikeinterface/core/base.py | 6 ++++-- src/spikeinterface/sorters/internal/si_based.py | 14 +++++++++++--- .../sorters/internal/spyking_circus2.py | 4 +--- .../sorters/internal/tridesclous2.py | 4 +--- 6 files changed, 20 insertions(+), 14 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index fc4de5a18d..2eeb697980 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -105,7 +105,7 @@ def create(cls, study_folder, datasets={}, cases={}): def scan_folder(self): if not (self.folder / "datasets").exists(): - raise ValueError(f"This is folder is not a {self.folder} GroundTruthStudy") + raise ValueError(f"This is folder is not a GroundTruthStudy : {self.folder.absolute()}") for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 15ba7db2ab..169c5a12bb 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -85,7 +85,7 @@ def create_a_study(study_folder): } study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) - print(study) + # print(study) @@ -136,7 +136,7 @@ def test_GroundTruthStudy(): if __name__ == "__main__": - # setup_module() + setup_module() test_GroundTruthStudy() diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 87c0805630..4f6043f16e 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -425,14 +425,15 @@ def from_dict(dictionary: dict, base_folder: Optional[Union[Path, str]] = None) extractor: RecordingExtractor or SortingExtractor The loaded extractor object """ - if dictionary["relative_paths"]: + # for pickle dump relative_path was not in the dict, this ensure compatibility + if dictionary.get("relative_paths", False): assert base_folder is not None, "When relative_paths=True, need to provide base_folder" dictionary = _make_paths_absolute(dictionary, base_folder) extractor = _load_extractor_from_dict(dictionary) folder_metadata = dictionary.get("folder_metadata", None) if folder_metadata is not None: folder_metadata = Path(folder_metadata) 
- if dictionary["relative_paths"]: + if dictionary.get("relative_paths", False): folder_metadata = base_folder / folder_metadata extractor.load_metadata_from_folder(folder_metadata) return extractor @@ -622,6 +623,7 @@ def dump_to_pickle( include_annotations=True, include_properties=include_properties, folder_metadata=folder_metadata, + relative_to=None, recursive=False, ) file_path = self._get_file_path(file_path, [".pkl", ".pickle"]) diff --git a/src/spikeinterface/sorters/internal/si_based.py b/src/spikeinterface/sorters/internal/si_based.py index 1496ffbbd1..ee5dcbea0d 100644 --- a/src/spikeinterface/sorters/internal/si_based.py +++ b/src/spikeinterface/sorters/internal/si_based.py @@ -1,4 +1,4 @@ -from spikeinterface.core import load_extractor +from spikeinterface.core import load_extractor, NumpyRecording from spikeinterface.sorters import BaseSorter @@ -14,8 +14,16 @@ def is_installed(cls): @classmethod def _setup_recording(cls, recording, output_folder, params, verbose): - # nothing to do here because the spikeinterface_recording.json is here anyway - pass + # Some recording not json serializable but they can be saved to pickle + # * NoiseGeneratorRecording or InjectTemplatesRecording: we force a pickle because this is light + # * for NumpyRecording (this is a bit crazy because it flush the entire buffer!!) + if recording.check_if_dumpable() and not isinstance(recording, NumpyRecording): + rec_file = output_folder.parent / "spikeinterface_recording.pickle" + recording.dump_to_pickle(rec_file) + # TODO (hard) : find a solution for NumpyRecording without any dump + # this will need an internal API change I think + # because the run_sorter is from the "folder" (because of container mainly and also many other reasons) + # and not from the recording itself @classmethod def _get_result_from_folder(cls, output_folder): diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 9de2762562..72171cd5b5 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -54,9 +54,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): job_kwargs["verbose"] = verbose job_kwargs["progress_bar"] = verbose - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = load_extractor(sorter_output_folder.parent / "spikeinterface_recording.pickle") sampling_rate = recording.get_sampling_frequency() num_channels = recording.get_num_channels() diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 42f51d3a77..7cbf01cf68 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -49,9 +49,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): import hdbscan - recording_raw = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording_raw = load_extractor(sorter_output_folder.parent / "spikeinterface_recording.pickle") num_chans = recording_raw.get_num_channels() sampling_frequency = recording_raw.get_sampling_frequency() From 9905bf59fc4447e5f80bbf5acadb71f692337982 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 8 Sep 2023 21:24:24 +0200 Subject: [PATCH 037/322] wip --- src/spikeinterface/comparison/groundtruthstudy.py | 2 ++ 1 
file changed, 2 insertions(+) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 2eeb697980..d760703ea1 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -165,6 +165,8 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True recording=recording, output_folder=sorter_folder) job.update(params) + # the verbose is overwritten and global to all run_sorters + job["verbose"] = verbose job_list.append(job) run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) From 98fa0f81b280ef79c691444d0d3999abb2c9a160 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Sat, 9 Sep 2023 08:57:29 +0200 Subject: [PATCH 038/322] gt_study wip --- .../comparison/groundtruthstudy.py | 59 ++++++++++++++----- .../comparison/tests/test_groundtruthstudy.py | 12 +++- 2 files changed, 53 insertions(+), 18 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index d760703ea1..3debced277 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -5,7 +5,7 @@ import numpy as np -from spikeinterface.core import load_extractor +from spikeinterface.core import load_extractor, extract_waveforms from spikeinterface.core.core_tools import SIJsonEncoder from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder @@ -13,7 +13,7 @@ from spikeinterface import WaveformExtractor from spikeinterface.qualitymetrics import compute_quality_metrics -from .paircomparisons import compare_sorter_to_ground_truth +from .paircomparisons import compare_sorter_to_ground_truth, GroundTruthComparison # from .studytools import ( # setup_comparison_study, @@ -51,25 +51,15 @@ class GroundTruthStudy: """ def __init__(self, study_folder): - # import pandas as pd - self.folder = Path(study_folder) - # self.computed_names = None - # self.recording_names = None - # self.cases_names = None - self.datasets = {} self.cases = {} - - # self.rec_names = None - # self.sorter_names = None + self.sortings = {} + self.comparisons = {} self.scan_folder() - # self.comparisons = None - # self.exhaustive_gt = None - @classmethod def create(cls, study_folder, datasets={}, cases={}): study_folder = Path(study_folder) @@ -116,10 +106,26 @@ def scan_folder(self): with open(self.folder / "cases.pickle", "rb") as f: self.cases = pickle.load(f) + self.comparisons = {k: None for k in self.cases} + + self.sortings = {} + for key in self.cases: + sorting_folder = self.folder / "sortings" / self.key_to_str(key) + print(sorting_folder) + print(sorting_folder.is_dir()) + if sorting_folder.exists(): + sorting = load_extractor(sorting_folder) + else: + sorting = None + self.sortings[key] = sorting + + def __repr__(self): t = f"GroundTruthStudy {self.folder.stem} \n" t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" t += f" cases: {len(self.cases)} {list(self.cases.keys())}\n" + num_computed = sum([1 for sorting in self.sortings.values() if sorting is not None]) + t += f" computed: {num_computed}\n" return t @@ -187,10 +193,31 @@ def copy_sortings(self, case_keys=None): if sorting is not None: sorting.save(format="numpy_folder", folder=sorting_folder) - def run_comparisons(self): - pass + def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison, **kwargs): + if case_keys is None: + case_keys = 
self.cases.keys() + + for key in case_keys: + dataset_key = self.cases[key]["dataset"] + _, gt_sorting = self.datasets[dataset_key] + sorting = self.sortings[key] + comp = comparison_class(gt_sorting, sorting, **kwargs) + self.comparisons[key] = comp + def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): + + if case_keys is None: + case_keys = self.cases.keys() + + base_folder = self.folder / "waveforms" + base_folder.mkdir(exist_ok=True) + + for key in case_keys: + dataset_key = self.cases[key]["dataset"] + recording, gt_sorting = self.datasets[dataset_key] + wf_folder = base_folder / self.key_to_str(key) + we = extract_waveforms(recording, gt_sorting, folder=wf_folder) diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 169c5a12bb..9aaa742184 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -93,7 +93,15 @@ def test_GroundTruthStudy(): study = GroundTruthStudy(study_folder) print(study) - study.run_sorters(verbose=True) + # study.run_sorters(verbose=True) + + # print(study.sortings) + + # print(study.comparisons) + # study.run_comparisons() + # print(study.comparisons) + + study.extract_waveforms_gt(n_jobs=-1) # @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") # def test_run_study_sorters(): @@ -136,7 +144,7 @@ def test_GroundTruthStudy(): if __name__ == "__main__": - setup_module() + # setup_module() test_GroundTruthStudy() From f0940a5265d9f1db235dc4db66af15e0b513fc51 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Sat, 9 Sep 2023 18:19:18 +0200 Subject: [PATCH 039/322] gt study wip --- .../comparison/groundtruthstudy.py | 200 +++++++++++++++++- .../comparison/tests/test_groundtruthstudy.py | 48 +++-- 2 files changed, 224 insertions(+), 24 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 3debced277..9eb771b71a 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -1,11 +1,12 @@ from pathlib import Path import shutil +import os import json import pickle import numpy as np -from spikeinterface.core import load_extractor, extract_waveforms +from spikeinterface.core import load_extractor, extract_waveforms, load_waveforms from spikeinterface.core.core_tools import SIJsonEncoder from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder @@ -26,7 +27,16 @@ # ) +# TODO : save comparison in folders +# TODO : find a way to set level names + + + +# This is to separate names when the key are tuples when saving folders _key_separator = " ## " +# This would be more funny +# _key_separator = " (°_°) " + class GroundTruthStudy: """ @@ -70,6 +80,10 @@ def create(cls, study_folder, datasets={}, cases={}): (study_folder / "datasets/gt_sortings").mkdir() (study_folder / "sorters").mkdir() (study_folder / "sortings").mkdir() + (study_folder / "sortings" / "run_logs").mkdir() + (study_folder / "metrics").mkdir() + + for key, (rec, gt_sorting) in datasets.items(): assert "/" not in key @@ -111,8 +125,6 @@ def scan_folder(self): self.sortings = {} for key in self.cases: sorting_folder = self.folder / "sortings" / self.key_to_str(key) - print(sorting_folder) - print(sorting_folder.is_dir()) if sorting_folder.exists(): sorting = load_extractor(sorting_folder) else: @@ -160,9 +172,13 @@ def run_sorters(self, 
case_keys=None, engine='loop', engine_kwargs={}, keep=True sorting = read_sorter_folder(sorter_folder, raise_error=False) if sorting is not None: # save and skip - sorting.save(format="numpy_folder", folder=sorting_folder) + self.copy_sortings(case_keys=[key]) continue - + + if sorting_exists: + # TODO : delete sorting + log + pass + params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given recording, _ = self.datasets[self.cases[key]["dataset"]] @@ -181,17 +197,29 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True if engine not in ("slurm", ): self.copy_sortings(case_keys) - def copy_sortings(self, case_keys=None): + def copy_sortings(self, case_keys=None, force=True): if case_keys is None: case_keys = self.cases.keys() for key in case_keys: sorting_folder = self.folder / "sortings" / self.key_to_str(key) sorter_folder = self.folder / "sorters" / self.key_to_str(key) + log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" sorting = read_sorter_folder(sorter_folder, raise_error=False) if sorting is not None: - sorting.save(format="numpy_folder", folder=sorting_folder) + if sorting_folder.exists(): + if force: + # TODO delete folder + log + shutil.rmtree(sorting_folder) + else: + continue + + sorting = sorting.save(format="numpy_folder", folder=sorting_folder) + self.sortings[key] = sorting + + # copy logs + shutil.copyfile(sorter_folder / "spikeinterface_log.json", log_file) def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison, **kwargs): @@ -202,9 +230,29 @@ def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison dataset_key = self.cases[key]["dataset"] _, gt_sorting = self.datasets[dataset_key] sorting = self.sortings[key] + if sorting is None: + self.comparisons[key] = None + continue comp = comparison_class(gt_sorting, sorting, **kwargs) self.comparisons[key] = comp + def get_run_times(self, case_keys=None): + import pandas as pd + if case_keys is None: + case_keys = self.cases.keys() + + log_folder = self.folder / "sortings" / "run_logs" + + run_times = {} + for key in case_keys: + log_file = log_folder / f"{self.key_to_str(key)}.json" + with open(log_file, mode="r") as logfile: + log = json.load(logfile) + run_time = log.get("run_time", None) + run_times[key] = run_time + + return pd.Series(run_times, name="run_time") + def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): if case_keys is None: @@ -219,6 +267,144 @@ def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): wf_folder = base_folder / self.key_to_str(key) we = extract_waveforms(recording, gt_sorting, folder=wf_folder) + def get_waveform_extractor(self, key): + # some recording are not dumpable to json and the waveforms extactor need it! 
+ # so we load it with and put after + we = load_waveforms(self.folder / "waveforms" / self.key_to_str(key), with_recording=False) + dataset_key = self.cases[key]["dataset"] + recording, _ = self.datasets[dataset_key] + we.set_recording(recording) + return we + + def get_templates(self, key, mode="mean"): + we = self.get_waveform_extractor(key) + templates = we.get_all_templates(mode=mode) + return templates + + def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], force=False): + if case_keys is None: + case_keys = self.cases.keys() + + for key in case_keys: + filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + if filename.exists(): + if force: + os.remove(filename) + else: + continue + + we = self.get_waveform_extractor(key) + metrics = compute_quality_metrics(we, metric_names=metric_names) + metrics.to_csv(filename, sep="\t", index=True) + + def get_metrics(self, key): + import pandas as pd + filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + if not filename.exists(): + return + metrics = pd.read_csv(filename, sep="\t", index_col=0) + dataset_key = self.cases[key]["dataset"] + recording, gt_sorting = self.datasets[dataset_key] + metrics.index = gt_sorting.unit_ids + return metrics + + def get_units_snr(self, key): + """ + """ + return self.get_metrics(key)["snr"] + + def aggregate_performance_by_unit(self, case_keys=None): + + import pandas as pd + + if case_keys is None: + case_keys = self.cases.keys() + + perf_by_unit = [] + for key in case_keys: + comp = self.comparisons.get(key, None) + assert comp is not None, "You need to do study.run_comparisons() first" + + perf = comp.get_performance(method="by_unit", output="pandas") + if isinstance(key, str): + cols = ["level0"] + perf["level0"] = key + + elif isinstance(key, tuple): + cols = [f'level{i}' for i in range(len(key))] + for col, k in zip(cols, key): + perf[col] = k + + perf = perf.reset_index() + perf_by_unit.append(perf) + + + + perf_by_unit = pd.concat(perf_by_unit) + perf_by_unit = perf_by_unit.set_index(cols) + + return perf_by_unit + + # def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None): + + def aggregate_count_units( + self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None + ): + + import pandas as pd + + if case_keys is None: + case_keys = self.cases.keys() + + perf_by_unit = [] + for key in case_keys: + comp = self.comparisons.get(key, None) + assert comp is not None, "You need to do study.run_comparisons() first" + + + + # assert self.comparisons is not None, "run_comparisons first" + + # import pandas as pd + + # index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"]) + + # count_units = pd.DataFrame( + # index=index, + # columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"], + # dtype=int, + # ) + + # if self.exhaustive_gt: + # count_units["num_false_positive"] = pd.Series(dtype=int) + # count_units["num_bad"] = pd.Series(dtype=int) + + # for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): + # gt_sorting = self.get_ground_truth(rec_name) + # comp = self.comparisons[(rec_name, sorter_name)] + + # count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) + # count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) + # count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units( + # 
well_detected_score + # ) + # if self.exhaustive_gt: + # count_units.loc[(rec_name, sorter_name), "num_overmerged"] = comp.count_overmerged_units( + # overmerged_score + # ) + # count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units(redundant_score) + # count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units( + # redundant_score + # ) + # count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() + + # return count_units + + + + + + class OLDGroundTruthStudy: diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 9aaa742184..3593b0b05f 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -71,17 +71,17 @@ def create_a_study(study_folder): }, }, - # - ("sc2", "no-preprocess", "tetrode"): { - "label": "spykingcircus2 without preprocessing standar params", - "dataset": "toy_tetrode", - "run_sorter_params": { - "sorter_name": "spykingcircus2", - }, - "comparison_params": { - - }, - }, + # we comment this at the moement because SC2 is quite slow for testing + # ("sc2", "no-preprocess", "tetrode"): { + # "label": "spykingcircus2 without preprocessing standar params", + # "dataset": "toy_tetrode", + # "run_sorter_params": { + # "sorter_name": "spykingcircus2", + # }, + # "comparison_params": { + + # }, + # }, } study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) @@ -93,16 +93,30 @@ def test_GroundTruthStudy(): study = GroundTruthStudy(study_folder) print(study) - # study.run_sorters(verbose=True) + study.run_sorters(verbose=True) - # print(study.sortings) + print(study.sortings) - # print(study.comparisons) - # study.run_comparisons() - # print(study.comparisons) + print(study.comparisons) + study.run_comparisons() + print(study.comparisons) study.extract_waveforms_gt(n_jobs=-1) + study.compute_metrics() + + for key in study.cases: + metrics = study.get_metrics(key) + print(metrics) + + study.aggregate_performance_by_unit() + + +# perf = study.aggregate_performance_by_unit() +# count_units = study.aggregate_count_units() + + + # @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") # def test_run_study_sorters(): # study = GroundTruthStudy(study_folder) @@ -144,7 +158,7 @@ def test_GroundTruthStudy(): if __name__ == "__main__": - # setup_module() + setup_module() test_GroundTruthStudy() From b0267dcd72b69c0c1982d57200381c9ab6c1ec0f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Sun, 10 Sep 2023 21:45:40 +0200 Subject: [PATCH 040/322] Add levels concept in GTStudy --- .../comparison/groundtruthstudy.py | 83 ++++++++++++++++--- .../comparison/tests/test_groundtruthstudy.py | 3 +- 2 files changed, 74 insertions(+), 12 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 9eb771b71a..76c019f6b9 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -71,7 +71,29 @@ def __init__(self, study_folder): self.scan_folder() @classmethod - def create(cls, study_folder, datasets={}, cases={}): + def create(cls, study_folder, datasets={}, cases={}, levels=None): + + # check that cases keys are homogeneous + key0 = list(cases.keys())[0] + if isinstance(key0, str): + assert all(isinstance(key, str) for key in cases.keys()), "Keys for cases are 
not homogeneous" + if levels is None: + levels = "level0" + else: + assert isinstance(levels, str) + elif isinstance(key0, tuple): + assert all(isinstance(key, tuple) for key in cases.keys()), "Keys for cases are not homogeneous" + num_levels = len(key0) + assert all(len(key) == num_levels for key in cases.keys()), "Keys for cases are not homogeneous, tuple negth differ" + if levels is None: + levels = [f"level{i}" for i in range(num_levels)] + else: + levels = list(levels) + assert len(levels) == num_levels + else: + raise ValueError("Keys for cases must str or tuple") + + study_folder = Path(study_folder) study_folder.mkdir(exist_ok=False, parents=True) @@ -97,6 +119,10 @@ def create(cls, study_folder, datasets={}, cases={}): gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") + info = {} + info["levels"] = levels + (study_folder / "info.json").write_text(json.dumps(info, indent=4), encoding="utf8") + # (study_folder / "cases.jon").write_text( # json.dumps(cases, indent=4, cls=SIJsonEncoder), # encoding="utf8", @@ -111,6 +137,12 @@ def scan_folder(self): if not (self.folder / "datasets").exists(): raise ValueError(f"This is folder is not a GroundTruthStudy : {self.folder.absolute()}") + with open(self.folder / "info.json", "r") as f: + self.info = json.load(f) + if isinstance(self.levels, list): + # because tuple caoont be stored in json + self.levels = tuple(self.info["levels"]) + for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem rec = load_extractor(rec_file) @@ -327,12 +359,9 @@ def aggregate_performance_by_unit(self, case_keys=None): perf = comp.get_performance(method="by_unit", output="pandas") if isinstance(key, str): - cols = ["level0"] - perf["level0"] = key - + perf[self.levels] = key elif isinstance(key, tuple): - cols = [f'level{i}' for i in range(len(key))] - for col, k in zip(cols, key): + for col, k in zip(self.levels, key): perf[col] = k perf = perf.reset_index() @@ -341,7 +370,7 @@ def aggregate_performance_by_unit(self, case_keys=None): perf_by_unit = pd.concat(perf_by_unit) - perf_by_unit = perf_by_unit.set_index(cols) + perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit @@ -354,18 +383,50 @@ def aggregate_count_units( import pandas as pd if case_keys is None: - case_keys = self.cases.keys() + case_keys = list(self.cases.keys()) + + if isinstance(case_keys[0], str): + index = pd.Index(case_keys, name=self.levels) + else: + index = pd.MultiIndex.from_tuples(case_keys, names=self.levels) + + + columns = ["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"] + comp = self.comparisons[case_keys[0]] + if comp.exhaustive_gt: + columns.extend(["num_false_positive", "num_bad"]) + count_units = pd.DataFrame(index=index, columns=columns, dtype=int) + - perf_by_unit = [] for key in case_keys: comp = self.comparisons.get(key, None) assert comp is not None, "You need to do study.run_comparisons() first" + gt_sorting = comp.sorting1 + sorting = comp.sorting2 + + count_units.loc[key, "num_gt"] = len(gt_sorting.get_unit_ids()) + count_units.loc[key, "num_sorter"] = len(sorting.get_unit_ids()) + count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( + well_detected_score + ) + if comp.exhaustive_gt: + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( + overmerged_score + ) + count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) + count_units.loc[key, "num_false_positive"] = 
comp.count_false_positive_units( + redundant_score + ) + count_units.loc[key, "num_bad"] = comp.count_bad_units() + + # count_units = pd.concat(count_units) + # count_units = count_units.set_index(cols) + return count_units - # assert self.comparisons is not None, "run_comparisons first" - # import pandas as pd + count_units = [] # index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"]) diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 3593b0b05f..5c5af476e4 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -84,7 +84,7 @@ def create_a_study(study_folder): # }, } - study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases) + study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "processing", "probe_type"]) # print(study) @@ -110,6 +110,7 @@ def test_GroundTruthStudy(): print(metrics) study.aggregate_performance_by_unit() + study.aggregate_count_units() # perf = study.aggregate_performance_by_unit() From fe178c67ac9428477ca146dd6ac453bf1cccfc78 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 11 Sep 2023 10:37:00 +0200 Subject: [PATCH 041/322] Apply suggestions and avoid using chmod on windows --- src/spikeinterface/core/waveform_extractor.py | 111 +++++++++--------- .../tests/common_extension_tests.py | 28 +++-- 2 files changed, 73 insertions(+), 66 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 431440c846..3647e915bf 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1751,9 +1751,7 @@ def __init__(self, waveform_extractor): if self.format == "binary": self.extension_folder = self.folder / self.extension_name if not self.extension_folder.is_dir(): - if not self.waveform_extractor.is_read_only(): - self.extension_folder.mkdir() - else: + if self.waveform_extractor.is_read_only(): warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." @@ -1761,15 +1759,16 @@ def __init__(self, waveform_extractor): self.format = "memory" self.extension_folder = None self.folder = None + else: + self.extension_folder.mkdir() + else: import zarr mode = "r+" if not self.waveform_extractor.is_read_only() else "r" zarr_root = zarr.open(self.folder, mode=mode) if self.extension_name not in zarr_root.keys(): - if not self.waveform_extractor.is_read_only(): - self.extension_group = zarr_root.create_group(self.extension_name) - else: + if self.waveform_extractor.is_read_only(): warn( "WaveformExtractor: cannot save extension in read-only mode. " "Extension will be saved in memory." 
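In practice, the read-only fallback documented above means that extensions computed on a read-only waveform folder are kept in memory only (with a warning) instead of being written to disk. A rough usage sketch, where the read-only folder path is a placeholder:

.. code-block:: python

    from spikeinterface import load_waveforms
    from spikeinterface.postprocessing import compute_spike_amplitudes

    # read_only_folder is a placeholder for a waveform folder without write permissions
    we = load_waveforms(read_only_folder)

    # the extension is computed as usual, but cannot be persisted to the folder
    compute_spike_amplitudes(we)
    ext = we.load_extension("spike_amplitudes")
    assert ext.format == "memory"          # kept in memory, not on disk
    assert ext.extension_folder is None
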
@@ -1777,6 +1776,8 @@ def __init__(self, waveform_extractor): self.format = "memory" self.extension_folder = None self.folder = None + else: + self.extension_group = zarr_root.create_group(self.extension_name) else: self.extension_group = zarr_root[self.extension_name] else: @@ -1893,56 +1894,58 @@ def save(self, **kwargs): self._save(**kwargs) def _save(self, **kwargs): - if not self.waveform_extractor.is_read_only(): - if self.format == "binary": - import pandas as pd - - for ext_data_name, ext_data in self._extension_data.items(): - if isinstance(ext_data, dict): - with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: - json.dump(ext_data, f) - elif isinstance(ext_data, np.ndarray): - np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) - else: - try: - with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: - pickle.dump(ext_data, f) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") - elif self.format == "zarr": - from .zarrrecordingextractor import get_default_zarr_compressor - import pandas as pd - import numcodecs - - compressor = kwargs.get("compressor", None) - if compressor is None: - compressor = get_default_zarr_compressor() - for ext_data_name, ext_data in self._extension_data.items(): - if ext_data_name in self.extension_group: - del self.extension_group[ext_data_name] - if isinstance(ext_data, dict): + # Only save if not read only + if self.waveform_extractor.is_read_only(): + return + if self.format == "binary": + import pandas as pd + + for ext_data_name, ext_data in self._extension_data.items(): + if isinstance(ext_data, dict): + with (self.extension_folder / f"{ext_data_name}.json").open("w") as f: + json.dump(ext_data, f) + elif isinstance(ext_data, np.ndarray): + np.save(self.extension_folder / f"{ext_data_name}.npy", ext_data) + elif isinstance(ext_data, pd.DataFrame): + ext_data.to_csv(self.extension_folder / f"{ext_data_name}.csv", index=True) + else: + try: + with (self.extension_folder / f"{ext_data_name}.pkl").open("wb") as f: + pickle.dump(ext_data, f) + except: + raise Exception(f"Could not save {ext_data_name} as extension data") + elif self.format == "zarr": + from .zarrrecordingextractor import get_default_zarr_compressor + import pandas as pd + import numcodecs + + compressor = kwargs.get("compressor", None) + if compressor is None: + compressor = get_default_zarr_compressor() + for ext_data_name, ext_data in self._extension_data.items(): + if ext_data_name in self.extension_group: + del self.extension_group[ext_data_name] + if isinstance(ext_data, dict): + self.extension_group.create_dataset( + name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() + ) + self.extension_group[ext_data_name].attrs["dict"] = True + elif isinstance(ext_data, np.ndarray): + self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) + elif isinstance(ext_data, pd.DataFrame): + ext_data.to_xarray().to_zarr( + store=self.extension_group.store, + group=f"{self.extension_group.name}/{ext_data_name}", + mode="a", + ) + self.extension_group[ext_data_name].attrs["dataframe"] = True + else: + try: self.extension_group.create_dataset( - name=ext_data_name, data=[ext_data], object_codec=numcodecs.JSON() - ) - self.extension_group[ext_data_name].attrs["dict"] = True - elif isinstance(ext_data, np.ndarray): - 
self.extension_group.create_dataset(name=ext_data_name, data=ext_data, compressor=compressor) - elif isinstance(ext_data, pd.DataFrame): - ext_data.to_xarray().to_zarr( - store=self.extension_group.store, - group=f"{self.extension_group.name}/{ext_data_name}", - mode="a", + name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() ) - self.extension_group[ext_data_name].attrs["dataframe"] = True - else: - try: - self.extension_group.create_dataset( - name=ext_data_name, data=ext_data, object_codec=numcodecs.Pickle() - ) - except: - raise Exception(f"Could not save {ext_data_name} as extension data") + except: + raise Exception(f"Could not save {ext_data_name} as extension data") def reset(self): """ diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py index f44d58470c..f7272ddefe 100644 --- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py +++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py @@ -2,6 +2,7 @@ import numpy as np import pandas as pd import shutil +import platform from pathlib import Path from spikeinterface import extract_waveforms, load_extractor, load_waveforms, compute_sparsity @@ -78,12 +79,13 @@ def setUp(self): self.we2 = we2 # make we read-only - we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" - if not we_ro_folder.is_dir(): - shutil.copytree(we2.folder, we_ro_folder) + if platform.system() != "Windows": + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + if not we_ro_folder.is_dir(): + shutil.copytree(we2.folder, we_ro_folder) # change permissions (R+X) - we_ro_folder.chmod(0o555) - self.we_ro = load_waveforms(we_ro_folder) + we_ro_folder.chmod(0o555) + self.we_ro = load_waveforms(we_ro_folder) self.sparsity2 = compute_sparsity(we2, method="radius", radius_um=30) we_memory = extract_waveforms( @@ -108,8 +110,9 @@ def setUp(self): def tearDown(self): # allow pytest to delete RO folder - we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" - we_ro_folder.chmod(0o777) + if platform.system() != "Windows": + we_ro_folder = cache_folder / "toy_waveforms_2seg_readonly" + we_ro_folder.chmod(0o777) def _test_extension_folder(self, we, in_memory=False): if self.extension_function_kwargs_list is None: @@ -193,8 +196,9 @@ def test_extension(self): print(f"{ext_data_name} of type {type(ext_data_mem)} not tested.") # read-only - Extension is memory only - _ = self.extension_class.get_extension_function()(self.we_ro, load_if_exists=False) - assert self.extension_class.extension_name in self.we_ro.get_available_extension_names() - ext_ro = self.we_ro.load_extension(self.extension_class.extension_name) - assert ext_ro.format == "memory" - assert ext_ro.extension_folder is None + if platform.system() != "Windows": + _ = self.extension_class.get_extension_function()(self.we_ro, load_if_exists=False) + assert self.extension_class.extension_name in self.we_ro.get_available_extension_names() + ext_ro = self.we_ro.load_extension(self.extension_class.extension_name) + assert ext_ro.format == "memory" + assert ext_ro.extension_folder is None From 4f73dd1cd5f7990ace9f6f8d962b218f406e4692 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 11 Sep 2023 16:37:59 +0200 Subject: [PATCH 042/322] WIP: firing_range and amplitude_spread --- doc/modules/qualitymetrics/firing_range.rst | 48 ++++++++++ .../qualitymetrics/misc_metrics.py | 94 ++++++++++++++++++- .../qualitymetrics/quality_metric_list.py | 2 + 3 files changed, 143 
insertions(+), 1 deletion(-) create mode 100644 doc/modules/qualitymetrics/firing_range.rst diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst new file mode 100644 index 0000000000..fd8f79682c --- /dev/null +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -0,0 +1,48 @@ +Firing range (:code:`firing_range`) +=================================== + + +Calculation +----------- + +The firing range indicates the spread of the firing range of a unit across the recording. It is computed by +taking the difference between the 95-th and 5th percentiles firing rates computed over short time bins (e.g. 10 s). + + + +Expectation and use +------------------- + +Both very high and very low firing rates can indicate errors. +Highly contaminated units (type I error) may have high firing rates as a result of the inclusion of other neurons' spikes. +Low firing rate units are likely to be incomplete (type II error), although this is not always the case (some neurons have highly selective firing patterns). +The firing rate is expected to be approximately log-normally distributed [Buzsáki]_. + +Example code +------------ + +.. code-block:: python + + import spikeinterface.qualitymetrics as qm + + # Make recording, sorting and wvf_extractor object for your data. + firing_rate = qm.compute_firing_ranges(wvf_extractor) + # firing_rate is a dict containing the units' IDs as keys, + # and their firing rates across segments as values (in Hz). + +References +---------- + +.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_firing_rates + + +Links to original implementations +--------------------------------- + +* From the `AllenSDK `_ + +Literature +---------- + +Unknown origin. +Widely discussed eg: [Buzsáki]_. diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..9be9a32ff6 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -563,7 +563,99 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k return synchrony_metrics -_default_params["synchrony_metrics"] = dict(synchrony_sizes=(0, 2, 4)) +_default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4)) + + +def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.95), unit_ids=None): + """ + Compute firing range, the range between the 5th and 95th quantiles of the firing rates distribution + computed in non-overlapping time bins. + + Parameters + ---------- + waveform_extractor : WaveformExtractor + The waveform extractor object. + bin_size_s : float, default: 5 + The size of the bin in seconds. + quantiles : tuple, default: (0.05, 0.95) + The quantiles to compute. + + Returns + ------- + firing_ranges : dict + The firing range for each unit. 
+ """ + sampling_frequency = waveform_extractor.sampling_frequency + bin_size_samples = int(bin_size_s * sampling_frequency) + sorting = waveform_extractor.sorting + if unit_ids is None: + unit_ids = sorting.unit_ids + + # for each segment, we compute the firing rate histogram and we concatenate them + firing_rate_histograms = {unit_id: np.array([], dtype=float) for unit_id in sorting.unit_ids} + for segment_index in range(waveform_extractor.get_num_segments()): + num_samples = waveform_extractor.get_num_samples(segment_index) + edges = np.arange(0, num_samples + 1, bin_size_samples) + + for unit_id in unit_ids: + spike_times = sorting.get_unit_spike_train(unit_id=unit_id, segment_index=segment_index) + spike_counts, _ = np.histogram(spike_times, bins=edges) + firing_rates = spike_counts / bin_size_s + firing_rate_histograms[unit_id] = np.concatentate((firing_rate_histograms[unit_id], firing_rates)) + + # finally we compute the percentiles + firing_ranges = {} + for unit_id in unit_ids: + firing_ranges[unit_id] = np.percentile(firing_rate_histograms[unit_id], quantiles[1]) - np.percentile( + firing_rate_histograms[unit_id], quantiles[0] + ) + + return firing_ranges + + +_default_params["firing_range"] = dict(bin_size_s=5, quantiles=(0.05, 0.95)) + + +# TODO: docs +def compute_amplitude_spreads( + waveform_extractor, spikes_bin_size=50, amplitude_extension="spike_amplitudes", unit_ids=None +): + """Calculate mean spread of spike amplitudes within defined bins of AP events + + S Musall 2023 + + Input: + ------ + amplitudes : numpy.ndarray + Array of amplitudes (don't need to be in physical units) + + """ + sorting = waveform_extractor.sorting + spikes = sorting.to_spike_vector() + num_spikes = sorting.count_num_spikes_per_unit() + if unit_ids is None: + unit_ids = sorting.unit_ids + + if waveform_extractor.is_extension(amplitude_extension): + sac = waveform_extractor.load_extension(amplitude_extension) + amps = sac.get_data(outputs="concatenated") + else: + warnings.warn("") + empty_dict = {unit_id: np.nan for unit_id in unit_ids} + return empty_dict + + all_unit_ids = list(sorting.unit_ids) + for unit_id in unit_ids: + amps_unit = amps[spikes["unit_index"] == all_unit_ids.index(unit_id)] + if num_spikes[unit_id] < spikes_bin_size: + amp_spread = np.var(amps_unit) + else: + amp_spread = [] + for i in range(0, num_spikes[unit_id], spikes_bin_size): + amp_spread.append(np.var(amps_unit[i : i + spikes_bin_size])) + amp_spread = np.median(amp_spread) + + return amp_spread def compute_amplitude_cutoffs( diff --git a/src/spikeinterface/qualitymetrics/quality_metric_list.py b/src/spikeinterface/qualitymetrics/quality_metric_list.py index 90dbb47a3a..917927f44a 100644 --- a/src/spikeinterface/qualitymetrics/quality_metric_list.py +++ b/src/spikeinterface/qualitymetrics/quality_metric_list.py @@ -12,6 +12,7 @@ compute_amplitude_medians, compute_drift_metrics, compute_synchrony_metrics, + compute_firing_ranges, ) from .pca_metrics import ( @@ -41,5 +42,6 @@ "amplitude_cutoff": compute_amplitude_cutoffs, "amplitude_median": compute_amplitude_medians, "synchrony": compute_synchrony_metrics, + "firing_range": compute_firing_ranges, "drift": compute_drift_metrics, } From 0750638eb13030b22ad30b9db94fa968a60c7fa2 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 11 Sep 2023 16:56:23 +0200 Subject: [PATCH 043/322] wip gtstudy --- .../comparison/groundtruthstudy.py | 53 ++----------------- .../comparison/tests/test_groundtruthstudy.py | 2 +- src/spikeinterface/widgets/widget_list.py | 3 ++ 3 files 
changed, 9 insertions(+), 49 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 76c019f6b9..049c97c234 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -139,9 +139,11 @@ def scan_folder(self): with open(self.folder / "info.json", "r") as f: self.info = json.load(f) - if isinstance(self.levels, list): - # because tuple caoont be stored in json - self.levels = tuple(self.info["levels"]) + + self.levels = self.info["levels"] + # if isinstance(self.levels, list): + # # because tuple caoont be stored in json + # self.levels = tuple(self.info["levels"]) for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem @@ -371,11 +373,8 @@ def aggregate_performance_by_unit(self, case_keys=None): perf_by_unit = pd.concat(perf_by_unit) perf_by_unit = perf_by_unit.set_index(self.levels) - return perf_by_unit - # def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None): - def aggregate_count_units( self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None ): @@ -420,51 +419,9 @@ def aggregate_count_units( ) count_units.loc[key, "num_bad"] = comp.count_bad_units() - # count_units = pd.concat(count_units) - # count_units = count_units.set_index(cols) - return count_units - count_units = [] - - # index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"]) - - # count_units = pd.DataFrame( - # index=index, - # columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"], - # dtype=int, - # ) - - # if self.exhaustive_gt: - # count_units["num_false_positive"] = pd.Series(dtype=int) - # count_units["num_bad"] = pd.Series(dtype=int) - - # for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - # gt_sorting = self.get_ground_truth(rec_name) - # comp = self.comparisons[(rec_name, sorter_name)] - - # count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) - # count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) - # count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units( - # well_detected_score - # ) - # if self.exhaustive_gt: - # count_units.loc[(rec_name, sorter_name), "num_overmerged"] = comp.count_overmerged_units( - # overmerged_score - # ) - # count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units(redundant_score) - # count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units( - # redundant_score - # ) - # count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() - - # return count_units - - - - - diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 5c5af476e4..1da79b9efe 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -162,6 +162,6 @@ def test_GroundTruthStudy(): setup_module() test_GroundTruthStudy() - # test_run_study_sorters() # test_extract_sortings() + diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index f3c640ff16..1e9d5301cf 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -20,6 +20,7 
@@ from .unit_templates import UnitTemplatesWidget from .unit_waveforms_density_map import UnitWaveformDensityMapWidget from .unit_waveforms import UnitWaveformsWidget +from .gtstudy import StudyRunTimesWidget widget_list = [ @@ -41,6 +42,7 @@ UnitTemplatesWidget, UnitWaveformDensityMapWidget, UnitWaveformsWidget, + StudyRunTimesWidget, ] @@ -88,6 +90,7 @@ plot_unit_templates = UnitTemplatesWidget plot_unit_waveforms_density_map = UnitWaveformDensityMapWidget plot_unit_waveforms = UnitWaveformsWidget +plot_study_run_times = StudyRunTimesWidget def plot_timeseries(*args, **kwargs): From 6a364b03e3c6504969a9bdcf7eecf6885384ddb3 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 10:28:55 +0200 Subject: [PATCH 044/322] feedback from Zacha dn Ramon --- src/spikeinterface/sorters/launcher.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 103f30dac5..b158eba22d 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -39,9 +39,21 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal for job in job_list: run_sorter(**job) - For some engines ("loop", "joblib", "multiprocessing", "dask"), this function is blocking until the results . - For other engines ("slurm") the function returns almost immediately (aka non-blocking) and the results - must be retrieved by hand when finished with :py:func:`read_sorter_folder()`. + The following engines block the I/O: + * "loop" + * "joblib" + * "multiprocessing" + * "dask" + + The following engines are *asynchronous*: + * "slurm" + + Where *blocking* means that this function is blocking until the results are returned. + This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), + but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be aware + when jobs are finish. + In this *asynchronous* case, the :py:func:read_sorter_folder() helps to retrieve individual results. + Parameters ---------- @@ -302,7 +314,7 @@ def run_sorters( singularity_images={}, ): """ - This function is deprecated and will be removed. + This function is deprecated and will be removed in version 0.100 Please use run_sorter_jobs() instead. Parameters @@ -346,7 +358,7 @@ def run_sorters( """ warnings.warn( - "run_sorters()is deprecated please use run_sorter_jobs() instead", + "run_sorters()is deprecated please use run_sorter_jobs() instead. This will be removed in 0.100", DeprecationWarning, stacklevel=2, ) From ee2eb2f04d5c17817fcb9f014f9814f5192cb624 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 14:23:00 +0200 Subject: [PATCH 045/322] STart porting matplotlib widgets related to ground truth study. 
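The job-list form described in the run_sorter_jobs() docstring above (patch 044) boils down to something like the following sketch; the recordings and output folders are placeholders:

.. code-block:: python

    from spikeinterface.sorters import run_sorter_jobs, read_sorter_folder

    job_list = [
        # each entry holds the keyword arguments of run_sorter()
        dict(sorter_name="tridesclous2", recording=recording_a, output_folder="sort_a"),
        dict(sorter_name="spykingcircus2", recording=recording_b, output_folder="sort_b"),
    ]

    # "loop" blocks until every job has finished; with engine="slurm" the call
    # returns immediately and results are collected later from the output folders
    run_sorter_jobs(job_list, engine="loop")
    sorting_a = read_sorter_folder("sort_a")
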
--- .../comparison/groundtruthstudy.py | 4 +- .../comparison/tests/test_groundtruthstudy.py | 48 +---- src/spikeinterface/widgets/gtstudy.py | 192 ++++++++++++++++++ src/spikeinterface/widgets/widget_list.py | 6 +- 4 files changed, 201 insertions(+), 49 deletions(-) create mode 100644 src/spikeinterface/widgets/gtstudy.py diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 049c97c234..d936c50e5e 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -347,7 +347,7 @@ def get_units_snr(self, key): """ return self.get_metrics(key)["snr"] - def aggregate_performance_by_unit(self, case_keys=None): + def get_performance_by_unit(self, case_keys=None): import pandas as pd @@ -375,7 +375,7 @@ def aggregate_performance_by_unit(self, case_keys=None): perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit - def aggregate_count_units( + def get_count_units( self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None ): diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 1da79b9efe..52d5c73d3b 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -109,54 +109,10 @@ def test_GroundTruthStudy(): metrics = study.get_metrics(key) print(metrics) - study.aggregate_performance_by_unit() - study.aggregate_count_units() + study.get_performance_by_unit() + study.get_count_units() -# perf = study.aggregate_performance_by_unit() -# count_units = study.aggregate_count_units() - - - -# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -# def test_run_study_sorters(): -# study = GroundTruthStudy(study_folder) -# sorter_list = [ -# "tridesclous", -# ] -# print( -# f"\n#################################\nINSTALLED SORTERS\n#################################\n" -# f"{installed_sorters()}" -# ) -# study.run_sorters(sorter_list) - - -# @pytest.mark.skipif(not HAVE_TDC, reason="Test requires Python package 'tridesclous'") -# def test_extract_sortings(): -# study = GroundTruthStudy(study_folder) - -# study.copy_sortings() - -# for rec_name in study.rec_names: -# gt_sorting = study.get_ground_truth(rec_name) - -# for rec_name in study.rec_names: -# metrics = study.get_metrics(rec_name=rec_name) - -# snr = study.get_units_snr(rec_name=rec_name) - -# study.copy_sortings() - -# run_times = study.aggregate_run_times() - -# study.run_comparisons(exhaustive_gt=True) - -# perf = study.aggregate_performance_by_unit() - -# count_units = study.aggregate_count_units() -# dataframes = study.aggregate_dataframes() -# print(dataframes) - if __name__ == "__main__": setup_module() diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py new file mode 100644 index 0000000000..aa1a80c3d3 --- /dev/null +++ b/src/spikeinterface/widgets/gtstudy.py @@ -0,0 +1,192 @@ +import numpy as np + +from .base import BaseWidget, to_attr +from .utils import get_unit_colors + +from ..core import ChannelSparsity +from ..core.waveform_extractor import WaveformExtractor +from ..core.basesorting import BaseSorting + + +class StudyRunTimesWidget(BaseWidget): + """ + Plot sorter run times for a GroundTruthStudy + + + Parameters + ---------- + study: GroundTruthStudy + A study object. 
+ case_keys: list or None + A selection of cases to plot, if None, then all. + + """ + + def __init__( + self, + study, + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + run_times=study.get_run_times(), + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + for i, key in enumerate(dp.case_keys): + label = dp.study.cases[key]["label"] + rt = dp.run_times.loc[key] + self.ax.bar(i, rt, width=0.8, label=label) + + self.ax.legend() + + + +# TODO : plot optionally average on some levels using group by +class StudyUnitCountsWidget(BaseWidget): + """ + Plot unit counts for a study: "num_well_detected", "num_false_positive", "num_redundant", "num_overmerged" + + + Parameters + ---------- + study: GroundTruthStudy + A study object. + case_keys: list or None + A selection of cases to plot, if None, then all. + + """ + + def __init__( + self, + study, + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + count_units = study.get_count_units(case_keys=case_keys), + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from .utils import get_some_colors + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + columns = dp.count_units.columns.tolist() + columns.remove("num_gt") + columns.remove("num_sorter") + + ncol = len(columns) + + colors = get_some_colors(columns, color_engine="auto", + map_name="hot") + colors["num_well_detected"] = "green" + + xticklabels = [] + for i, key in enumerate(dp.case_keys): + for c, col in enumerate(columns): + x = i + 1 + c / (ncol + 1) + y = dp.count_units.loc[key, col] + if not "well_detected" in col: + y = -y + + if i == 0: + label = col.replace("num_", "").replace("_", " ").title() + else: + label = None + + self.ax.bar([x], [y], width=1 / (ncol + 2), label=label, color=colors[col]) + + xticklabels.append(dp.study.cases[key]["label"]) + + self.ax.set_xticks(np.arange(len(dp.case_keys)) + 1) + self.ax.set_xticklabels(xticklabels) + self.ax.legend() + + +# TODO : plot optionally average on some levels using group by +class StudyPerformances(BaseWidget): + """ + Plot performances over case for a study. + + + Parameters + ---------- + study: GroundTruthStudy + A study object. + mode: str + Which mode in "swarm" + case_keys: list or None + A selection of cases to plot, if None, then all. 
+ + """ + + def __init__( + self, + study, + mode="swarm", + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + perfs=study.get_performance_by_unit(case_keys=case_keys), + mode=mode, + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from .utils import get_some_colors + + import pandas as pd + import seaborn as sns + + dp = to_attr(data_plot) + perfs = dp.perfs + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + if dp.mode == "swarm": + levels = perfs.index.names + df = pd.melt(perfs.reset_index(), id_vars=levels, var_name='Metric', value_name='Score', + value_vars=('accuracy','precision', 'recall')) + df['x'] = df.apply(lambda r: ' '.join([r[col] for col in levels]), axis=1) + sns.swarmplot(data=df, x='x', y='Score', hue='Metric', dodge=True) diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 1e9d5301cf..4bc91e0737 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -20,7 +20,7 @@ from .unit_templates import UnitTemplatesWidget from .unit_waveforms_density_map import UnitWaveformDensityMapWidget from .unit_waveforms import UnitWaveformsWidget -from .gtstudy import StudyRunTimesWidget +from .gtstudy import StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances widget_list = [ @@ -43,6 +43,8 @@ UnitWaveformDensityMapWidget, UnitWaveformsWidget, StudyRunTimesWidget, + StudyUnitCountsWidget, + StudyPerformances ] @@ -91,6 +93,8 @@ plot_unit_waveforms_density_map = UnitWaveformDensityMapWidget plot_unit_waveforms = UnitWaveformsWidget plot_study_run_times = StudyRunTimesWidget +plot_study_unit_counts = StudyUnitCountsWidget +plot_study_performances = StudyPerformances def plot_timeseries(*args, **kwargs): From b91ff2e774de0b2ee04f1ed6e075962e1c30d468 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 12 Sep 2023 15:24:44 +0200 Subject: [PATCH 046/322] Add amplitude_spread --- .../qualitymetrics/amplitude_spread.rst | 48 ++++++++++++++ doc/modules/qualitymetrics/drift.rst | 1 + doc/modules/qualitymetrics/firing_range.rst | 24 +++---- .../qualitymetrics/misc_metrics.py | 66 ++++++++++++++----- .../qualitymetrics/quality_metric_list.py | 2 + .../tests/test_metrics_functions.py | 26 +++++++- 6 files changed, 132 insertions(+), 35 deletions(-) create mode 100644 doc/modules/qualitymetrics/amplitude_spread.rst diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_spread.rst new file mode 100644 index 0000000000..0ae0761265 --- /dev/null +++ b/doc/modules/qualitymetrics/amplitude_spread.rst @@ -0,0 +1,48 @@ +Amplitude spread (:code:`amplitude_spread`) +=========================================== + + +Calculation +----------- + +The amplitude spread is a measure of the amplitude variability. +It is computed the ratio between the standard deviation and the amplitude mean (aka coefficient of variation). +To obtain a better estimate of this measure, it is first computed separately for several bins of a prefixed number of spikes +(e.g 100) and then the median of these values is taken. 
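Schematically, for a single unit this amounts to the following (an illustrative sketch using synthetic amplitudes, not the library implementation itself):

.. code-block:: python

    import numpy as np

    # synthetic amplitudes for one unit, for illustration only
    amps = np.random.normal(loc=-80, scale=8, size=1000)
    num_spikes_per_bin = 100

    amp_mean = np.abs(np.mean(amps))
    spreads = [
        np.std(amps[i:i + num_spikes_per_bin]) / amp_mean
        for i in range(0, len(amps), num_spikes_per_bin)
    ]
    amplitude_spread = np.median(spreads)
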
+ +The computation requires either spike amplitudes (see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes()`) +or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_amplitude_scalings()`) to be pre-computed. + + +Expectation and use +------------------- + +Very high levels of amplitude_spread ranges, outside of a physiolocigal range, might indicate noise contamination. + + +Example code +------------ + +.. code-block:: python + + import spikeinterface.qualitymetrics as qm + + # Make recording, sorting and wvf_extractor object for your data. + # It is required to run `compute_spike_amplitudes(wvf_extractor)` or + # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN) + amplitude_spread = qm.compute_firing_ranges(wvf_extractor, amplitude_extension='spike_amplitudes') + # amplitude_spread is a dict containing the units' IDs as keys, + # and their amplitude_spread (in units of standard deviation). + + + +References +---------- + +.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_amplitude_spreads + + +Literature +---------- + +Designed by Simon Musall and adapted to SpikeInterface by Alessio Buccino. diff --git a/doc/modules/qualitymetrics/drift.rst b/doc/modules/qualitymetrics/drift.rst index 0a852f80af..4e78150ba7 100644 --- a/doc/modules/qualitymetrics/drift.rst +++ b/doc/modules/qualitymetrics/drift.rst @@ -42,6 +42,7 @@ Example code import spikeinterface.qualitymetrics as qm + # Make recording, sorting and wvf_extractor object for your data. # It is required to run `compute_spike_locations(wvf_extractor)` # (if missing, values will be NaN) drift_ptps, drift_stds, drift_mads = qm.compute_drift_metrics(wvf_extractor, peak_sign="neg") diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index fd8f79682c..0d17eedc13 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -5,7 +5,7 @@ Firing range (:code:`firing_range`) Calculation ----------- -The firing range indicates the spread of the firing range of a unit across the recording. It is computed by +The firing range indicates the dispersion of the firing rate of a unit across the recording. It is computed by taking the difference between the 95-th and 5th percentiles firing rates computed over short time bins (e.g. 10 s). @@ -13,10 +13,8 @@ taking the difference between the 95-th and 5th percentiles firing rates compute Expectation and use ------------------- -Both very high and very low firing rates can indicate errors. -Highly contaminated units (type I error) may have high firing rates as a result of the inclusion of other neurons' spikes. -Low firing rate units are likely to be incomplete (type II error), although this is not always the case (some neurons have highly selective firing patterns). -The firing rate is expected to be approximately log-normally distributed [Buzsáki]_. +Very high levels of firing ranges, outside of a physiolocigal range, might indicate noise contamination. + Example code ------------ @@ -26,23 +24,17 @@ Example code import spikeinterface.qualitymetrics as qm # Make recording, sorting and wvf_extractor object for your data. - firing_rate = qm.compute_firing_ranges(wvf_extractor) - # firing_rate is a dict containing the units' IDs as keys, - # and their firing rates across segments as values (in Hz). 
+ firing_range = qm.compute_firing_ranges(wvf_extractor) + # firing_range is a dict containing the units' IDs as keys, + # and their firing firing_range as values (in Hz). References ---------- -.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_firing_rates - - -Links to original implementations ---------------------------------- +.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_firing_ranges -* From the `AllenSDK `_ Literature ---------- -Unknown origin. -Widely discussed eg: [Buzsáki]_. +Designed by Simon Musall and adapted to SpikeInterface by Alessio Buccino. diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 9be9a32ff6..6c237ee720 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -567,8 +567,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.95), unit_ids=None): - """ - Compute firing range, the range between the 5th and 95th quantiles of the firing rates distribution + """Calculate firing range, the range between the 5th and 95th quantiles of the firing rates distribution computed in non-overlapping time bins. Parameters @@ -579,11 +578,17 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 The size of the bin in seconds. quantiles : tuple, default: (0.05, 0.95) The quantiles to compute. + unit_ids : list or None + List of unit ids to compute the firing range. If None, all units are used. Returns ------- firing_ranges : dict The firing range for each unit. + + Notes + ----- + Designed by Simon Musall and ported to SpikeInterface by Alessio Buccino. """ sampling_frequency = waveform_extractor.sampling_frequency bin_size_samples = int(bin_size_s * sampling_frequency) @@ -601,7 +606,7 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 spike_times = sorting.get_unit_spike_train(unit_id=unit_id, segment_index=segment_index) spike_counts, _ = np.histogram(spike_times, bins=edges) firing_rates = spike_counts / bin_size_s - firing_rate_histograms[unit_id] = np.concatentate((firing_rate_histograms[unit_id], firing_rates)) + firing_rate_histograms[unit_id] = np.concatenate((firing_rate_histograms[unit_id], firing_rates)) # finally we compute the percentiles firing_ranges = {} @@ -616,20 +621,37 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 _default_params["firing_range"] = dict(bin_size_s=5, quantiles=(0.05, 0.95)) -# TODO: docs def compute_amplitude_spreads( - waveform_extractor, spikes_bin_size=50, amplitude_extension="spike_amplitudes", unit_ids=None + waveform_extractor, num_spikes_per_bin=100, amplitude_extension="spike_amplitudes", unit_ids=None ): - """Calculate mean spread of spike amplitudes within defined bins of AP events + """Calculate spread of spike amplitudes within defined bins of spike events. + The spread is the median relative variance (variance divided by the overall amplitude mean) + computed over bins of `num_spikes_per_bin` spikes. - S Musall 2023 + Parameters + ---------- + waveform_extractor : WaveformExtractor + The waveform extractor object. + num_spikes_per_bin : int, default: 50 + The number of spikes per bin. + amplitude_extension : str, default: 'spike_amplitudes' + The name of the extension to load the amplitudes from. 'spike_amplitudes' or 'amplitude_scalings'. 
+ unit_ids : list or None + List of unit ids to compute the amplitude spread. If None, all units are used. - Input: - ------ - amplitudes : numpy.ndarray - Array of amplitudes (don't need to be in physical units) + Returns + ------- + amplitude_spreads : dict + The amplitude spread for each unit. + Notes + ----- + Designed by Simon Musall and ported to SpikeInterface by Alessio Buccino. """ + assert amplitude_extension in ( + "spike_amplitudes", + "amplitude_scalings", + ), "Invalid amplitude_extension. It can be either 'spike_amplitudes' or 'amplitude_scalings'" sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector() num_spikes = sorting.count_num_spikes_per_unit() @@ -639,23 +661,31 @@ def compute_amplitude_spreads( if waveform_extractor.is_extension(amplitude_extension): sac = waveform_extractor.load_extension(amplitude_extension) amps = sac.get_data(outputs="concatenated") + if amplitude_extension == "spike_amplitudes": + amps = np.concatenate(amps) else: warnings.warn("") empty_dict = {unit_id: np.nan for unit_id in unit_ids} return empty_dict all_unit_ids = list(sorting.unit_ids) + amplitude_spreads = {} for unit_id in unit_ids: amps_unit = amps[spikes["unit_index"] == all_unit_ids.index(unit_id)] - if num_spikes[unit_id] < spikes_bin_size: - amp_spread = np.var(amps_unit) + amp_mean = np.abs(np.mean(amps_unit)) + if num_spikes[unit_id] < num_spikes_per_bin: + amp_spread = np.std(amps_unit) / amp_mean else: - amp_spread = [] - for i in range(0, num_spikes[unit_id], spikes_bin_size): - amp_spread.append(np.var(amps_unit[i : i + spikes_bin_size])) - amp_spread = np.median(amp_spread) + amp_spreads = [] + for i in range(0, num_spikes[unit_id], num_spikes_per_bin): + amp_spreads.append(np.std(amps_unit[i : i + num_spikes_per_bin]) / amp_mean) + amp_spread = np.median(amp_spreads) + amplitude_spreads[unit_id] = amp_spread + + return amplitude_spreads + - return amp_spread +_default_params["amplitude_spread"] = dict(num_spikes_per_bin=100, amplitude_extension="spike_amplitudes") def compute_amplitude_cutoffs( diff --git a/src/spikeinterface/qualitymetrics/quality_metric_list.py b/src/spikeinterface/qualitymetrics/quality_metric_list.py index 917927f44a..ee25ce64fd 100644 --- a/src/spikeinterface/qualitymetrics/quality_metric_list.py +++ b/src/spikeinterface/qualitymetrics/quality_metric_list.py @@ -13,6 +13,7 @@ compute_drift_metrics, compute_synchrony_metrics, compute_firing_ranges, + compute_amplitude_spreads, ) from .pca_metrics import ( @@ -41,6 +42,7 @@ "sliding_rp_violation": compute_sliding_rp_violations, "amplitude_cutoff": compute_amplitude_cutoffs, "amplitude_median": compute_amplitude_medians, + "amplitude_spread": compute_amplitude_spreads, "synchrony": compute_synchrony_metrics, "firing_range": compute_firing_ranges, "drift": compute_drift_metrics, diff --git a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py index d927d64c4f..a570b75b52 100644 --- a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py +++ b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py @@ -12,6 +12,7 @@ compute_principal_components, compute_spike_locations, compute_spike_amplitudes, + compute_amplitude_scalings, ) from spikeinterface.qualitymetrics import ( @@ -31,6 +32,8 @@ compute_drift_metrics, compute_amplitude_medians, compute_synchrony_metrics, + compute_firing_ranges, + compute_amplitude_spreads, ) @@ -212,6 +215,12 @@ def 
test_calculate_firing_rate_num_spikes(waveform_extractor_simple): # np.testing.assert_array_equal(list(num_spikes_gt.values()), list(num_spikes.values())) +def test_calculate_firing_range(waveform_extractor_simple): + we = waveform_extractor_simple + firing_ranges = compute_firing_ranges(we) + print(firing_ranges) + + def test_calculate_amplitude_cutoff(waveform_extractor_simple): we = waveform_extractor_simple spike_amps = compute_spike_amplitudes(we) @@ -234,6 +243,19 @@ def test_calculate_amplitude_median(waveform_extractor_simple): # assert np.allclose(list(amp_medians_gt.values()), list(amp_medians.values()), rtol=0.05) +def test_calculate_amplitude_spread(waveform_extractor_simple): + we = waveform_extractor_simple + spike_amps = compute_spike_amplitudes(we) + amp_spreads = compute_amplitude_spreads(we, num_spikes_per_bin=20) + print(amp_spreads) + + amps_scalings = compute_amplitude_scalings(we) + amp_spreads_scalings = compute_amplitude_spreads( + we, num_spikes_per_bin=20, amplitude_extension="amplitude_scalings" + ) + print(amp_spreads_scalings) + + def test_calculate_snrs(waveform_extractor_simple): we = waveform_extractor_simple snrs = compute_snrs(we) @@ -358,4 +380,6 @@ def test_calculate_drift_metrics(waveform_extractor_simple): # test_calculate_isi_violations(we) # test_calculate_sliding_rp_violations(we) # test_calculate_drift_metrics(we) - test_synchrony_metrics(we) + # test_synchrony_metrics(we) + test_calculate_firing_range(we) + test_calculate_amplitude_spread(we) From d80341ca2cd84852988cc5704bafc1c0a6d16540 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 18:16:26 +0200 Subject: [PATCH 047/322] remove gtstudy widgets from legacy and port some of then in the API. --- .../comparison/tests/test_groundtruthstudy.py | 15 +- .../widgets/_legacy_mpl_widgets/__init__.py | 16 - .../widgets/_legacy_mpl_widgets/gtstudy.py | 574 ------------------ src/spikeinterface/widgets/gtstudy.py | 60 ++ src/spikeinterface/widgets/widget_list.py | 6 +- 5 files changed, 66 insertions(+), 605 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 52d5c73d3b..a75ac272be 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -1,20 +1,11 @@ -import importlib import shutil import pytest from pathlib import Path -# from spikeinterface.extractors import toy_example from spikeinterface import generate_ground_truth_recording from spikeinterface.preprocessing import bandpass_filter -from spikeinterface.sorters import installed_sorters from spikeinterface.comparison import GroundTruthStudy -# try: -# import tridesclous - -# HAVE_TDC = True -# except ImportError: -# HAVE_TDC = False if hasattr(pytest, "global_test_folder"): @@ -71,7 +62,7 @@ def create_a_study(study_folder): }, }, - # we comment this at the moement because SC2 is quite slow for testing + # we comment this at the moement because SC2 is quite slow for testing # ("sc2", "no-preprocess", "tetrode"): { # "label": "spykingcircus2 without preprocessing standar params", # "dataset": "toy_tetrode", @@ -118,6 +109,4 @@ def test_GroundTruthStudy(): setup_module() test_GroundTruthStudy() - # test_run_study_sorters() - # test_extract_sortings() - + \ No newline at end of file diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py 
b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index c0dcd7ea6e..bf28c891f5 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -41,22 +41,6 @@ from .sortingperformance import plot_sorting_performance -# ground truth study (=comparison over sorter) -from .gtstudy import ( - StudyComparisonRunTimesWidget, - plot_gt_study_run_times, - StudyComparisonUnitCountsWidget, - StudyComparisonUnitCountsAveragesWidget, - plot_gt_study_unit_counts, - plot_gt_study_unit_counts_averages, - plot_gt_study_performances, - plot_gt_study_performances_averages, - StudyComparisonPerformancesWidget, - StudyComparisonPerformancesAveragesWidget, - plot_gt_study_performances_by_template_similarity, - StudyComparisonPerformancesByTemplateSimilarity, -) - # ground truth comparions (=comparison over sorter) from .gtcomparison import ( plot_gt_performances, diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py deleted file mode 100644 index 573221f528..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/gtstudy.py +++ /dev/null @@ -1,574 +0,0 @@ -""" -Various widgets on top of GroundTruthStudy to summary results: - * run times - * performances - * count units -""" -import numpy as np - - -from .basewidget import BaseWidget - - -class StudyComparisonRunTimesWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. If not given a figure is created - color: - - - """ - - def __init__(self, study, color="#F7DC6F", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.color = color - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - study = self.study - ax = self.ax - - all_run_times = study.aggregate_run_times() - av_run_times = all_run_times.reset_index().groupby("sorter_name")["run_time"].mean() - - if len(study.rec_names) == 1: - # no errors bars - yerr = None - else: - # errors bars across recording - yerr = all_run_times.reset_index().groupby("sorter_name")["run_time"].std() - - sorter_names = av_run_times.index - - x = np.arange(sorter_names.size) + 1 - ax.bar(x, av_run_times.values, width=0.8, color=self.color, yerr=yerr) - ax.set_ylabel("run times (s)") - ax.set_xticks(x) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_xlim(0, sorter_names.size + 1) - - -def plot_gt_study_run_times(*args, **kwargs): - W = StudyComparisonRunTimesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_run_times.__doc__ = StudyComparisonRunTimesWidget.__doc__ - - -class StudyComparisonUnitCountsAveragesWidget(BaseWidget): - """ - Plot averages over found units for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - log_scale: if the y-axis should be displayed as log scaled - - """ - - def __init__(self, study, cmap_name="Set2", log_scale=False, ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - self.log_scale = log_scale - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - study = self.study - ax = self.ax - - count_units = study.aggregate_count_units() - - if study.exhaustive_gt: - columns = ["num_well_detected", "num_false_positive", "num_redundant", "num_overmerged"] - else: - columns = ["num_well_detected", "num_redundant", "num_overmerged"] - ncol = len(columns) - - df = count_units.reset_index() - - m = df.groupby("sorter_name")[columns].mean() - - cmap = plt.get_cmap(self.cmap_name, 4) - - if len(study.rec_names) == 1: - # no errors bars - stds = None - else: - # errors bars across recording - stds = df.groupby("sorter_name")[columns].std() - - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - if stds is None: - yerr = None - else: - yerr = stds[col].values - ax.bar(x, m[col].values, yerr=yerr, width=1 / (ncol + 2), color=cmap(c), label=clean_labels[c]) - - ax.legend() - if self.log_scale: - ax.set_yscale("log") - - ax.set_xticks(np.arange(sorter_names.size) + 1) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("# units") - ax.set_xlim(0, sorter_names.size + 1) - - if count_units["num_gt"].unique().size == 1: - num_gt = count_units["num_gt"].unique()[0] - ax.axhline(num_gt, ls="--", color="k") - - -class StudyComparisonUnitCountsWidget(BaseWidget): - """ - Plot averages over found units for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - log_scale: if the y-axis should be displayed as log scaled - - """ - - def __init__(self, study, cmap_name="Set2", log_scale=False, ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - self.log_scale = log_scale - - num_rec = len(study.rec_names) - if ax is None: - fig, axes = plt.subplots(ncols=1, nrows=num_rec, squeeze=False) - else: - axes = np.array([ax]).T - - BaseWidget.__init__(self, axes=axes) - - def plot(self): - study = self.study - ax = self.ax - - import seaborn as sns - - study = self.study - - count_units = study.aggregate_count_units() - count_units = count_units.reset_index() - - if study.exhaustive_gt: - columns = ["num_well_detected", "num_false_positive", "num_redundant", "num_overmerged"] - else: - columns = ["num_well_detected", "num_redundant", "num_overmerged"] - - ncol = len(columns) - cmap = plt.get_cmap(self.cmap_name, 4) - - for r, rec_name in enumerate(study.rec_names): - ax = self.axes[r, 0] - ax.set_title(rec_name) - df = count_units.loc[count_units["rec_name"] == rec_name, :] - m = df.groupby("sorter_name")[columns].mean() - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - ax.bar(x, m[col].values, width=1 / (ncol + 2), color=cmap(c), label=clean_labels[c]) - - if r == 0: - ax.legend() - - if self.log_scale: - ax.set_yscale("log") - - if r == len(study.rec_names) - 1: - ax.set_xticks(np.arange(sorter_names.size) + 1) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("# units") - ax.set_xlim(0, sorter_names.size + 1) - - if count_units["num_gt"].unique().size == 1: - num_gt = count_units["num_gt"].unique()[0] - ax.axhline(num_gt, ls="--", color="k") - - -def plot_gt_study_unit_counts_averages(*args, **kwargs): - W = StudyComparisonUnitCountsAveragesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_unit_counts_averages.__doc__ = StudyComparisonUnitCountsAveragesWidget.__doc__ - - -def plot_gt_study_unit_counts(*args, **kwargs): - W = StudyComparisonUnitCountsWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_unit_counts.__doc__ = StudyComparisonUnitCountsWidget.__doc__ - - -class StudyComparisonPerformancesWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - - """ - - def __init__(self, study, palette="Set1", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.palette = palette - - num_rec = len(study.rec_names) - if ax is None: - fig, axes = plt.subplots(ncols=1, nrows=num_rec, squeeze=False) - else: - axes = np.array([ax]).T - - BaseWidget.__init__(self, axes=axes) - - def plot(self, average=False): - import seaborn as sns - - study = self.study - - sns.set_palette(sns.color_palette(self.palette)) - - perf_by_units = study.aggregate_performance_by_unit() - perf_by_units = perf_by_units.reset_index() - - for r, rec_name in enumerate(study.rec_names): - ax = self.axes[r, 0] - ax.set_title(rec_name) - df = perf_by_units.loc[perf_by_units["rec_name"] == rec_name, :] - df = pd.melt( - df, - id_vars="sorter_name", - var_name="Metric", - value_name="Score", - value_vars=("accuracy", "precision", "recall"), - ).sort_values("sorter_name") - sns.swarmplot( - data=df, x="sorter_name", y="Score", hue="Metric", dodge=True, s=3, ax=ax - ) # order=sorter_list, - # ~ ax.set_xticklabels(sorter_names_short, rotation=30, ha='center') - # ~ ax.legend(bbox_to_anchor=(1.0, 1), loc=2, borderaxespad=0., frameon=False, fontsize=8, markerscale=0.5) - - ax.set_ylim(0, 1.05) - ax.set_ylabel(f"Perfs for {rec_name}") - if r < len(study.rec_names) - 1: - ax.set_xlabel("") - ax.set(xticklabels=[]) - - -class StudyComparisonTemplateSimilarityWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. If not given a figure is created - cmap_name - - """ - - def __init__(self, study, cmap_name="Set1", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - import seaborn as sns - - study = self.study - ax = self.ax - - perf_by_units = study.aggregate_performance_by_unit() - perf_by_units = perf_by_units.reset_index() - - columns = ["accuracy", "precision", "recall"] - to_agg = {} - ncol = len(columns) - - for column in columns: - perf_by_units[column] = pd.to_numeric(perf_by_units[column], downcast="float") - to_agg[column] = ["mean"] - - data = perf_by_units.groupby(["sorter_name", "rec_name"]).agg(to_agg) - - m = data.groupby("sorter_name").mean() - - cmap = plt.get_cmap(self.cmap_name, 4) - - if len(study.rec_names) == 1: - # no errors bars - stds = None - else: - # errors bars across recording - stds = data.groupby("sorter_name").std() - - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - width = 1 / (ncol + 2) - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - if stds is None: - yerr = None - else: - yerr = stds[col].values - ax.bar(x, m[col].values.flatten(), yerr=yerr.flatten(), width=width, color=cmap(c), label=clean_labels[c]) - - ax.legend() - - ax.set_xticks(np.arange(sorter_names.size) + 1 + width) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("metric") - ax.set_xlim(0, sorter_names.size + 1) - - -class StudyComparisonPerformancesAveragesWidget(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - - """ - - def __init__(self, study, cmap_name="Set1", ax=None): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - import seaborn as sns - - study = self.study - ax = self.ax - - perf_by_units = study.aggregate_performance_by_unit() - perf_by_units = perf_by_units.reset_index() - - columns = ["accuracy", "precision", "recall"] - to_agg = {} - ncol = len(columns) - - for column in columns: - perf_by_units[column] = pd.to_numeric(perf_by_units[column], downcast="float") - to_agg[column] = ["mean"] - - data = perf_by_units.groupby(["sorter_name", "rec_name"]).agg(to_agg) - - m = data.groupby("sorter_name").mean() - - cmap = plt.get_cmap(self.cmap_name, 4) - - if len(study.rec_names) == 1: - # no errors bars - stds = None - else: - # errors bars across recording - stds = data.groupby("sorter_name").std() - - sorter_names = m.index - clean_labels = [col.replace("num_", "").replace("_", " ").title() for col in columns] - - width = 1 / (ncol + 2) - - for c, col in enumerate(columns): - x = np.arange(sorter_names.size) + 1 + c / (ncol + 2) - if stds is None: - yerr = None - else: - yerr = stds[col].values - ax.bar(x, m[col].values.flatten(), yerr=yerr.flatten(), width=width, color=cmap(c), label=clean_labels[c]) - - ax.legend() - - ax.set_xticks(np.arange(sorter_names.size) + 1 + width) - ax.set_xticklabels(sorter_names, rotation=45) - ax.set_ylabel("metric") - ax.set_xlim(0, sorter_names.size + 1) - - -class StudyComparisonPerformancesByTemplateSimilarity(BaseWidget): - """ - Plot run times for a study. - - Parameters - ---------- - study: GroundTruthStudy - The study object to consider - ax: matplotlib ax - The ax to be used. 
If not given a figure is created - cmap_name - - """ - - def __init__(self, study, cmap_name="Set1", ax=None, ylim=(0.6, 1), show_legend=True): - from matplotlib import pyplot as plt - import pandas as pd - - self.study = study - self.cmap_name = cmap_name - self.show_legend = show_legend - self.ylim = ylim - - BaseWidget.__init__(self, ax=ax) - - def plot(self): - import sklearn - - cmap = plt.get_cmap(self.cmap_name, len(self.study.sorter_names)) - colors = [cmap(i) for i in range(len(self.study.sorter_names))] - - flat_templates_gt = {} - for rec_name in self.study.rec_names: - waveform_folder = self.study.study_folder / "waveforms" / f"waveforms_GroundTruth_{rec_name}" - if not waveform_folder.is_dir(): - self.study.compute_waveforms(rec_name) - - templates = self.study.get_templates(rec_name) - flat_templates_gt[rec_name] = templates.reshape(templates.shape[0], -1) - - all_results = {} - - for sorter_name in self.study.sorter_names: - all_results[sorter_name] = {"similarity": [], "accuracy": []} - - for rec_name in self.study.rec_names: - try: - waveform_folder = self.study.study_folder / "waveforms" / f"waveforms_{sorter_name}_{rec_name}" - if not waveform_folder.is_dir(): - self.study.compute_waveforms(rec_name, sorter_name) - templates = self.study.get_templates(rec_name, sorter_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix = sklearn.metrics.pairwise.cosine_similarity( - flat_templates_gt[rec_name], flat_templates - ) - - comp = self.study.comparisons[(rec_name, sorter_name)] - - for i, u1 in enumerate(comp.sorting1.unit_ids): - u2 = comp.best_match_12[u1] - if u2 != -1: - all_results[sorter_name]["similarity"] += [ - similarity_matrix[comp.sorting1.id_to_index(u1), comp.sorting2.id_to_index(u2)] - ] - all_results[sorter_name]["accuracy"] += [comp.agreement_scores.at[u1, u2]] - except Exception: - pass - - all_results[sorter_name]["similarity"] = np.array(all_results[sorter_name]["similarity"]) - all_results[sorter_name]["accuracy"] = np.array(all_results[sorter_name]["accuracy"]) - - from matplotlib.patches import Ellipse - - similarity_means = [all_results[sorter_name]["similarity"].mean() for sorter_name in self.study.sorter_names] - similarity_stds = [all_results[sorter_name]["similarity"].std() for sorter_name in self.study.sorter_names] - - accuracy_means = [all_results[sorter_name]["accuracy"].mean() for sorter_name in self.study.sorter_names] - accuracy_stds = [all_results[sorter_name]["accuracy"].std() for sorter_name in self.study.sorter_names] - - scount = 0 - for x, y, i, j in zip(similarity_means, accuracy_means, similarity_stds, accuracy_stds): - e = Ellipse((x, y), i, j) - e.set_alpha(0.2) - e.set_facecolor(colors[scount]) - self.ax.add_artist(e) - self.ax.scatter([x], [y], c=colors[scount], label=self.study.sorter_names[scount]) - scount += 1 - - self.ax.set_ylabel("accuracy") - self.ax.set_xlabel("cosine similarity") - if self.ylim is not None: - self.ax.set_ylim(self.ylim) - - if self.show_legend: - self.ax.legend() - - -def plot_gt_study_performances(*args, **kwargs): - W = StudyComparisonPerformancesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_performances.__doc__ = StudyComparisonPerformancesWidget.__doc__ - - -def plot_gt_study_performances_averages(*args, **kwargs): - W = StudyComparisonPerformancesAveragesWidget(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_performances_averages.__doc__ = StudyComparisonPerformancesAveragesWidget.__doc__ - - -def 
plot_gt_study_performances_by_template_similarity(*args, **kwargs): - W = StudyComparisonPerformancesByTemplateSimilarity(*args, **kwargs) - W.plot() - return W - - -plot_gt_study_performances_by_template_similarity.__doc__ = StudyComparisonPerformancesByTemplateSimilarity.__doc__ diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index aa1a80c3d3..304cf1a44a 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -190,3 +190,63 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): value_vars=('accuracy','precision', 'recall')) df['x'] = df.apply(lambda r: ' '.join([r[col] for col in levels]), axis=1) sns.swarmplot(data=df, x='x', y='Score', hue='Metric', dodge=True) + + + +class StudyPerformancesVsMetrics(BaseWidget): + """ + Plot performances vs a metrics (snr for instance) over case for a study. + + + Parameters + ---------- + study: GroundTruthStudy + A study object. + mode: str + Which mode in "swarm" + case_keys: list or None + A selection of cases to plot, if None, then all. + + """ + + def __init__( + self, + study, + metric_name="snr", + performance_name="accuracy", + case_keys=None, + backend=None, + **backend_kwargs, + ): + + if case_keys is None: + case_keys = list(study.cases.keys()) + + plot_data = dict( + study=study, + metric_name=metric_name, + performance_name=performance_name, + case_keys=case_keys, + ) + + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from .utils import get_some_colors + + dp = to_attr(data_plot) + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + + study = dp.study + perfs = study.get_performance_by_unit(case_keys=dp.case_keys) + + for key in dp.case_keys: + x = study.get_metrics(key)[dp.metric_name].values + y = perfs.xs(key)[dp.performance_name].values + label = dp.study.cases[key]["label"] + self.ax.scatter(x, y, label=label) + + self.ax.legend() \ No newline at end of file diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 4bc91e0737..3a1bdd12dc 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -20,7 +20,7 @@ from .unit_templates import UnitTemplatesWidget from .unit_waveforms_density_map import UnitWaveformDensityMapWidget from .unit_waveforms import UnitWaveformsWidget -from .gtstudy import StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances +from .gtstudy import StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances, StudyPerformancesVsMetrics widget_list = [ @@ -44,7 +44,8 @@ UnitWaveformsWidget, StudyRunTimesWidget, StudyUnitCountsWidget, - StudyPerformances + StudyPerformances, + StudyPerformancesVsMetrics ] @@ -95,6 +96,7 @@ plot_study_run_times = StudyRunTimesWidget plot_study_unit_counts = StudyUnitCountsWidget plot_study_performances = StudyPerformances +plot_stufy_performances_vs_metrics = StudyPerformancesVsMetrics def plot_timeseries(*args, **kwargs): From f97f76a7948f87cdf6873ce0a0b378f1120040b7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 12 Sep 2023 18:23:43 +0200 Subject: [PATCH 048/322] Clean --- .../comparison/groundtruthstudy.py | 340 +----------------- 1 file changed, 10 insertions(+), 330 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py 
index d936c50e5e..8d43fb5f0c 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -16,19 +16,10 @@ from .paircomparisons import compare_sorter_to_ground_truth, GroundTruthComparison -# from .studytools import ( -# setup_comparison_study, -# get_rec_names, -# get_recordings, -# iter_working_folder, -# iter_computed_names, -# iter_computed_sorting, -# collect_run_times, -# ) - -# TODO : save comparison in folders -# TODO : find a way to set level names +# TODO : save comparison in folders when COmparison object will be able to serialize +# TODO ??: make an internal optional binary copy when running several external sorter +# on the same dataset to avoid multiple save binary ? even when the recording is float32 (ks need int16) @@ -48,17 +39,16 @@ class GroundTruthStudy: * parameters of comparisons * any combination of theses - For enough flexibility cases key can be a tuple so that we can varify complexity along several - "axis" (paremeters or sorter) + For enough flexibility cases key can be a tuple so that we can varify complexity along several + "levels" or "axis" (paremeters or sorter). + + Generated dataframes will have index with several levels optionaly. - Ground truth dataset need recording+sorting. This can be from meraec file or from the internal generator + Ground truth dataset need recording+sorting. This can be from mearec file or from the internal generator :py:fun:`generate_ground_truth_recording()` This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. - Folders structures are not backward compatible. - - - + Folders structures are not backward compatible at all. """ def __init__(self, study_folder): self.folder = Path(study_folder) @@ -105,8 +95,6 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): (study_folder / "sortings" / "run_logs").mkdir() (study_folder / "metrics").mkdir() - - for key, (rec, gt_sorting) in datasets.items(): assert "/" not in key assert "\\" not in key @@ -341,7 +329,7 @@ def get_metrics(self, key): recording, gt_sorting = self.datasets[dataset_key] metrics.index = gt_sorting.unit_ids return metrics - + def get_units_snr(self, key): """ """ @@ -369,8 +357,6 @@ def get_performance_by_unit(self, case_keys=None): perf = perf.reset_index() perf_by_unit.append(perf) - - perf_by_unit = pd.concat(perf_by_unit) perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit @@ -421,309 +407,3 @@ def get_count_units( return count_units - - - - -class OLDGroundTruthStudy: - def __init__(self, study_folder=None): - import pandas as pd - - self.study_folder = Path(study_folder) - self._is_scanned = False - self.computed_names = None - self.rec_names = None - self.sorter_names = None - - self.scan_folder() - - self.comparisons = None - self.exhaustive_gt = None - - def __repr__(self): - t = "Ground truth study\n" - t += " " + str(self.study_folder) + "\n" - t += " recordings: {} {}\n".format(len(self.rec_names), self.rec_names) - if len(self.sorter_names): - t += " sorters: {} {}\n".format(len(self.sorter_names), self.sorter_names) - - return t - - def scan_folder(self): - self.rec_names = get_rec_names(self.study_folder) - # scan computed names - self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name) - self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist() - self._is_scanned = True - - @classmethod - def create(cls, study_folder, 
gt_dict, **job_kwargs): - setup_comparison_study(study_folder, gt_dict, **job_kwargs) - return cls(study_folder) - - def run_sorters(self, sorter_list, mode_if_folder_exists="keep", remove_sorter_folders=False, **kwargs): - sorter_folders = self.study_folder / "sorter_folders" - recording_dict = get_recordings(self.study_folder) - - run_sorters( - sorter_list, - recording_dict, - sorter_folders, - with_output=False, - mode_if_folder_exists=mode_if_folder_exists, - **kwargs, - ) - - # results are copied so the heavy sorter_folders can be removed - self.copy_sortings() - - if remove_sorter_folders: - shutil.rmtree(self.study_folder / "sorter_folders") - - def _check_rec_name(self, rec_name): - if not self._is_scanned: - self.scan_folder() - if len(self.rec_names) > 1 and rec_name is None: - raise Exception("Pass 'rec_name' parameter to select which recording to use.") - elif len(self.rec_names) == 1: - rec_name = self.rec_names[0] - else: - rec_name = self.rec_names[self.rec_names.index(rec_name)] - return rec_name - - def get_ground_truth(self, rec_name=None): - rec_name = self._check_rec_name(rec_name) - sorting = load_extractor(self.study_folder / "ground_truth" / rec_name) - return sorting - - def get_recording(self, rec_name=None): - rec_name = self._check_rec_name(rec_name) - rec = load_extractor(self.study_folder / "raw_files" / rec_name) - return rec - - def get_sorting(self, sort_name, rec_name=None): - rec_name = self._check_rec_name(rec_name) - - selected_sorting = None - if sort_name in self.sorter_names: - for r_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - if sort_name == sorter_name and r_name == rec_name: - selected_sorting = sorting - return selected_sorting - - def copy_sortings(self): - sorter_folders = self.study_folder / "sorter_folders" - sorting_folders = self.study_folder / "sortings" - log_olders = self.study_folder / "sortings" / "run_log" - - log_olders.mkdir(parents=True, exist_ok=True) - - for rec_name, sorter_name, output_folder in iter_working_folder(sorter_folders): - SorterClass = sorter_dict[sorter_name] - fname = rec_name + "[#]" + sorter_name - npz_filename = sorting_folders / (fname + ".npz") - - try: - sorting = SorterClass.get_result_from_folder(output_folder) - NpzSortingExtractor.write_sorting(sorting, npz_filename) - except: - if npz_filename.is_file(): - npz_filename.unlink() - if (output_folder / "spikeinterface_log.json").is_file(): - shutil.copyfile( - output_folder / "spikeinterface_log.json", sorting_folders / "run_log" / (fname + ".json") - ) - - self.scan_folder() - - def run_comparisons(self, exhaustive_gt=False, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - sc = compare_sorter_to_ground_truth(gt_sorting, sorting, exhaustive_gt=exhaustive_gt, **kwargs) - self.comparisons[(rec_name, sorter_name)] = sc - self.exhaustive_gt = exhaustive_gt - - def aggregate_run_times(self): - return collect_run_times(self.study_folder) - - def aggregate_performance_by_unit(self): - assert self.comparisons is not None, "run_comparisons first" - - perf_by_unit = [] - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - comp = self.comparisons[(rec_name, sorter_name)] - - perf = comp.get_performance(method="by_unit", output="pandas") - perf["rec_name"] = rec_name - perf["sorter_name"] = sorter_name - perf = perf.reset_index() - perf_by_unit.append(perf) - - import pandas as pd - - 
perf_by_unit = pd.concat(perf_by_unit) - perf_by_unit = perf_by_unit.set_index(["rec_name", "sorter_name", "gt_unit_id"]) - - return perf_by_unit - - def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None): - assert self.comparisons is not None, "run_comparisons first" - - import pandas as pd - - index = pd.MultiIndex.from_tuples(self.computed_names, names=["rec_name", "sorter_name"]) - - count_units = pd.DataFrame( - index=index, - columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"], - dtype=int, - ) - - if self.exhaustive_gt: - count_units["num_false_positive"] = pd.Series(dtype=int) - count_units["num_bad"] = pd.Series(dtype=int) - - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = self.comparisons[(rec_name, sorter_name)] - - count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_well_detected"] = comp.count_well_detected_units( - well_detected_score - ) - if self.exhaustive_gt: - count_units.loc[(rec_name, sorter_name), "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units(redundant_score) - count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units( - redundant_score - ) - count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() - - return count_units - - def aggregate_dataframes(self, copy_into_folder=True, **karg_thresh): - dataframes = {} - dataframes["run_times"] = self.aggregate_run_times().reset_index() - perfs = self.aggregate_performance_by_unit() - - dataframes["perf_by_unit"] = perfs.reset_index() - dataframes["count_units"] = self.aggregate_count_units(**karg_thresh).reset_index() - - if copy_into_folder: - tables_folder = self.study_folder / "tables" - tables_folder.mkdir(parents=True, exist_ok=True) - - for name, df in dataframes.items(): - df.to_csv(str(tables_folder / (name + ".csv")), sep="\t", index=False) - - return dataframes - - def get_waveform_extractor(self, rec_name, sorter_name=None): - rec = self.get_recording(rec_name) - - if sorter_name is None: - name = "GroundTruth" - sorting = self.get_ground_truth(rec_name) - else: - assert sorter_name in self.sorter_names - name = sorter_name - sorting = self.get_sorting(sorter_name, rec_name) - - waveform_folder = self.study_folder / "waveforms" / f"waveforms_{name}_{rec_name}" - - if waveform_folder.is_dir(): - we = WaveformExtractor.load(waveform_folder) - else: - we = WaveformExtractor.create(rec, sorting, waveform_folder) - return we - - def compute_waveforms( - self, - rec_name, - sorter_name=None, - ms_before=3.0, - ms_after=4.0, - max_spikes_per_unit=500, - n_jobs=-1, - total_memory="1G", - ): - we = self.get_waveform_extractor(rec_name, sorter_name) - we.set_params(ms_before=ms_before, ms_after=ms_after, max_spikes_per_unit=max_spikes_per_unit) - we.run_extract_waveforms(n_jobs=n_jobs, total_memory=total_memory) - - def get_templates(self, rec_name, sorter_name=None, mode="median"): - """ - Get template for a given recording. - - If sorter_name=None then template are from the ground truth. 
- - """ - we = self.get_waveform_extractor(rec_name, sorter_name=sorter_name) - templates = we.get_all_templates(mode=mode) - return templates - - def compute_metrics( - self, - rec_name, - metric_names=["snr"], - ms_before=3.0, - ms_after=4.0, - max_spikes_per_unit=500, - n_jobs=-1, - total_memory="1G", - ): - we = self.get_waveform_extractor(rec_name) - we.set_params(ms_before=ms_before, ms_after=ms_after, max_spikes_per_unit=max_spikes_per_unit) - we.run_extract_waveforms(n_jobs=n_jobs, total_memory=total_memory) - - # metrics - metrics = compute_quality_metrics(we, metric_names=metric_names) - folder = self.study_folder / "metrics" - folder.mkdir(exist_ok=True) - filename = folder / f"metrics _{rec_name}.txt" - metrics.to_csv(filename, sep="\t", index=True) - - return metrics - - def get_metrics(self, rec_name=None, **metric_kwargs): - """ - Load or compute units metrics for a given recording. - """ - rec_name = self._check_rec_name(rec_name) - metrics_folder = self.study_folder / "metrics" - metrics_folder.mkdir(parents=True, exist_ok=True) - - filename = self.study_folder / "metrics" / f"metrics _{rec_name}.txt" - import pandas as pd - - if filename.is_file(): - metrics = pd.read_csv(filename, sep="\t", index_col=0) - gt_sorting = self.get_ground_truth(rec_name) - metrics.index = gt_sorting.unit_ids - else: - metrics = self.compute_metrics(rec_name, **metric_kwargs) - - metrics.index.name = "unit_id" - # add rec name columns - metrics["rec_name"] = rec_name - - return metrics - - def get_units_snr(self, rec_name=None, **metric_kwargs): - """ """ - metric = self.get_metrics(rec_name=rec_name, **metric_kwargs) - return metric["snr"] - - def concat_all_snr(self): - metrics = [] - for rec_name in self.rec_names: - df = self.get_metrics(rec_name) - df = df.reset_index() - metrics.append(df) - metrics = pd.concat(metrics) - metrics = metrics.set_index(["rec_name", "unit_id"]) - return metrics["snr"] From 99e7acc8044d91773b2c77c67d51669dfe6b2fd2 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 13 Sep 2023 11:32:37 +0200 Subject: [PATCH 049/322] WIP --- src/spikeinterface/sorters/internal/spyking_circus2.py | 5 +++-- .../sortingcomponents/clustering/random_projections.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 8a7b353bd1..571096caf9 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -145,8 +145,9 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() - if "chunk_memory" in matching_job_params: - matching_job_params.pop("chunk_memory") + for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + if value in matching_job_params: + matching_job_params.pop(value) matching_job_params["chunk_duration"] = "100ms" diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index d82f9a7808..025555440a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,8 +191,9 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() - if "chunk_memory" in cleaning_matching_params: - 
cleaning_matching_params.pop("chunk_memory") + for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + if value in cleaning_matching_params: + cleaning_matching_params.pop(value) cleaning_matching_params["chunk_duration"] = "100ms" cleaning_matching_params["n_jobs"] = 1 cleaning_matching_params["verbose"] = False From cc792136cf213c4701a962206295dc7efaa718ad Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Sep 2023 09:32:58 +0000 Subject: [PATCH 050/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/spyking_circus2.py | 2 +- .../sortingcomponents/benchmark/benchmark_matching.py | 8 ++++---- .../sortingcomponents/clustering/random_projections.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 571096caf9..db3d88f116 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -145,7 +145,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_params.update({"noise_levels": noise_levels}) matching_job_params = job_kwargs.copy() - for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + for value in ["chunk_size", "chunk_memory", "total_memory", "chunk_duration"]: if value in matching_job_params: matching_job_params.pop(value) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 8ce8efe25f..50d64e1349 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -626,10 +626,10 @@ def plot_comparison_matching( patches.append(mpatches.Patch(color=color, label=name)) ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0) else: - ax.spines['bottom'].set_visible(False) - ax.spines['left'].set_visible(False) - ax.spines['top'].set_visible(False) - ax.spines['right'].set_visible(False) + ax.spines["bottom"].set_visible(False) + ax.spines["left"].set_visible(False) + ax.spines["top"].set_visible(False) + ax.spines["right"].set_visible(False) ax.set_xticks([]) ax.set_yticks([]) plt.tight_layout(h_pad=0, w_pad=0) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 025555440a..5592b23c8d 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -191,7 +191,7 @@ def main_function(cls, recording, peaks, params): ) cleaning_matching_params = params["job_kwargs"].copy() - for value in ['chunk_size', 'chunk_memory', 'total_memory', 'chunk_duration']: + for value in ["chunk_size", "chunk_memory", "total_memory", "chunk_duration"]: if value in cleaning_matching_params: cleaning_matching_params.pop(value) cleaning_matching_params["chunk_duration"] = "100ms" From dda78037d9570a529392af35055d343fc6c56022 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 13 Sep 2023 13:26:01 +0200 Subject: [PATCH 051/322] Adding unit_ids --- .../sortingcomponents/clustering/random_projections.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 5592b23c8d..be8ecd6702 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -177,7 +177,8 @@ def main_function(cls, recording, peaks, params): mode = "folder" sorting_folder = tmp_folder / "sorting" - sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["unit_index"], fs) + unit_ids = np.arange(len(np.unique(spikes["unit_index"]))) + sorting = NumpySorting(spikes, fs, unit_ids=unit_ids) sorting = sorting.save(folder=sorting_folder) we = extract_waveforms( recording, From 1b28837a452da62e6890019bcb311cb5ced4009e Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 13 Sep 2023 13:45:38 +0200 Subject: [PATCH 052/322] skip slurm tests --- src/spikeinterface/sorters/tests/test_launcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index 2d8e6f3d3c..ecab64ede6 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -115,6 +115,7 @@ def test_run_sorter_jobs_dask(job_list): print(t1 - t0) +@pytest.mark.skip("Slurm launcher need a machine with slurm") def test_run_sorter_jobs_slurm(job_list): if base_output.is_dir(): shutil.rmtree(base_output) From ba2e961bd9b26fd7acc226183b19bc5b3a85401b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 13 Sep 2023 14:58:14 +0200 Subject: [PATCH 053/322] small fix --- src/spikeinterface/comparison/groundtruthstudy.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 8d43fb5f0c..9f0039b9cb 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -228,7 +228,12 @@ def copy_sortings(self, case_keys=None, force=True): sorter_folder = self.folder / "sorters" / self.key_to_str(key) log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" - sorting = read_sorter_folder(sorter_folder, raise_error=False) + + if (sorter_folder / "spikeinterface_log.json").exists(): + sorting = read_sorter_folder(sorter_folder, raise_error=False) + else: + sorting = None + if sorting is not None: if sorting_folder.exists(): if force: From 9b5b28b9b6cf0b7d7e313d12cf2015253087f032 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 13 Sep 2023 15:03:57 +0200 Subject: [PATCH 054/322] small fix --- src/spikeinterface/widgets/gtstudy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 304cf1a44a..bc2c1246b7 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -35,7 +35,7 @@ def __init__( plot_data = dict( study=study, - run_times=study.get_run_times(), + run_times=study.get_run_times(case_keys), case_keys=case_keys, ) From f96e2b79df22b699cfd380b126b4c977339f3ab1 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Wed, 13 Sep 2023 19:28:00 +0200 Subject: [PATCH 055/322] Extend and refactor waveform metrics --- src/spikeinterface/postprocessing/__init__.py | 1 - .../postprocessing/template_metrics.py | 583 ++++++++++++++++-- 
.../tests/test_template_metrics.py | 8 +- 3 files changed, 534 insertions(+), 58 deletions(-) diff --git a/src/spikeinterface/postprocessing/__init__.py b/src/spikeinterface/postprocessing/__init__.py index 223bda5e30..d7e1ffac01 100644 --- a/src/spikeinterface/postprocessing/__init__.py +++ b/src/spikeinterface/postprocessing/__init__.py @@ -10,7 +10,6 @@ from .template_metrics import ( TemplateMetricsCalculator, compute_template_metrics, - calculate_template_metrics, get_template_metric_names, ) diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index 681f6f3e84..119f0dc53d 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ b/src/spikeinterface/postprocessing/template_metrics.py @@ -11,9 +11,24 @@ from ..core.waveform_extractor import BaseWaveformExtractorExtension import warnings +# DEBUG = True + +# if DEBUG: +# import matplotlib.pyplot as plt +# plt.ion() +# plt.show() + + +def get_1d_template_metric_names(): + return deepcopy(list(_1d_metric_name_to_func.keys())) + + +def get_2d_template_metric_names(): + return deepcopy(list(_2d_metric_name_to_func.keys())) + def get_template_metric_names(): - return deepcopy(list(_metric_name_to_func.keys())) + return get_1d_template_metric_names() + get_2d_template_metric_names() class TemplateMetricsCalculator(BaseWaveformExtractorExtension): @@ -26,20 +41,31 @@ class TemplateMetricsCalculator(BaseWaveformExtractorExtension): """ extension_name = "template_metrics" + min_channels_for_2d_warning = 10 def __init__(self, waveform_extractor): BaseWaveformExtractorExtension.__init__(self, waveform_extractor) - def _set_params(self, metric_names=None, peak_sign="neg", upsampling_factor=10, sparsity=None, window_slope_ms=0.7): + def _set_params( + self, + metric_names=None, + peak_sign="neg", + upsampling_factor=10, + sparsity=None, + functions_kwargs=None, + include_2d_metrics=False, + ): if metric_names is None: - metric_names = get_template_metric_names() - + metric_names = get_1d_template_metric_names() + if include_2d_metrics: + metric_names += get_2d_template_metric_names() + functions_kwargs = functions_kwargs or dict() params = dict( metric_names=[str(name) for name in metric_names], sparsity=sparsity, peak_sign=peak_sign, upsampling_factor=int(upsampling_factor), - window_slope_ms=float(window_slope_ms), + functions_kwargs=functions_kwargs, ) return params @@ -60,6 +86,9 @@ def _run(self): unit_ids = self.waveform_extractor.sorting.unit_ids sampling_frequency = self.waveform_extractor.sampling_frequency + metrics_1d = [m for m in metric_names if m in get_1d_template_metric_names()] + metrics_2d = [m for m in metric_names if m in get_2d_template_metric_names()] + if sparsity is None: extremum_channels_ids = get_template_extremum_channel( self.waveform_extractor, peak_sign=peak_sign, outputs="id" @@ -79,6 +108,8 @@ def _run(self): template_metrics = pd.DataFrame(index=multi_index, columns=metric_names) all_templates = self.waveform_extractor.get_all_templates() + channel_locations = self.waveform_extractor.get_channel_locations() + for unit_index, unit_id in enumerate(unit_ids): template_all_chans = all_templates[unit_index] chan_ids = np.array(extremum_channels_ids[unit_id]) @@ -87,6 +118,7 @@ def _run(self): chan_ind = self.waveform_extractor.channel_ids_to_indices(chan_ids) template = template_all_chans[:, chan_ind] + # compute 1d metrics for i, template_single in enumerate(template.T): if sparsity is None: index = unit_id @@ -100,15 +132,50 @@ def 
_run(self): template_upsampled = template_single sampling_frequency_up = sampling_frequency - for metric_name in metric_names: + trough_idx, peak_idx = get_trough_and_peak_idx(template_upsampled) + + for metric_name in metrics_1d: func = _metric_name_to_func[metric_name] value = func( template_upsampled, sampling_frequency=sampling_frequency_up, - window_ms=self._params["window_slope_ms"], + trough_idx=trough_idx, + peak_idx=peak_idx, + **self._params["functions_kwargs"], ) template_metrics.at[index, metric_name] = value + # compute metrics 2d + for metric_name in metrics_2d: + # retrieve template (with sparsity if waveform extractor is sparse) + template = self.waveform_extractor.get_template(unit_id=unit_id) + + if template.shape[1] < self.min_channels_for_2d_warning: + warnings.warn( + f"With less than {self.min_channels_for_2d_warning} channels, " + "2D metrics might not be reliable." + ) + if self.waveform_extractor.is_sparse(): + channel_locations_sparse = channel_locations[self.waveform_extractor.sparsity.mask[unit_index]] + else: + channel_locations_sparse = channel_locations + + if upsampling_factor > 1: + assert isinstance(upsampling_factor, (int, np.integer)), "'upsample' must be an integer" + template_upsampled = resample_poly(template, up=upsampling_factor, down=1, axis=0) + sampling_frequency_up = upsampling_factor * sampling_frequency + else: + template_upsampled = template + sampling_frequency_up = sampling_frequency + + func = _metric_name_to_func[metric_name] + value = func( + template_upsampled, + channel_locations=channel_locations_sparse, + sampling_frequency=sampling_frequency_up, + **self._params["functions_kwargs"], + ) + template_metrics.at[index, metric_name] = value self._extension_data["metrics"] = template_metrics def get_data(self): @@ -139,7 +206,17 @@ def compute_template_metrics( peak_sign="neg", upsampling_factor=10, sparsity=None, - window_slope_ms=0.7, + include_2d_metrics=False, + functions_kwargs=dict( + recovery_window_ms=0.7, + peak_relative_threshold=0.2, + peak_width_ms=0.2, + depth_direction="y", + min_channels_for_velocity=5, + min_r2_for_velocity=0.5, + exp_peak_function="ptp", + spread_threshold=0.2, + ), ): """ Compute template metrics including: @@ -148,6 +225,14 @@ def compute_template_metrics( * halfwidth * repolarization_slope * recovery_slope + * num_positive_peaks + * num_negative_peaks + + Optionally, the following 2d metrics can be computed (when include_2d_metrics=True): + * velocity_above + * velocity_below + * exp_decay + * spread Parameters ---------- @@ -157,34 +242,57 @@ def compute_template_metrics( Whether to load precomputed template metrics, if they already exist. metric_names : list, optional List of metrics to compute (see si.postprocessing.get_template_metric_names()), by default None - peak_sign : str, optional - "pos" | "neg", by default 'neg' - upsampling_factor : int, optional - Upsample factor, by default 10 - sparsity: dict or None + peak_sign : {"neg", "pos"}, default: "neg" + The peak sign + upsampling_factor : int, default: 10 + The upsampling factor to upsample the templates + sparsity: dict or None, default: None Default is sparsity=None and template metric is computed on extremum channel only. If given, the dictionary should contain a unit ids as keys and a channel id or a list of channel ids as values. For more generating a sparsity dict, see the postprocessing.compute_sparsity() function. 
- window_slope_ms: float - Window in ms after the positiv peak to compute slope, by default 0.7 + include_2d_metrics: bool, default: False + Whether to compute 2d metrics + functions_kwargs: dict + Additional arguments to pass to the metric functions. Including: + * recovery_window_ms: the window in ms after the peak to compute the recovery_slope, default: 0.7 + * peak_relative_threshold: the relative threshold to detect positive and negative peaks, default: 0.2 + * peak_width_ms: the width in samples to detect peaks, default: 0.2 + * depth_direction: the direction to compute velocity above and below, default: "y" + * min_channels_for_velocity: the minimum number of channels above or below to compute velocity, default: 5 + * min_r2_for_velocity: the minimum r2 to accept the velocity fit, default: 0.7 + * exp_peak_function: the function to use to compute the peak amplitude for the exp decay, default: "ptp" + * spread_threshold: the threshold to compute the spread, default: 0.2 Returns ------- - tempalte_metrics : pd.DataFrame + template_metrics : pd.DataFrame Dataframe with the computed template metrics. If 'sparsity' is None, the index is the unit_id. If 'sparsity' is given, the index is a multi-index (unit_id, channel_id) + + Notes + ----- + If any 2d metric is in the metric_names or include_2d_metrics is True, sparsity must be None, so that one metric + value will be computed per unit. """ if load_if_exists and waveform_extractor.is_extension(TemplateMetricsCalculator.extension_name): tmc = waveform_extractor.load_extension(TemplateMetricsCalculator.extension_name) else: tmc = TemplateMetricsCalculator(waveform_extractor) + # For 2D metrics, external sparsity must be None, so that one metric value will be computed per unit. + if include_2d_metrics or ( + metric_names is not None and any([m in get_2d_template_metric_names() for m in metric_names]) + ): + assert ( + sparsity is None + ), "If 2D metrics are computed, sparsity must be None, so that each unit will correspond to 1 row of the output dataframe." tmc.set_params( metric_names=metric_names, peak_sign=peak_sign, upsampling_factor=upsampling_factor, sparsity=sparsity, - window_slope_ms=window_slope_ms, + include_2d_metrics=include_2d_metrics, + functions_kwargs=functions_kwargs, ) tmc.run() @@ -197,7 +305,19 @@ def get_trough_and_peak_idx(template): """ Return the indices into the input template of the detected trough (minimum of template) and peak (maximum of template, after trough). - Assumes negative trough and positive peak + Assumes negative trough and positive peak. + + Parameters + ---------- + template: numpy.ndarray + The 1D template waveform + + Returns + ------- + trough_idx: int + The index of the trough + peak_idx: int + The index of the peak """ assert template.ndim == 1 trough_idx = np.argmin(template) @@ -205,41 +325,94 @@ def get_trough_and_peak_idx(template): return trough_idx, peak_idx -def get_peak_to_valley(template, **kwargs): +######################################################################################### +# 1D metrics +def get_peak_to_valley(template_single, trough_idx=None, peak_idx=None, **kwargs): """ - Time between trough and peak in s + Return the peak to valley duration in seconds of input waveforms. 
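+    The duration is measured from the trough (template minimum) to the following peak (template maximum).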
+ + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + trough_idx: int, default: None + The index of the trough + peak_idx: int, default: None + The index of the peak + **kwargs: Required kwargs: + - sampling_frequency: the sampling frequency + + Returns + ------- + ptv: float + The peak to valley duration in seconds """ sampling_frequency = kwargs["sampling_frequency"] - trough_idx, peak_idx = get_trough_and_peak_idx(template) + if trough_idx is None or peak_idx is None: + trough_idx, peak_idx = get_trough_and_peak_idx(template_single) ptv = (peak_idx - trough_idx) / sampling_frequency return ptv -def get_peak_trough_ratio(template, **kwargs): +def get_peak_trough_ratio(template_single, trough_idx=None, peak_idx=None, **kwargs): """ - Ratio between peak heigth and trough depth + Return the peak to trough ratio of input waveforms. + + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + trough_idx: int, default: None + The index of the trough + peak_idx: int, default: None + The index of the peak + **kwargs: Required kwargs: + - sampling_frequency: the sampling frequency + + Returns + ------- + ptratio: float + The peak to trough ratio """ - trough_idx, peak_idx = get_trough_and_peak_idx(template) - ptratio = template[peak_idx] / template[trough_idx] + if trough_idx is None or peak_idx is None: + trough_idx, peak_idx = get_trough_and_peak_idx(template_single) + ptratio = template_single[peak_idx] / template_single[trough_idx] return ptratio -def get_half_width(template, **kwargs): +def get_half_width(template_single, trough_idx=None, peak_idx=None, **kwargs): """ - Width of waveform at its half of amplitude in s + Return the half width of input waveforms in seconds. + + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + trough_idx: int, default: None + The index of the trough + peak_idx: int, default: None + The index of the peak + **kwargs: Required kwargs: + - sampling_frequency: the sampling frequency + + Returns + ------- + hw: float + The half width in seconds """ - trough_idx, peak_idx = get_trough_and_peak_idx(template) + if trough_idx is None or peak_idx is None: + trough_idx, peak_idx = get_trough_and_peak_idx(template_single) sampling_frequency = kwargs["sampling_frequency"] if peak_idx == 0: return np.nan - trough_val = template[trough_idx] + trough_val = template_single[trough_idx] # threshold is half of peak heigth (assuming baseline is 0) threshold = 0.5 * trough_val - (cpre_idx,) = np.where(template[:trough_idx] < threshold) - (cpost_idx,) = np.where(template[trough_idx:] < threshold) + (cpre_idx,) = np.where(template_single[:trough_idx] < threshold) + (cpost_idx,) = np.where(template_single[trough_idx:] < threshold) if len(cpre_idx) == 0 or len(cpost_idx) == 0: hw = np.nan @@ -254,7 +427,7 @@ def get_half_width(template, **kwargs): return hw -def get_repolarization_slope(template, **kwargs): +def get_repolarization_slope(template_single, trough_idx=None, **kwargs): """ Return slope of repolarization period between trough and baseline @@ -264,17 +437,26 @@ def get_repolarization_slope(template, **kwargs): Optionally the function returns also the indices per waveform where the potential crosses baseline. 
- """ - trough_idx, peak_idx = get_trough_and_peak_idx(template) + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + trough_idx: int, default: None + The index of the trough + **kwargs: Required kwargs: + - sampling_frequency: the sampling frequency + """ + if trough_idx is None: + trough_idx = get_trough_and_peak_idx(template_single) sampling_frequency = kwargs["sampling_frequency"] - times = np.arange(template.shape[0]) / sampling_frequency + times = np.arange(template_single.shape[0]) / sampling_frequency if trough_idx == 0: return np.nan - (rtrn_idx,) = np.nonzero(template[trough_idx:] >= 0) + (rtrn_idx,) = np.nonzero(template_single[trough_idx:] >= 0) if len(rtrn_idx) == 0: return np.nan # first time after trough, where template is at baseline @@ -285,11 +467,11 @@ def get_repolarization_slope(template, **kwargs): import scipy.stats - res = scipy.stats.linregress(times[trough_idx:return_to_base_idx], template[trough_idx:return_to_base_idx]) + res = scipy.stats.linregress(times[trough_idx:return_to_base_idx], template_single[trough_idx:return_to_base_idx]) return res.slope -def get_recovery_slope(template, window_ms=0.7, **kwargs): +def get_recovery_slope(template_single, peak_idx=None, **kwargs): """ Return the recovery slope of input waveforms. After repolarization, the neuron hyperpolarizes untill it peaks. The recovery slope is the @@ -299,41 +481,332 @@ def get_recovery_slope(template, window_ms=0.7, **kwargs): Takes a numpy array of waveforms and returns an array with recovery slopes per waveform. + + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + peak_idx: int, default: None + The index of the peak + **kwargs: Required kwargs: + - sampling_frequency: the sampling frequency + - recovery_window_ms: the window in ms after the peak to compute the recovery_slope """ + import scipy.stats - trough_idx, peak_idx = get_trough_and_peak_idx(template) + assert "recovery_window_ms" in kwargs, "recovery_window_ms must be given as kwarg" + recovery_window_ms = kwargs["recovery_window_ms"] + if peak_idx is None: + _, peak_idx = get_trough_and_peak_idx(template_single) sampling_frequency = kwargs["sampling_frequency"] - times = np.arange(template.shape[0]) / sampling_frequency + times = np.arange(template_single.shape[0]) / sampling_frequency if peak_idx == 0: return np.nan - max_idx = int(peak_idx + ((window_ms / 1000) * sampling_frequency)) - max_idx = np.min([max_idx, template.shape[0]]) - - import scipy.stats + max_idx = int(peak_idx + ((recovery_window_ms / 1000) * sampling_frequency)) + max_idx = np.min([max_idx, template_single.shape[0]]) - res = scipy.stats.linregress(times[peak_idx:max_idx], template[peak_idx:max_idx]) + res = scipy.stats.linregress(times[peak_idx:max_idx], template_single[peak_idx:max_idx]) return res.slope -_metric_name_to_func = { +def get_num_positive_peaks(template_single, **kwargs): + """ + Count the number of positive peaks in the template. 
+ + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + **kwargs: Required kwargs: + - peak_relative_threshold: the relative threshold to detect positive and negative peaks + - peak_width_ms: the width in samples to detect peaks + - sampling_frequency: the sampling frequency + """ + from scipy.signal import find_peaks + + assert "peak_relative_threshold" in kwargs, "peak_relative_threshold must be given as kwarg" + assert "peak_width_ms" in kwargs, "peak_width_ms must be given as kwarg" + peak_relative_threshold = kwargs["peak_relative_threshold"] + peak_width_ms = kwargs["peak_width_ms"] + max_value = np.max(np.abs(template_single)) + peak_width_samples = int(peak_width_ms / 1000 * kwargs["sampling_frequency"]) + + pos_peaks = find_peaks(template_single, height=peak_relative_threshold * max_value, width=peak_width_samples) + + return len(pos_peaks[0]) + + +def get_num_negative_peaks(template_single, **kwargs): + """ + Count the number of negative peaks in the template. + + Parameters + ---------- + template_single: numpy.ndarray + The 1D template waveform + **kwargs: Required kwargs: + - peak_relative_threshold: the relative threshold to detect positive and negative peaks + - peak_width_ms: the width in samples to detect peaks + - sampling_frequency: the sampling frequency + """ + from scipy.signal import find_peaks + + assert "peak_relative_threshold" in kwargs, "peak_relative_threshold must be given as kwarg" + assert "peak_width_ms" in kwargs, "peak_width_ms must be given as kwarg" + peak_relative_threshold = kwargs["peak_relative_threshold"] + peak_width_ms = kwargs["peak_width_ms"] + max_value = np.max(np.abs(template_single)) + peak_width_samples = int(peak_width_ms / 1000 * kwargs["sampling_frequency"]) + + neg_peaks = find_peaks(-template_single, height=peak_relative_threshold * max_value, width=peak_width_samples) + + return len(neg_peaks[0]) + + +_1d_metric_name_to_func = { "peak_to_valley": get_peak_to_valley, "peak_trough_ratio": get_peak_trough_ratio, "half_width": get_half_width, "repolarization_slope": get_repolarization_slope, "recovery_slope": get_recovery_slope, + "num_positive_peaks": get_num_positive_peaks, + "num_negative_peaks": get_num_negative_peaks, } -# back-compatibility -def calculate_template_metrics(*args, **kwargs): - warnings.warn( - "The 'calculate_template_metrics' function is deprecated. " "Use 'compute_template_metrics' instead", - DeprecationWarning, - stacklevel=2, - ) - return compute_template_metrics(*args, **kwargs) +######################################################################################### +# 2D metrics + + +def fit_velocity(peak_times, channel_dist): + # from scipy.stats import linregress + # slope, intercept, _, _, _ = linregress(peak_times, channel_dist) + + from sklearn.linear_model import TheilSenRegressor + + theil = TheilSenRegressor() + theil.fit(peak_times.reshape(-1, 1), channel_dist) + slope = theil.coef_[0] + intercept = theil.intercept_ + score = theil.score(peak_times.reshape(-1, 1), channel_dist) + return slope, intercept, score + + +def get_velocity_above(template, channel_locations, **kwargs): + """ + Compute the velocity above the max channel of the template. 
+ + Parameters + ---------- + template: numpy.ndarray + The template waveform (num_samples, num_channels) + channel_locations: numpy.ndarray + The channel locations (num_channels, 2) + **kwargs: Required kwargs: + - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") + - min_channels_for_velocity: the minimum number of channels above or below to compute velocity + - min_r2_for_velocity: the minimum r2 to accept the velocity fit + - sampling_frequency: the sampling frequency + """ + assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" + assert "min_channels_for_velocity" in kwargs, "min_channels_for_velocity must be given as kwarg" + assert "min_r2_for_velocity" in kwargs, "min_r2_for_velocity must be given as kwarg" + + depth_direction = kwargs["depth_direction"] + min_channels_for_velocity = kwargs["min_channels_for_velocity"] + min_r2_for_velocity = kwargs["min_r2_for_velocity"] + + direction_index = ["x", "y", "z"].index(depth_direction) + sampling_frequency = kwargs["sampling_frequency"] + + # find location of max channel + max_sample_idx, max_channel_idx = np.unravel_index(np.argmin(template), template.shape) + max_channel_location = channel_locations[max_channel_idx] + + channels_above = channel_locations[:, direction_index] >= max_channel_location[direction_index] + + # we only consider samples forward in time with respect to the max channel + template_above = template[max_sample_idx:, channels_above] + channel_locations_above = channel_locations[channels_above] + + peak_times_ms_above = np.argmin(template_above, 0) / sampling_frequency * 1000 + distances_um_above = np.array([np.linalg.norm(cl - max_channel_location) for cl in channel_locations_above]) + velocity_above, intercept, score = fit_velocity(peak_times_ms_above, distances_um_above) + + # if DEBUG: + # fig, ax = plt.subplots() + # ax.plot(peak_times_ms_above, distances_um_above, "o") + # x = np.linspace(peak_times_ms_above.min(), peak_times_ms_above.max(), 20) + # ax.plot(x, intercept + x * velocity_above) + # ax.set_xlabel("Peak time (ms)") + # ax.set_ylabel("Distance from max channel (um)") + # ax.set_title(f"Velocity above: {velocity_above:.2f} um/ms") + + if np.sum(channels_above) < min_channels_for_velocity: + # if DEBUG: + # ax.set_title("NaN velocity - not enough channels") + return np.nan + + if score < min_r2_for_velocity: + # if DEBUG: + # ax.set_title(f"NaN velocity - R2 is too low: {score:.2f}") + return np.nan + return velocity_above + + +def get_velocity_below(template, channel_locations, **kwargs): + """ + Compute the velocity below the max channel of the template. 
+ + Parameters + ---------- + template: numpy.ndarray + The template waveform (num_samples, num_channels) + channel_locations: numpy.ndarray + The channel locations (num_channels, 2) + **kwargs: Required kwargs: + - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") + - min_channels_for_velocity: the minimum number of channels above or below to compute velocity + - min_r2_for_velocity: the minimum r2 to accept the velocity fit + - sampling_frequency: the sampling frequency + """ + assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" + assert "min_channels_for_velocity" in kwargs, "min_channels_for_velocity must be given as kwarg" + assert "min_r2_for_velocity" in kwargs, "min_r2_for_velocity must be given as kwarg" + direction = kwargs["depth_direction"] + min_channels_for_velocity = kwargs["min_channels_for_velocity"] + min_r2_for_velocity = kwargs["min_r2_for_velocity"] + direction_index = ["x", "y", "z"].index(direction) + + # find location of max channel + max_sample_idx, max_channel_idx = np.unravel_index(np.argmin(template), template.shape) + max_channel_location = channel_locations[max_channel_idx] + sampling_frequency = kwargs["sampling_frequency"] + + channels_below = channel_locations[:, direction_index] <= max_channel_location[direction_index] + + # we only consider samples forward in time with respect to the max channel + template_below = template[max_sample_idx:, channels_below] + channel_locations_below = channel_locations[channels_below] + + peak_times_ms_below = np.argmin(template_below, 0) / sampling_frequency * 1000 + distances_um_below = np.array([np.linalg.norm(cl - max_channel_location) for cl in channel_locations_below]) + velocity_below, intercept, score = fit_velocity(peak_times_ms_below, distances_um_below) + + # if DEBUG: + # fig, ax = plt.subplots() + # ax.plot(peak_times_ms_below, distances_um_below, "o") + # x = np.linspace(peak_times_ms_below.min(), peak_times_ms_below.max(), 20) + # ax.plot(x, intercept + x * velocity_below) + # ax.set_xlabel("Peak time (ms)") + # ax.set_ylabel("Distance from max channel (um)") + # ax.set_title(f"Velocity below: {np.round(velocity_below, 3)} um/ms") + + if np.sum(channels_below) < min_channels_for_velocity: + # if DEBUG: + # ax.set_title("NaN velocity - not enough channels") + return np.nan + + if score < min_r2_for_velocity: + # if DEBUG: + # ax.set_title(f"NaN velocity - R2 is too low: {np.round(score, 3)}") + return np.nan + + return velocity_below + + +def get_exp_decay(template, channel_locations, **kwargs): + """ + Compute the exponential decay of the template amplitude over distance. 
+ + Parameters + ---------- + template: numpy.ndarray + The template waveform (num_samples, num_channels) + channel_locations: numpy.ndarray + The channel locations (num_channels, 2) + **kwargs: Required kwargs: + - exp_peak_function: the function to use to compute the peak amplitude for the exp decay ("ptp" or "min") + """ + from scipy.optimize import curve_fit + + def exp_decay(x, a, b, c): + return a * np.exp(-b * x) + c + + assert "exp_peak_function" in kwargs, "exp_peak_function must be given as kwarg" + exp_peak_function = kwargs["exp_peak_function"] + # exp decay fit + if exp_peak_function == "ptp": + fun = np.ptp + elif exp_peak_function == "min": + fun = np.min + peak_amplitudes = np.abs(fun(template, axis=0)) + max_channel_location = channel_locations[np.argmax(peak_amplitudes)] + channel_distances = np.array([np.linalg.norm(cl - max_channel_location) for cl in channel_locations]) + distances_sort_indices = np.argsort(channel_distances) + channel_distances_sorted = channel_distances[distances_sort_indices] + peak_amplitudes_sorted = peak_amplitudes[distances_sort_indices] + try: + popt, _ = curve_fit(exp_decay, channel_distances_sorted, peak_amplitudes_sorted) + exp_decay_value = popt[1] + # if DEBUG: + # fig, ax = plt.subplots() + # ax.plot(channel_distances_sorted, peak_amplitudes_sorted, "o") + # x = np.arange(channel_distances_sorted.min(), channel_distances_sorted.max()) + # ax.plot(x, exp_decay(x, *popt)) + # ax.set_xlabel("Distance from max channel (um)") + # ax.set_ylabel("Peak amplitude") + # ax.set_title(f"Exp decay: {np.round(exp_decay_value, 3)}") + except: + exp_decay_value = np.nan + return exp_decay_value + + +def get_spread(template, channel_locations, **kwargs): + """ + Compute the spread of the template amplitude over distance. 
+ Parameters + ---------- + template: numpy.ndarray + The template waveform (num_samples, num_channels) + channel_locations: numpy.ndarray + The channel locations (num_channels, 2) + **kwargs: Required kwargs: + - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") + - spread_threshold: the threshold to compute the spread + """ + assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" + depth_direction = kwargs["depth_direction"] + assert "spread_threshold" in kwargs, "spread_threshold must be given as kwarg" + spread_threshold = kwargs["spread_threshold"] + + direction_index = ["x", "y", "z"].index(depth_direction) + MM = np.ptp(template, 0) + MM = MM / np.max(MM) + channel_locations_above_theshold = channel_locations[MM > spread_threshold] + channel_depth_above_theshold = channel_locations_above_theshold[:, direction_index] + spread = np.ptp(channel_depth_above_theshold) + + # if DEBUG: + # fig, ax = plt.subplots() + # channel_depths = channel_locations[:, direction_index] + # sort_indices = np.argsort(channel_depths) + # ax.plot(channel_depths[sort_indices], MM[sort_indices], "o-") + # ax.axhline(spread_threshold, ls="--", color="r") + # ax.set_xlabel("Depth (um)") + # ax.set_ylabel("Amplitude") + # ax.set_title(f"Spread: {np.round(spread, 3)} um") + return spread + + +_2d_metric_name_to_func = { + "velocity_above": get_velocity_above, + "velocity_below": get_velocity_below, + "exp_decay": get_exp_decay, + "spread": get_spread, +} -calculate_template_metrics.__doc__ = compute_template_metrics.__doc__ +_metric_name_to_func = {**_1d_metric_name_to_func, **_2d_metric_name_to_func} diff --git a/src/spikeinterface/postprocessing/tests/test_template_metrics.py b/src/spikeinterface/postprocessing/tests/test_template_metrics.py index 9895e2ec4c..5dcff3ffba 100644 --- a/src/spikeinterface/postprocessing/tests/test_template_metrics.py +++ b/src/spikeinterface/postprocessing/tests/test_template_metrics.py @@ -17,9 +17,13 @@ def test_sparse_metrics(self): tm_sparse = self.extension_class.get_extension_function()(self.we1, sparsity=self.sparsity1) print(tm_sparse) + def test_2d_metrics(self): + tm_2d = self.extension_class.get_extension_function()(self.we1, include_2d_metrics=True) + print(tm_2d) + if __name__ == "__main__": test = TemplateMetricsExtensionTest() test.setUp() - test.test_extension() - test.test_sparse_metrics() + # test.test_extension() + test.test_2d_metrics() From 0d87ea07eab0baa02ee34915d96be8a6c623b222 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:11:38 +0200 Subject: [PATCH 056/322] Update doc/modules/qualitymetrics/amplitude_spread.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/amplitude_spread.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_spread.rst index 0ae0761265..cc79ebbe1d 100644 --- a/doc/modules/qualitymetrics/amplitude_spread.rst +++ b/doc/modules/qualitymetrics/amplitude_spread.rst @@ -6,9 +6,9 @@ Calculation ----------- The amplitude spread is a measure of the amplitude variability. -It is computed the ratio between the standard deviation and the amplitude mean (aka coefficient of variation). +It is computed as the ratio between the standard deviation and the amplitude mean (aka the coefficient of variation). 
To obtain a better estimate of this measure, it is first computed separately for several bins of a prefixed number of spikes -(e.g 100) and then the median of these values is taken. +(e.g. 100) and then the median of these values is taken. The computation requires either spike amplitudes (see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes()`) or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_amplitude_scalings()`) to be pre-computed. From 2513a0e14cb5144c1747aa21cda9670c39449b80 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:11:44 +0200 Subject: [PATCH 057/322] Update doc/modules/qualitymetrics/firing_range.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/firing_range.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 0d17eedc13..1b82c7540f 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -13,7 +13,7 @@ taking the difference between the 95-th and 5th percentiles firing rates compute Expectation and use ------------------- -Very high levels of firing ranges, outside of a physiolocigal range, might indicate noise contamination. +Very high levels of firing ranges, outside of a physiological range, might indicate noise contamination. Example code From 78959e349b3783e77a3eca2a18967140909ba619 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:11:52 +0200 Subject: [PATCH 058/322] Update doc/modules/qualitymetrics/amplitude_spread.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/amplitude_spread.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_spread.rst index cc79ebbe1d..bdd23892c5 100644 --- a/doc/modules/qualitymetrics/amplitude_spread.rst +++ b/doc/modules/qualitymetrics/amplitude_spread.rst @@ -17,7 +17,7 @@ or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_ampl Expectation and use ------------------- -Very high levels of amplitude_spread ranges, outside of a physiolocigal range, might indicate noise contamination. +Very high levels of amplitude_spread ranges, outside of a physiological range, might indicate noise contamination. Example code From a311455f34bb4fbe085b9191cd61b91e6efbb14a Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:12:13 +0200 Subject: [PATCH 059/322] Update doc/modules/qualitymetrics/firing_range.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/modules/qualitymetrics/firing_range.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 1b82c7540f..3fd3d53573 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -6,7 +6,7 @@ Calculation ----------- The firing range indicates the dispersion of the firing rate of a unit across the recording. It is computed by -taking the difference between the 95-th and 5th percentiles firing rates computed over short time bins (e.g. 10 s). +taking the difference between the 95th percentile's firing rate and the 5th percentile's firing rate computed over short time bins (e.g. 10 s). 
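A minimal sketch of the firing-range calculation described above, assuming a hypothetical `spike_times` array (in seconds) and a known recording `duration`; this is illustrative only and not the SpikeInterface implementation. The 10 s bins and the 5th/95th percentiles mirror the values quoted in the documentation:

    import numpy as np

    def firing_range_sketch(spike_times, duration, bin_size_s=10.0, percentiles=(5, 95)):
        # firing rate in non-overlapping bins across the recording
        bin_edges = np.arange(0, duration + bin_size_s, bin_size_s)
        counts, _ = np.histogram(spike_times, bins=bin_edges)
        firing_rates = counts / bin_size_s
        # dispersion: difference between the high and low percentiles of the binned rates
        return np.percentile(firing_rates, percentiles[1]) - np.percentile(firing_rates, percentiles[0])

    # hypothetical usage: 1500 spikes over a 300 s recording
    rng = np.random.default_rng(0)
    spike_times = np.sort(rng.uniform(0, 300, size=1500))
    print(firing_range_sketch(spike_times, duration=300))

A unit with a steady rate gives a value near zero, while bursty or drifting units give larger values, which is the dispersion the metric is meant to capture.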
From dcf2935acffb6d0634ba210fa6a590597173eabb Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 10:30:45 +0200 Subject: [PATCH 060/322] quantile -> percentile --- src/spikeinterface/qualitymetrics/misc_metrics.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 6c237ee720..541d201c5e 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -566,8 +566,8 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k _default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4)) -def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.95), unit_ids=None): - """Calculate firing range, the range between the 5th and 95th quantiles of the firing rates distribution +def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0.95), unit_ids=None): + """Calculate firing range, the range between the 5th and 95th percentiles of the firing rates distribution computed in non-overlapping time bins. Parameters @@ -576,8 +576,8 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 The waveform extractor object. bin_size_s : float, default: 5 The size of the bin in seconds. - quantiles : tuple, default: (0.05, 0.95) - The quantiles to compute. + percentiles : tuple, default: (0.05, 0.95) + The percentiles to compute. unit_ids : list or None List of unit ids to compute the firing range. If None, all units are used. @@ -611,14 +611,14 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, quantiles=(0.05, 0.9 # finally we compute the percentiles firing_ranges = {} for unit_id in unit_ids: - firing_ranges[unit_id] = np.percentile(firing_rate_histograms[unit_id], quantiles[1]) - np.percentile( - firing_rate_histograms[unit_id], quantiles[0] + firing_ranges[unit_id] = np.percentile(firing_rate_histograms[unit_id], percentiles[1]) - np.percentile( + firing_rate_histograms[unit_id], percentiles[0] ) return firing_ranges -_default_params["firing_range"] = dict(bin_size_s=5, quantiles=(0.05, 0.95)) +_default_params["firing_range"] = dict(bin_size_s=5, percentiles=(0.05, 0.95)) def compute_amplitude_spreads( From 34a8df2e4db5c412b8a699057b05d44d699a8c40 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 14 Sep 2023 14:10:28 +0200 Subject: [PATCH 061/322] Modify amplitude_spread implementations and docs --- ...{amplitude_spread.rst => amplitude_cv.rst} | 27 ++++-- doc/modules/qualitymetrics/amplitudes.png | Bin 0 -> 214334 bytes .../qualitymetrics/misc_metrics.py | 90 +++++++++++++----- .../qualitymetrics/quality_metric_list.py | 4 +- .../tests/test_metrics_functions.py | 21 ++-- 5 files changed, 98 insertions(+), 44 deletions(-) rename doc/modules/qualitymetrics/{amplitude_spread.rst => amplitude_cv.rst} (50%) create mode 100644 doc/modules/qualitymetrics/amplitudes.png diff --git a/doc/modules/qualitymetrics/amplitude_spread.rst b/doc/modules/qualitymetrics/amplitude_cv.rst similarity index 50% rename from doc/modules/qualitymetrics/amplitude_spread.rst rename to doc/modules/qualitymetrics/amplitude_cv.rst index bdd23892c5..981813ef09 100644 --- a/doc/modules/qualitymetrics/amplitude_spread.rst +++ b/doc/modules/qualitymetrics/amplitude_cv.rst @@ -1,14 +1,15 @@ -Amplitude spread (:code:`amplitude_spread`) -=========================================== +Amplitude CV 
(:code:`amplitude_cv_median`, :code:`amplitude_cv_range`)
+======================================================================
 
 
 Calculation
 -----------
 
-The amplitude spread is a measure of the amplitude variability.
-It is computed as the ratio between the standard deviation and the amplitude mean (aka the coefficient of variation).
-To obtain a better estimate of this measure, it is first computed separately for several bins of a prefixed number of spikes
-(e.g. 100) and then the median of these values is taken.
+The amplitude CV (coefficient of variation) is a measure of the amplitude variability.
+It is computed as the ratio between the standard deviation and the amplitude mean.
+To obtain a better estimate of this measure, it is first computed separately for several temporal bins.
+Out of these values, the median and the range (percentile distance, by default between the
+5th and 95th percentiles) are computed.
 
 The computation requires either spike amplitudes (see :py:func:`~spikeinterface.postprocessing.compute_spike_amplitudes()`)
 or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_amplitude_scalings()`) to be pre-computed.
@@ -17,7 +18,13 @@ or amplitude scalings (see :py:func:`~spikeinterface.postprocessing.compute_ampl
 Expectation and use
 -------------------
 
-Very high levels of amplitude_spread ranges, outside of a physiological range, might indicate noise contamination.
+The amplitude CV median is expected to be relatively low for well-isolated units, indicating a "stereotypical" spike shape.
+
+The amplitude CV range can be high in the presence of noise contamination, due to amplitude outliers like in
+the example below.
+
+.. image:: amplitudes.png
+    :width: 600
 
 
 Example code
@@ -30,9 +37,9 @@ Example code
     # Make recording, sorting and wvf_extractor object for your data.
     # It is required to run `compute_spike_amplitudes(wvf_extractor)` or
     # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN)
-    amplitude_spread = qm.compute_firing_ranges(wvf_extractor, amplitude_extension='spike_amplitudes')
-    # amplitude_spread is a dict containing the units' IDs as keys,
-    # and their amplitude_spread (in units of standard deviation).
+    amplitude_cv_median, amplitude_cv_range = qm.compute_amplitude_cv_metrics(wvf_extractor)
+    # amplitude_cv_median and amplitude_cv_range are dicts containing the unit ids as keys,
+    # and their amplitude_cv metrics as values.
 
 
 
diff --git a/doc/modules/qualitymetrics/amplitudes.png b/doc/modules/qualitymetrics/amplitudes.png
new file mode 100644
index 0000000000000000000000000000000000000000..0ee4dd1edacb27e459b5601eb313aec675fbf890
GIT binary patch
literal 214334
[base85-encoded image data omitted]
zk!n*j6xSWlU?N*iN}G4l#lqwS`2f*;XD&Vk=hl=+_McLDu@LK$5+zOb_bE-NnGs#X zS-$>hX-cMrpi;iFMrq)TMTj?4)YAxNf9VEeAh z-U;Q}FiQ51d1?xRZ5rUIcJ6Q(*YlE z*&uX$XqRSQH@7t<`O?kQnu4ifyGebn`MEVCGwMJ4`(uKX0d>R;&G8GruR+((t-E{W zS~qN79>CcF8P(G@Jgg4OlcyZ|Tuqu1mlg@Xw+EO}e=XIn^hpLa0<&U&C?o*>|1N4h=29Hfes;bEU6;T%9S5`A7=UHXNrj2*ZN zK$(DJs<|oOd%RyVag=3$N|g)6fTPp0wlP`)7$44)7KLXPz9e!M&o}Qr z=h!ss>P7@9zvvWLTXi2LV8h(swl7Xr%gJ--+U@v+D!S?Rp9$foBvbAa5Y9o{SRx~X zW6@WUR#9JX={He{DG0t&(!%n>kUWhIWMeZkyU*x@xKmeG{S5DY6EgxW7` zk4GLF1cmhK-XV%gVpt6_gpj=(_xs*HieeqkfN!YXg$Fa@Cp6NSfRG2}VO(PYD}w1u zJ}Srtb}jTWEaNdH-m@F-nxh7f+B?&}_pD-J--w*@Mob@p=Ptr~hmoaN{*f-u9gnhu zbCmTmYCH>HtBvDbG(hG=C4?c1frIJV;UkxxJN>OfWcH7@Zx06;5|RD2z9%^UB1M?w z_#jfv0{g>JS)ua1dp2y*xP!<%Dffk*XAieLx%;IL$PoCm;506oy`OMe8l5^KFeY%u zNZ@NwQIo2SD30ufDzn`Aa9s(y?oUMp;e&%ZRmB5)2ohjcCu2lqj3+oDe3aJ3O0AE%PBUv=v(H-^#aYz!&CJARrdj31sqY4Dyu9kKw}jR0 z!Vj^6(AqX|o5>Jh(^ncJsn9{*x)Cmyq6w#>s2!Gkz-cERh7v%<&m8?`k1h54Swxz9 z9LFjPAnMC&%MBF>lFg6v@;V$Z@fb5$p-&jSLt6yj(;+G`xAv?t1`9uiW=@}zb9(o5S0)- z(_gG<{6KgZU&%RcyRfkBBV?e(Ao4Wl4Z1vz6fD)>@?7Z{c~f?>M5FBi{2=HkR4V@t zxg2+c0{NmBF>~7m(3kS5I~s)VrfE>%)0Vb z>DLgzPnVgu&@}*(!RV&RPFF|N=O->m!(%m+$pP9th~(MiB5Ab_z3lq=E%v&yh##Y8 zL1|G}b!RNH}9b^YS% z_Z2^%pV`VA9$x-2faF`V|DSoD!-utci!{0fnIu|lMN8NoXUe1)z=vxc%>45Z(HDa( zsHt|ES0%cCav5(-|6o-xF7WbmHHs5)khE*86o5ut*CV{Upe(a}dBiFbobMtC?rL-i z1~X)uM)9S#y1s)qeEM7`Iok&02m;!71aD-l?>t=Z>1)Z4fOn zr`BBu!o`Qgpr^Xp<}*Gij1F}r{WOp;(t;gg2fW7VV{WMH#Rc;$G|78ZIG@FJOxGT~ zuuYaW=3iVL7UC-;qiN8IIkJ%Gs>?J2LR< z*`9`i2k%w5+gY1VVdvpi(iF%qLdk53wmuQOgIK{e@ zG|`dxED%V3=MCodvH=0-_~$69Yo}g93#-b+f#aTIz?!Ey3Ea3 zjMjR8L>WEfm#KUVLf|=C)~9s~^F9G+pSU$htGi2fkP!B@>+_$DXau7;<}0oS7OI?H z1cuX=((VKzJ1&sJn-P(inT4jS&|>3}$MfFX+dUp~r$%E*&bT%;5}>+SD;2Ri1J^2< z%x2S1YV`G7T$!qdG5E7p@bdauo2>qf(uQj~I&&XrM(^cpb@n);?X;+9je;p}{ zK&3A9Ji4XQTfSRTXZOgcW)T}hstjlmauE@5{6qrWdE;q%{+Bxe-WUZ8iQ~Bg zV4nj0+8IucpoES{fXr3@zUK8V#iEJh&dq;DEf;@+ueN;yYCMyz?{L&ISdXsHe8Fb% zmptra6h1O5RKchZ0v)XzeRGXe^jK7czZmjyx#kuqKnkfn`E_`q8Q)vKX+*|&xCJW+ zrFYzAnRPzDPcMEC*g-eCvI0Q7JrOe5mUn(`U>^=*g}7bZ!(t7<>%%MOaI!l~L_|i$ z*l~?ooF(<}h+4xf~>ZY|X7$MI%%s4i-i152P#%}np8h!Oxp}ml@+72R~E3b51KNJ1h zIB%ecFQ}c#u7$~|4rHqr7N!z$`HdqapUTcMHnB~WO&MPPuLU@rhbK|Y^2mh4gwgDR z{z+FM!?vRZ7+xR~5iS{TA6QPmUSAJ#R?M=Q>$*u(W0>C2ZC$m2k2C{=V#`PwRZ-CQpA|Zox*GiTW$KAoO%YE792#hRR zT0xr&6bHKe%p;>9SWR`cz&GD&xtz`^-w_WPWP&mQa3SP_g-D8PY*n0An`us;H@9+H zD`Ib=04h7Hty2ZlGx`s!c6u7n8R906ui;2hTWf&#PIhouOwmWMaXh^^1voky%-EKz zNG)95ZU{rEzujh0D>SsVhK3cU6Z(|Ihx<&dpOA?fv_{w)0i0{TpF-roqZqa*kQ-veXkr9yK30sTu7Y{R8Kx4tiQEHI|-#wTalY_rk)$kS; z8XBaREX-yoiI$b+V=VfqF5T4Bm7Yt^w>m_EvbffU0c?IeiSORNrOtl$`tATG+-{>C z24m!~$wDOP@#BUmQXb6~7k3x-382><;K+Ez4j~>>vjGZv=v7u)B&uOxVCDYo_K9e| zk@ZuSBGA5|Z@$INDsDtdA9=6I=f@0 z3^8P9{W~}#W%vJU3!`{h(Ac_c@#l?C+X}=N?cI|tSRG4Cz!zmVH_BA^5vHV^UG#+4 z-|S+XENz>a-CA|Q4dY<|q>V^bb;d^#y&Obt>?rl5L#qUGmz+!Q zyAiz*1uxpw9FF<~^wAqT3JCT@IuAwD806c_Yrw4Be?yB?{Y|p9ZM^)OiIIsMhA#^+ z5ny>~vUoLr*1^~B$^>N*=kZeCc``fLOQE3*8fVQ5RpDW)hQJeSqgU9}*f0B>4e(muY33O2+9QMZR_RXZ4>C*2ePJe zbPbyeR2Pugf}Hvlr+_4a`1C0Oq*8%De~-4W4@^vf(YLf8Caw-AU;!W(&r~p@Z{f@n z;r@kNF5W^oArrglWZ7%MLcUJH`yLVAnOVv2KURm|$_ioNV=gqT8acD_H=&-79CrsPkF zxRDmV#N|P75>DCaE~;qVG_$S`_#n%?g04qup!9iF*=+y*ZC6?6AF7OK$$(t%vFIYY_PAXR;|7U7r8^QYP(1kDR8D2D%q--P|+9|M8@uuh*b`+d%509nMoY z5`HD}HYW|5RhcNIvjZuLK;E5G{0AwXg2Hs=;vtb?_P$5B3wC~p2P|N~-T~6vPp_<4 zXeE>M@^BH~Jx&ZP^J>ugxRODsV5nng`z7?T*oqdk)z8c#fF|zANqoMg%iMIDcs5J3 z=X(3yr5c7xWMsEyP!%ZskgN^@94%NHA^;8tMecZ&EH2o4A@tu=+z~TKe$Gr9)(X9W zpzT_&bXhTd7YGqIWzxXsyY}$IY#XHc9qAlJ;{ywXlJaStK${B5LPW|Y^>+Yg{1aKk z*(VcMvx~R6pLYmuc{}oRM3#gQO8I@p(P|mB7Ju^)plr9DQ4^aHq#A4 
zavSLn;}LOR;XV|nQ@%23pfL*bo%Y!?r&{3{CoRHUU|>N(p>J6*FWeZMm)T!dkGKw> zbbkA%V?AsQ)$@C?E%vhR%rSkw`vE*ru z6PZZm<@Mqj;B*{7=yD>QKO;L22lTF|K*QMrT>8Lyl=Y`HssS+*jf#Xa4XM7LA&jEr|(Wm}gk>1g3ad zK@bBK{SINzT@zk$@(bhiu zt5yGGiwI@3)I?^qfx#ySHu^f?=g!C#B80_WU}AFWE{u&?n5iYs)9j_C;i9PvUCee? z=PFFjKx+}^ku>gj0T7~e)cV7V-yn4)O4nl`bXNR~lk9HrD133}0v1!_IVE1aQ55WU zD;F1~Hxb*-w#-TBLRox!2ghV7W`Wil&|ez16X&YLJ+30D!q0l+dXnlL68kUeTPvjF zf*^xDDFT5eJ9le4v~)#5w(F_dNm}dBOPc^-8dNKE2|M*M^K}q0F{vmdE-rt3=oKzL z3YAQ)pH7!VCpM|beEYns;z3@6;B!5eL)w?W`B@F_=HEom2{i&^GEd-CC|=SZ)x+kO zGK}#CtkyZX0nNpR_ybPSItj5$tdb=~VH@vEPsehLbr{B9T)s~W-h5ZK={6L?bL*eC z{Wv9^yeVg~AD-VaGw#K=@jiH4au7)6*95Jo@jt=CWlur5KkvONjq{~Pa> z<__u00m5Q{GT-ukRCieF7W^LKc`$9GlgwUdc4Zf9Gz5{B}o2FcvV$x=jxa~9??AV$@e=Pf|L|fE9eQ2fExy3{CE|w zd+F-u$g_eMsF!3V)v}qc&c%plPQ<)8ff|f6*)Oo|Q{$^?rsme7yk@M~KeLp7uu5&M zxhExu`nNs58f>E=p>A#Ae!#Q!4G10pIy`q{W@sY=h!I3}h8x7S&op{&lS5O1JXa-u z;)vuxZ9MSj^xHIMw~nzCm{F;p-vui4)uZho42&nkY}Iwp4Fl-2;fre1D&-hTpuWw{ zZH9VY)-PwX)tT_nfR_Z>{5sGerSdwphn(7Jj_0Sn7N!Kv1TZp`?6qhU|6=tt5e(4K zlKy;hTQ*x4IQM8zL-rut6MN|0J^%}-bg2a%fXSCghko6cP6ajVP&~R^b8{79Q!JcN zKy_?me2TZV0_@V|n3%W-J+L%vR<;+(T5NRz4-5oCrOU1U=f4PeH1M8Kr+--A){VtC zHa8MfpmifILq2P=S`RYzOS*V_ivxM*czd_LEN5%z-@} z8D!{}J-|420iAQi+XDBb#K5AulXg{8KOpd!uH!b;_trJxX8A;s-qq}k*I-GB9|c7%Iw zM7}E9#Sud~0WopVK)q$f$YkoKYgvi(%6vTn{ZeX^GZv+E;FET;><*&Iywcw@i#W^t z)J|IQQFb6qJv?cP{ShAyKx<$uNtxmf)R$}E;}V;hb*}tF&Vj%5UJskqofiwUJ_ew4 z%wG;#c<_MNdW$Y>)ncZ`P>~W=5YV|_>H*RYE`8^a*!`=jwj2WSj|HNKkC4owloZLB z%s3=>I=PqnIy;Q70T|$xT!<9wd0+iYB#+4=97SwtKXO1AEowzg(NYj_#j z$C$~YsiZLtp05d?pK0D+*M8hRK0pRp(@a z_e4P1XD0}ucz#6#vIO8i;--W|X{pHGUsEgbIUF=eea4o;b#$(*>jV1#^~*@Yu{~$J zIcBJhAQZDbMA0%iHR>b%Ca|*Vzo*EIqtE3lGerRoZn81s6CE{`}e-#NnaI(Ww zO8cF2{GK-lZJimT20}K{tWC@aenM1fGNJGy_4yV$zUl4DCW5Ow$hfQ9yCFdL4^C73 z-K0tFygo258>nI6ymgz9fN&915jgx!PHU0ymYj;{apKAibIZ$vNo4(6W zOG=wK|DoxsqO#hy@K1MlcOxy`-Klg75`u(ucQ+y>UD6#MxnJxhzafc?pD?G#gFVbRcjPxl+fzIG50hP^X- zhAn6i!A0lWx?mde00&&U?&`Vxg+mGKfFRl$w+V7P-4@IzNy)-vtb|%ELMX5!!IxqK zqXr*rY$k0R5z$Y>fkBJdK%b+=N^M^6S5a_-O8?K`c8&W|d_twrdkD?E2qW zwEC}C{o$?0J&9Sx8k9QL)M!<1looU$`6LVg&Lrd0>o={fJ_(&FV)*^X5UR=^RcEzPt2n~IRk^sDkYwgjsk({b9gK7W?y#Gi zykv%2pIz@G-%jHWR3&{0{IH=nOC4ks{x2eI{%4+a8m0#_gi6y4s_)!h zsG=fHLD&ZhHB^=>p2D|>4*Zu76u1A5?O$n&UjJt%EPX0Np;`Z6Ay@OxIiU8wL_1(R zHzyI(y>Ivos4kB~0R7i)`b{i;BBUqB59oc{(!E-P_Ico36bn59*14 zRa#X8Ygxw&q3*5TFLi2L z*YMPc15SO;PkVD98V;#{5DU~S?Q7U$4CaHjGX8n&(ohd3XT|+-AhWZ_;`%_JvFB4T zN{Vhzz0G4ElcXOE?CuF~)0E?*@ORLTNM^}^S0;^*9^S#{n&_n2 zW=bRJ&Zgl0dgsdh-OF_( zfFo(u>ACBffh}N4TUoymnPBnMnAv52U?F_NjvXIU>|x)kV#|ZPTv_?_xT!L~Ci~S0 z8`I_klQo^}BPtEz829CW$rGGGL()WSapB*1#qAJ4CRZmgbOe4@JisAzFLl8D`c!r| z#Q@83adqWkIc{fX(iC%JOfHhg>A&Lf&0YNgijrT_OvV-Oxy>!s=)rENZe!zk_k!T` zY29k^lLM>X=-*DRw#o7IP|4wF!GO63n0c$)&$Ril8U~n3cG!Btj=OCP{v~(J^6S$8t-i_w#^l4q z>Gw#@NDxUQqgaGJBPV@v6DDm?f*NlzD6e)P5x}e}kpxYyriskiEKVkY3528gL_bU9 z8av#m|FTQ#8&_FRKy@9&i<&>2t}ZWU^Y=~mtdIh%(HfsL18~;)DnQvWW%W;(x8yCv zgI}N^w*r;2@FIa^BvqhJEG0UNC5I6gx$XS<;Yh?VT^^d+2|Vy2V_D((=gz}lsk))EXm5^pfl?FPt@%3|HgDtz%d3aPk+K_Z3T;xqP6x_yjjBf*ys=8Im4k5Y`i3&~v5PDhvPX(AaEO<6$T@uP@6 zKk8a@H!ij^1y7X!@eQD~>mRm;N;`GEoG&kJz8S+D8?+9pzQockH(z8wIgfl)jwFYThZD_p#*2lEWlOK)BBHWpRtZ!^RQ) z#S{5qzS43qxI`{dd_u3@>Fi>{;|}+yuCOIC@4pAw3M~{lGpqaDg~QsDaEis7XSX{p zQPFFW&%WNe=UJNa%4&lVmLuxxi?qK-sGmCUdZpia@E?J>RgZ-Vi@tv%_Wl-WmDIju zgVTH$PA!SnuCkJ8!Z66x1|hj_HB9wAHES`%n$Y0P^1!Y zETlV4BTY*JhutS-BJxPkuWl{wchSuGSM2P}!2IACy4xP;{ryYRg;-DjedxdE=L(Grr?cx+6RLVjio$Fz*+kkD9hwB;sAj>{oVG{!*5!;K$aV5KVSQj*(1ZfE3a37K=+%>sa69%=@8ggkGr!1FDeRz zxv-@^SCUB_X;iBqtXJ6*nS_M}74V-FmByhGd3i(3j1YySQ>QOdKR4c0<<%zg#lcE| 
zHq^{om#`87_0=q<05d3{C|%%V3@H@I^BJY!L#L=Vg2Aw|(oIXJs`#YCn#GG}@_3GN zVgPhKud0Zj9|pR5MnMp+UE>Qs<6w!t@dKK(P2Y$7`ewe@Cu;w+ z(3LWdLk#%thy-%=Qsvj9{^_X7>7{V{F%?1aaY>srUc;nffm+tyA=MOk?-IHzim zkxceJCjCW3&z8s5#>WTAez;BTEvRpZ-x{1oN$9!0jep4iDi#4-+NfVFz{1B^W=;OW zLB#TL?2qTY(|#oG2~hcl=$CItdcY#m78lJpBQ{=V3AqI1gbRTe#lhG>0JN38TWMrr z$QcF`{W=|wPPp@Ze=n}Y&}kLtx-V26k*|Ys?!bb8-*NXXA{>}nd(~Rkq`)bYkdc8b zBqZeF;Q2cLQ1PaL5ToTe8p}mI85MI&MvP`#jTsG_6}yS0!2G2 zrws&PJv{SF968Q^DdtUCn<{y)Dul|Bi066-hykfG!(F+fLz|!#Iz1{GG72|L*5Ps` ziKyU5_0M~hls}YJOwDZJ9r*ZS$apX*Dtyom@!5cyj;`$R(A3I4v{a+l$>qWAn_Khy z@i&Zb`v1a&y(31Y!1_yK@mGUQ=d-~pL-T54F{Xfmj;43IaVkHzw&bNvQL*HbEMHU*FyiV1qkJVGJW@g2%G^C zWFPWYY|U)PM~>kM_vV2=-Xa!BdA$VyG<`tA-wgx>tIgHpS;3%OB1F}IeXI5rhWjm> zaTOcC_Xb3*_`ioeEH~eW%I;9f)5=>5m^XYt@E8rUC8nmv&;UcV_is0xpjbu9=Uj>h z=~6Px{mX0g<4TLW$%eQD788J|3N5Un@4F|HD-kfJT{#&Wtd7TlVq=0*E$Q8y)dvAo z5@O=PPE zy+93;cz#AiJNor^h#UxpK2O;+5oS;dt3v{A|KQqrJ28PXb2Rzsrpjoh3YRF}`ODdF zgjShH$=h~73{+(BVE_?D#K8gvNGk`nU0?_abk}Jcop(%_elW@4T>T;170~K{##L)J zRUdvF)Z~O#s2Q#Wc|Yr-@4}<)SChG`3r%RfV8!4!uI#97F zD2US3$}$#w`m^nEcTRSCc;+G`QagLId>#o;mA4Kr*vQbGCu?Y5{_|KKy(00rl&M}Q z0J{~ic!^(OimUXZKfXO4>JzfYj8{|SopT44T4D5O9JMKLn|M@)AGDSbOIT-KhTxz8 zP@-!337TCRK^A>*M=!#Vtpd7TU~WBHM*dotJJ?A^iW6DpzQZQOCXPCUc3|6VrXT=G z%#`A%Jx^C&BKWFll!uA*;kCi=3iR)_0m#@`$HPiFF8GU)F~K*dH5q5k#|-I2|D?)@ z^V1(h$oZXQD3BsIHBefc>Ox!oC?YaQ?dbZF>1Q+G(#G>a5VB#%+DS|w+^{0t_Ht1E zs(d){+KpRVJUe&3J^}DO={9btSr^BYNV(33iG#VebuhH}boh*dE@v}8I94UIY@=v* zw;++l)M+2x>WhQN=R}SEXi7xoxEP*{)AqN8e=VZdkRaT$ahN-zg3eK*ci74#5)g25 zZ^iqdyddGoY*Ovt-A(ShJc=Oq3S~zUMMIH(h^|pV)pT~oT&<=6Dl=d()Z&y=3W8b#hSNes9oF!xN%KAOzxOl+HS0@w1RN0Vr&l%K(J6CP^5oP8Ya0Y z{BqYKk53qp?u4|zvScl)#P(-EE>GQbekd{Rtuw9nF<{WBsjw*%5*g41E}Yy4mq(0o zjZOg5OS0Azpv0gb%;As4t>gP2!f@C?@yFd$5x+hEVvAiOSdrcGnUxxsW&5z>B{TdtpGnR3=jYTaeA~FR7m$*W^!u!P+ z(#6-AA-nErlMTE1&7iq==R23wL))58ydDQG;LDYVaN&wn+(;|4yIZiid1uY6NtQ zL&Wq0?S_!g2?%lk)Ya$f)7RlRw9_U`vJUmZ*Cslt5QQS(l}UR-(D zCLZ8(RW1JzDHe}Vn1xo&+2sp|afvd14_2N)4LfEWg+kT7 zV+SnK7`!h<_h0(jJo(*O^cKISAc*h4A$c;hf|25e@4Hd{a)%o5GC1fiX6i)cb69P` zj;8-;p!gk|(>3f=H{N!^kG3k*pkv6;Bw^U6=q8qxU4{vX`8$%VcBdXvtIW4|QvCD2dn4*xW=*gXg0)zpf@!Snnn zA@Rsg6SOU~Ju9eQ^TL57@Y`oNKXHkxI&u26wKPi3e?GnVkk|8a z-qB1&doLjQ!XPoFTDgokIuQU>R7)mSV3RSEf~yRMj-Rf&-`YO=w0aQLY9yL={iD?& zhU6CrnTHLJj#-swM{@~L?EW#o$di&E88(YI!*-4f1>Jsi`@9M+rj^_(1N)W0Y@+YOg5|b(JfsXco7@blBn#Mj8CF*7Cy_ z!@x;hllZZ4^lcD>AiTQrF5agx1ui(|>gx8a>txaMkssJTOqbrb(nN+vh8#7ZKMV`o@cIQa z{Q1XmyEvGfIMCUQ2_A2<$jG|_V8H2jcL1aw6k=}=8J1dtR8^-EWZXTV#I+@ORpxPg z>PNoy@YphU<^sq#7CvssLN-YWWP10G=?w$IjMy`wn%EUCJsQz8V>Zf!6}V0&Z5pj> z^zDgYn^Z7Pp@MNz*=hds{DK587d#iW9!ECVU2o-d(JeJgc#u0|i-m_D<~B9;gPRx& zpq1&{Ia1b?MC))6w$=7%BL>xt0};(Yc4Y5o!N~kN=QiHY6_{Tg2*}3Pq@fxT?jRgI z8x=EKMHpg1t>rAE_|X<)o{zka%k`kN1?ZPW)guOev2G^ zvNB9uPvE7noN8Rc!|;*`gPD3KJrg4&AHevTh_!apM*XvK_w~&`J0sTCxl`p|ggbmM z#{5^TexiC5t2xAwafqwy_q6@`QTa=lU(!6C`CD)%2H60X(*PQz1KZL~gX*K-b0`No z?bhA5L3?lC$1i2*AA!<|_n%k*2?j-eF^HWSA4fz%g=LJgW`rmj<0cn8vXr#$$8YZc z4YRG{kLUk1v@nB$=ha?_VS92oJK*<`xR-<9S8FMbrpWDSUM@w%sA(Q?4c3ye`$rTX;ZM9_ z^n&n)>w?VK%@jqcx|uj74M z&9D2AupdGflpcV1Dftn)rS(5kk80zzw`OgU7-w}?)g!5i87Y-Ktl*oFF$Q}X)FM9Q)01pVPYMXXKe)QRyIF?=Qw4&a|AudnErwzJO~xN*@`U^fZ3gRCxAp z_CK^UqmR*W%Wz@;!l9!@PLf-dV3C%!f=f5Xh4^4z-o)X}<8UuGk}l-KM-QwcHCJdPagA}{A> z{#?K5Oxj&+^X09_%H@rooT%DdB6#8<7&Tm5H2Svt{SsDT_`qAmJ=^<(oL`2<+4a{k z=kERvCalKw^HfGSBt$mBLh49djdT@Ce(2G^Mrxg2pjMRBh2T(}^4sT9S?oWOO z2CHiu0TSjVcD91lx{>&DZ<1U~HYQ;Y)_CyzQ#2Gz6ei)EHBXP1+VE)Ke6K$xO?;(* zgg!-pN^z8ZDJ(Ei`I5W1x_!G10W!1O{hMHqg!D-5&CN7kudhakxZZt-9(-0-v+2r) z110ta5oUI7b!iO!g5S*uId~wk|DlnQ;D!E}R4?3-cdGo*awXCrM;&L>1{)jZO>sM+ 
zS$4<>jL}mCvITWvBFDAXU^-Jeql&D}oUkFV1AJRhNnxk2CmL5({n6}*X@ly$6(E)( zsgGjI)rXazHXxk*g5h%h>wS#3J5(yRTJW?wwG2~PNdMH3sBg`o(v_VwD4(=H4CH@h zp)JHh;Z!UO%F6nl&~^d9zL4C8GUT&ldK6uw!gm&8?Cga{%P0g443}AFWsaBM+&|lg z5#g4$8H4#+cogy5I#u$&bLXrb9VR~KQG!SjKF-bd(4-!A^T%3$MXgk}L4q)h9BY0E|T1KczDr0Nm|-Zw^3XD9@UYb^hoMU*u%1~U_m|ALC-hE*;YK2Lbrs>-lk<{Rw~tM1=1YuXJ+6L4n+0f{%h%|p7u@l9T~ z5B$x%+kZ%I5|VHi!@bo%qV~MjE#kE?+1!M@LQ%OM;Lz-VFh-V>`|kHD=qiVW{Au93 ztBVRvCr9;~+{DW+`%jQAuliS4TSM|OzzNoqv&-&WdS&`TLyi%KmZllY5i1RsApGWM#n0jdLwHD+ z`w3eDX=O%@V`2akat&lbr^())ryx|?*%(1rV6jt);L+*1Eyzm#&}QV^GHO+xbNzgl zml;{7+5Y{hO#4#IIJ@#@kR+}T@sy99)k`oN3gv&ne~F{XNNW<5@7g*)jt~xhPnb>wXUuW>IC9 z1zBq?P^~ZbC?q!&m{m4!i8bDu*TBcg$JsRMzt-DbqM-QNTSYAXk@i{CC&J8OG_>0e zp3Ys92%gV>d2>>B!~~@KtpvegXNEupV;F0E%s)|4>|q%Z!|#x&v&8Q_N1i`gL-V!I za{T&EnKq_u;1VUWFD)NpGBhfOXnL_tH5jn-KSTPIc?T66qV!Hj5z?Rgy3w zk9ZJsnA2HwUJU!P|8EO9xt}$#59#`%H~8s+wC9P|EXtOL7)r~fCO9o0e7bj|%ijQT z=|9ID%G%ay#pm=!uTnc-RiNDo#>>UjX~rX+y3uHei)Uy8V;jGXWsmwN%$bAfXZ=cT zmuxP$bkDmD%S!!aGCDa6y{%atN3r0sA6AIYSv>U4?YoR~vvYYjEHm#d>%dBP!ZFOs za@q=S?2EecxpGJn8R7ubk5^~|*?+1O)^O&z*oNO6g!-0iV|TXiZ%|QLmwu#J>cwp= z8GIN`3tW!Zr0a~_d5oJ7?O#h)Ko+8#oFFJlNs+Yb)3koRNIvT~9OT+P#u~1Qh>d}j z(P(lVg7Cme#3$VU&U??YLXdTu@W5_vr-?mom)ca@l9eIbca*%m8DJNS>B4}dHH&4C zz##B9M`Z1NL9vgTa^cmbU5;qW*(nu(83mLzYM&G0ibzw1x&{$;`JO5mA+JnB(}W0u zmdK&|#d%^r<0MaYXUbaz+LsBlf@t6A#%TPl6pez@i|$~Li*g5AF7cANtzS)*Woh}> zdtd}5Ve^Jj+8F*YAJn`YBkP+lx(kcFxNvq&Z*0&zy$)3>)_y7_4HYdSV!|aJ@56$V z%ddJunk$^icE?WAMj0`U!vnL)?~xM-YbWnQnUZiK3$LISEy~hFvjuRPK{8%i{`vC- zSTZax+YDmWKO88V0!is2VAiAm{9>7BJO_a7dta*+Imm`y@z zcg?-+xEE`u)nEKMWM0&`WNxw*>)^id5OeULWth{f3s!Q`NRrbgBx{%BeMi}!QQY~^ zr29IvWHgHhiGj^12@y-1aC&cn4hm{bbT2s}ptL|1Stx*j*Fkc6?o*4~i)hDM2U$#E zezB&;{49~9Bt9nGBD0CN8a-0jb3m1m6F7*!w>hWio}J|+%-QyzSWPHaY2bz~4#|&Z z3hQYsWtQj%Yk0&pn5cuCt{ikSTA#%3R`w@^GyC6|d|=y&GgI6dry@ zC>AyAr>fw~YsE`0>={HR7r*33!=DE+Sd74k8Yg`reGPyYVMwmAUFIFGFI){g9*o<% zcIe!kt8I1yMQ+a1V?A}I$3ty_Tb4F<4ks!JZEfna-@)&&lk7VS5g>zwbxT z%h!vEm*P5GaqB;+zc1eV`;*o@$h&x^@yP2-jW{J$M!`$d0Ubuzlg8hi-W4sdK<7iE zjojkOr5SC!wRtxnvN_-7xdL9|7CBa1&z17-eWDXEQB{ua;>*Lx1--TPYY8jNACh~u z^C1Ddo?(f&HJdgj(@Zkz??#)2MSlJ28Iz00RlS*eV5;b*$0GYhrW0nyVK!tsn$Dj% z6m4z`3IlI%x9z^25yr|2>3DQ9;(is}-^x(91z7WKZ5T}H=|OGoPHqEZWXn#E)0TlR z2(BklcbPGziZa{*}1nrldifQI-FYWA4kt7@*GlOzjwNlz!lNmetbah7Xk15 zbZpt-!}Mfci`R>2(BmUVgB(Uf4hI&z@i$EfA+m&Dhc4HLu>7DZ?ehN6vCJdw90IKc zFc!Ve9fO|TIEDlR3`Q+rL@xJ#Ks!VnoZaaY9HQP%M#23^z*HZb@+B7)^Tq2@4K%Ak z`Ha-?TW|DEA*MSTTEY3%G32o&TibMVF~Km~JTL)e_k<7&Mii$j0t9-h-jQ zq`T~~tCc@WB_nN!qVbAIt$ciHV=ZP3`@)fOF23^e7G=FxX(9&mcF)fXzJuU{J~U%h zMc+?@W=@`3rL6*qDjIA>S8+ z=Cn*!bWEKYSF%2l-T!gQX@_OG$#gl9hTrj?N7bkmy^DUqCc|ge&1AaN9~bxP`m}~6 zIoQV+Zk4iH)c?<;=huaI1v-YLve(Du{G8A=US2mQ48!B>jI;y&n?pBX4MyVf((69h z{r)j!J(CZ&r9UXoaoXRoTb<0i%2*fwTUyq9d2j@F^$%F!t*!1%WfNi^Ns$wL_VjT# zeN<8#`+wPLzufs%`JEWOj#PgW6wGNEiLFh=MT>h}P~h}$-rh}^9~FG(2X+ZH%wuF| zL+JiV>GhzJhyDFJ(P>lhJpb1PlQlXlSalK-;p+VR!}ljAnhqdg34eJuI5z;%o6jAd zI0e#hQ?#E#X0mb_A8)dnIe&P&%ivcQG*PPMOr!^ca$yrpL_58?9!MY;iD)aAH5b!V zj;oMpyt3fRbUHm+U%XgQu)zCE11{9~FV5=vO#R9gKdJ_%=E_6u9?aB4WEuj4e0&pK z{~&6gti;YNRCo6;Df|%i7omg0yC*&^(Rm;Ig4@M`3tEm-7jw0|d*QCQO3b#MA;lT% zRmrEq01@KJ#&XJu8E5Cj^i+f>^Y{Xi!^5*{zIDZa3nZxgPWwrPjBnMPI5}Hdz4>bI zg&mg5#x|S8;ug^&&bO)W@Vwm~WH85%(+5k`Nx?9qPDs>hFYi7|X>Ex|(96Bqb}336qI+IUyEb(hQ#d zqow5Kf>!domAE2*OMRB!qSk}n{;R&NhOz3$2C00lldP(X+Q=|Hyv&aM=EG4H_N6?| zs+tCF*;Qkc1Gea!^+vpo%;Z>c(2kI&6aW0#vbZzlFKI)KO2#>$lRlaY9YFe7G{u0K z)d=+9nzPdPS)`yN+DMryc)v_7HqZRIU}-vcLn8)|AiA>*~8T_3V#t!f3nw;T9XfqRNYfm-alrxps^2_f$-& zAghT(IqR^48@JTD%L$>9o#6p7B^hnq#Z%T9eHdW7PKgTs4il=dCJ}PqicJ7v4F=+0 
zX`-OZJ{`WxqR-Nfwz|KBpIpv!gF}GLHFmj|R3`F->pOJb+R0hLQluGj5m-;UR8a7w z>*N-H%zl)|GEJ6KDQeQth>H0-_4Kf`Q)zehyH=|0Nwa9-izAhVNMDsqCb{ON%=Z*o zrfbxQiHN(sE0Ub@risK{#xC;Rd&dFPEeF9$d^HAo61EcFFc!bSb#ij&*m(>72d{8AJk}4#j_OU!j#_W__8x<@&_mmSgR;Xl zM(WeV$SRrA$F?9kZsSBl2T}8oH&W^OxQ~N44W_jVYy%yJSVrQ#3Nn7emxWke`t0hLyE90`|6AwEw1sk zw=VtibM}$hQCZ^mm%CAm>z6xoE#e4w)xO8=!nwS03tHN!+81{8i1S%yVM9Dy(DGAv z4Tjgx^sN_2e`Izyk_zU@gDYy{j|tlS=`YV)L31v{b&Z?(PHv{bLMq|qTWQDIra+_I zE2^C*;=MHxj9^F^u^A^)k7HJ3Q-v(j|MaVCZVB#qPz`pBtWc-KlP7IsQ;$40uSeE-?P{~Y$I}=pJUEY zLz925`tde`fJ}(fu!cgoBk9|8*Dp&Mq-M^qlkfs~-%`x_|5oz5J`sb(zp?eFk4Xo` zr%jl%!Agzw`+c3!sC>YfBN7^_v>6e~=@c5rRtE>t5#L7211Xj0Ro>)+Wbn7KVEv-Lu2S>Eue2PsmKQ;t0E{#1yjZlW)sK+4*sa&*v#sNg4!dTQ4C7$h5$}=pid?mmbDg?IJEq% zeRiln?XWEUI3SE+L9#Hczm|F-|JMTC`@J8FvV#D_v01k3*niXh1~Q-oPt2bK?yi#Ybr(6^$Vqj4!CD z!w&55^Vm(Ez_*+ZVrnm5WX#S~ERj?MIb`&|8ewm7ei20;`K6()fdueO z!^8R8>*MAS+mX1fkvI|##2brsNPr&@G29JMII)Y6eXe4IFcPZJ!_j{MHTg#2OwV)| zJj>nc<9UXyvf-sDVTSB``!_tewCEwfIY+Xc$j)xY3~IQvt$$V^Ubk0l%-X!=FsQVv zNmyqt2$)%LU_=%a86WNzdU;~koaWxx-j8&Wg71A_(hFSS>KaC96l(0K$4gpVyu$eW zIapk-f8l%!(d?-|Axe}V?|bHA0F<60@eo9&^k^SG8%hxrOAKv*F#`Qf96(1 zjoB+<55{1`B>XT5N;-kU3{G=inV=HEIpg5g8e%+HE#*$q(juGY-g33IG2NYJ{zCb> zJF^(R+LNqBlg^&37oljBM6lr99QVZKI0nedgkA5i5OtE?X(iAEdtY{a#p(?W(!Z`z z@+9*Dd}iJcd1?8GEi%ZbYQcS%pyPh^isPOg2TJM&n?V#PZTgXTyCy_*SqtkQ&;?gJ zi8Py_dLv1Fs(o4?6eFe+TdkmH6))y^ypk3JO2Q zM`ZmsG^ySY9oG}gpx5{FsHEq`+zzl|rB$0x39UBqoUZ#^!z)D=AI(SddRrWK(OJ>( z!Ez{Q8qdA0DsYo5nXla3Jcttf=ONbFZQ@XC`ghkP*J#B0>q>{=6Q&ZwCayL{rI_p8Bq!tO^E#~23Vb=*q7xn+rG73h79vYjI;%8LGDj3fJDSIn8#lr9dA~MKb5|(s z%Z6fzcDyu$AHMlyf|p}zJ(1maMV0yVu8bRP|b>lXLP%5L87D7|*sUnGsluK#>~c~5H0GRMg@qtG%%i%y9oyk@AMS-lr?s2If&BQ$QPzQjd+R57368T1 z2peUum>hRNSe=05_tvaHE0Q z#O7cY8XPdciDGvY*`X?aV={ohMZH0YM99MxS`c@A@A8vs>X8rt71NUy8)OZgSe;vEz&=h!

tr0c18~op{xCF{z3QG3&&wAe2m*$B>|);?K7|MsC9*3d_UdOSr=5>)m1b0a~=`= zn~b+Okb4E+gb}lT_;^&Sw33P-Xk{?a0wc!)%G9Z0p51uVQd?0W=J3ZgnUhI|7vFt4 zVXq{*z3oN$rU3EJPPILvkV4(G`cA`0ljmEQHc{;gxg}z}Lm77+ojn)9S;w|jW?|A+ z?TW5}r-#$+!1bT8eEY*l8Gp^*fBp1$Q{9QrTMA@Pm?F-&2QqTQvlcL7yd~A^T`lhG zDUbMnc3A}c06N|i6r`@J53k>~^!3_^Z36H$8$0pesGAFQZ=19Wy2Go-)%Wtk)#@g1 z=5*{ct591`A{@7*KMcj|f$xwoWZSqEN~h#_35_z5m&d3~!bw^#b}Q~wO?adAO!f`! zYuc%;mu#Er-Hm{d0Ma<5+TfeRBbFFNLpR#nrYhnYrmK(K<^yq3p00Se2Sd)}|42!q z$;ku}<&HeO9{LH+_K);p%~OA$zDbEgctP)YlVy_D^Dz*Si1b&~7{h#(#L0LZg^kk0 zSU(Dd4`rgInC~SO_U|UTt@r-d10=ft`L#=j^V>AEJgXzyoKAhJHlLa;jXJ4)i*9!) z8D&uIa@~Je!&zq)AF&V6^0c$z5<|@Z7dD0agpkVJgCo{iI?Bzw&Zn(t5Bn8e7e6!P zgRzg1E9Po?OC2NtOYB|ulX6kqp&i7f-$V(62LqHU4CT?WUdBfx&NwZzw1Y}_NL(&stgkw%g4yygr(v16&+3?{pcP zo}Hx4~ zv*67j)=bDeok+<0Eko;5g86XBN+YCAI|{Ue0Yx_O#Iau&{&WaVy4Yt=s71n60$SKb&R|D*)g+a3_r&I6lc<MY)@let3t>{SibI!A)1}d z$i_anf53Bp;X{$KvdyJ=}YrDC#Z>8Kw70q&2h?RFF;asHrAI1kU4r_~-LvdRX?OCYkJ(DUMWg;EpChJq z20Yqlqe)c3{-$n>XnckHB0j=|Z&|T>f5$c_-hZ%*=|c_$Ld53H;C|!^Qox(lY|`Kk z()anWWjlDoi}i904UI^(_XpXwlZWo$5k9}mH@Axg*2GI%6rfB}=`n;}T>Txcmfbb6 zY!Gxf$bqVA(is=eYuC5hz_UpViOk1OmMxIjNuTM<#q2B=URlL_5;Gq^}W{)ff1SzPG^?Dm$Uxl z=44SrhBBuF5Y|7q38#1k6_cjuiu-J4ESCOVKTN;#vKbENXY7@M3HaLXjXOBtp?6R* z)2%qOcTfFkl9NX&WNY!Rf0y6?p^2J=sOIGM4?fy^dYgYpCL&$R@G6sT7Z9i<=!xe!Y;d47AqT4jPkAwnY z;(K>A1zU<8dYNSjbXU=v%(JoTWxP;={%d?Su$IjxQcd#n|04!P5@UHOloa=*&0A{{ zGL^5pJSy8xpV^o%z4$*hT9e?(%u2oY66ukX-{Ga{J={L7B?!kD9H3K*cuLd)Hh1Uf zs06@bB6s%A2Ub*gmM(H#R-hxFf zZKRb0F)8-{X>Fj$92=g&$=)2w*_lLdmMHf%PdM!S)$>$L}U)ABT^M&!FQ8X&ugNHB%+2+m-TA92Tb`C3qP#uz5W@nC^17=;4O^`+)=VmoMkXkH zBX|=N)6ToGc?;yF#~vR~$Ni0a9?BOmziYvhfBwAKX2-N;(EM2w3M-PLKf$S|wZT3b zv;bH;m6#ro)IRyz-D+h>sBI#`jy}i4mtHdZkLWdLjl{z!D(5_&iyWg_ z{2F!W{hxmR*My()_N_Gm*_-cW!2PainB1JRaChy}C?aIoQ=QM*+W4W>wwO_z%?Ebz zl8Zfa?lLGol>s3K?%(pXxkG@a11Cg#EHm8d{<^#WkHzS{Boji0Cd7X8W?jjpQ!=jn z7A5u!6502RzD^JYCD;_#{WHh!>tY46iH;>IQxvv2rvG=LZ~{@?=|aN5@ARFWx=9LS zhvVViH`J0l5gjysNoli>;b3O*X8K(d=Z>f80oVhd&&<2S%Qy+UbZvkTwEmFh{lYx; zW3+qS}Jkc?15P$-iI(XOD$WAaV41~;u^8n-bX?xA%eW;Xk30658WN@ z(ulB5P)5^jzpi-KJ1wi*!y;2+j6&uWyPLk&o@o_yWoeF zTwY7Q;ieE51kCKH^&2)v-JgOndHIiwrOD20Avo-*1OCs3$ca^F=f^kW4D0yVKfazz z6obRjj(_Gz~z|4nPR{mBNs3`YHNl+j|Y|K>53<7SYK>7Z#XKnH4mjo=N@lJ7MJS@JX z1X_xQpHe)Z@mxtU!q}@a@NGrtk^aARfB+z#nmI*eXudIgHp&J#!u#Mf^BBeXqap}e z=uwcQOlRB5YUaCAZ1wKX8hYNe6{Oz$T*8sN%j6okJL5&6k|ceEHP?=IcKnJKKi`Uu^TCzY6^^Tu;{~(^!ORG3C ztbz6{fPG27DVTIm&2~PIt2m)!E)8{*kwTKx#tIi8Hl9`gHj;Xxt9@`^r6+Z#R7M`L z7?lDY!cTi2SJpN@EbDVfIJ=Mu*V@wrkr^P{JOiEQv#4m07At$hHxME{92BtFQV{aO zC?8|yy4@|6o>4==uRJzRZ*H2n6)O-b1Atc>O;hQDsYTZzPsNei`0BBy$fwqp{OP;^ z#;$`7N)wYmP9#`SC%izr$MrX4cXr+e%y-jhPPpt4sVmW%<>(*M=t|y^xt?L|lIqoR;=Qj7DqZ6rwoKP=tw)Kb75wvWdr+lE?cHwd!2f8v%BZZq zr;D_7BT^#W-Hn7aNOuTGOLrsP-5{WJcc*lBcXxNa=l8Dl|G)>AjEoWq<+^Rb7pJq|lP+$;Xe)@=ew{)}f7G z$7&1(_fC(`#B5=~$++Ujb0Gsx~TQMvYo%%4jTW?mJrmZ9YKBQFd5uB)t={SnLYpFBof z>Iy@7M2B~COOyD0Gd1O;2Oq!UfbpMDD-(zYkGo7VD)XP75Orq;IIW*wCf@o|^PL@F7Zy7hkXhu4Rm9+B+EH0=%LhxupBXFH6SV&RFv zKv*Lqqfar?8(B;SkaFo<+3=w#lE)#0eUa6DaDBP~B|Q%^s8a}sg+sI)(KZ6@;Z zBb~kOkpoibyS=!$zrIM2tFU_S?Lecx65v97qXWXCrUg_!Lo5AHx;ccX3LSvPwwWT} zNif(IhJ-8z&Ni2HQsm7G7gEs2I=?*k(KYNq=ZbR($x+bJ3R<1|0k7h_CztsPHxM~a z|I2$t&rp&2w-%(hLfvF7$_$hj42<)KWzO-+gm3papzm?;h|J}5k;7(6mxDt>@{fZ{ z!JC)I*Rwak^4rIE1sHQdKL~ zHMmjPc>i7U_H^e!*NrF9YCd@AKSKM`{}b9L=k)w(-=+GoS!WGr25>l4#mMV42QqT} z>}g97^CfH#}RaURnyzJ;#?I1 zP-o@awAlt{TmQGde^F#g83_&x>C7=xvVAz^KbDpfDYL|U55c#xats^z9}Uyd{cf>Wo9y*NjD6+H zydn@FNN)!J{-A&a67f%bcc<J}7D*sBF(h)CjO z_?-NEIX*goVMIj*)apJs3-6AA?WGD$JTC47rUrLspiTD+EOmfm#`pE>GTFyodq0)v 
z9WFSQ6)t*9hRWJqe|o;qKEaqBM%En~d*e@8OEY80k}`6?-It|)OvlUT zEJz9k=(1?LMh}Np`6NlrC6N90SgrKg)&=Xt!aZ&HGCYug}8Ym3r$WlchBaZG$#bA6_Uzl#u zVC8GShtmEk7Z{X=4`MVv_`8gP_Rk_;Dz5Hr%LD?HOI9SMyJrPo(JbFREMDH+Qk216 zc$6(SAveEP8K`?-FBb39cS~D`f35rNQAU{E_Wpg{#csc3eznCOB;DGe>%`QGYynK? z_mw0yHOz-LV;B2B(ip=3MmhJ=F@AAJ1ub%t`0B4geQ_@$eo$McEeiRa*D7hh0KOSg z7O%X4Ju&Enc{FdY;XcFQp9Gt=bl&Lci}30IPqv?Hy77f z$6KBJO^8h5f6lIDzbeeH{s$vm*WyzZ(!q(Dt<(0E!lkj(*VnobC+SIh>1Gy8OFMr3 z{!E(pgog(tQqYYnRhT8_0%msRMSp(kEY&4VryS=<3q{Y>9>5OS~Y>Y4J?5mH&f(C>SgdXuQ zk>I;-H~s+PJNq#j>HH2b1Ps9U>p0iX=rG3+lvfFrXgm`nX>r_cXb4^ zO0jatC^Gj71AZ|TO?fmljVnetOw82p2~#G6eYF2Bi|@j+=W~8$3B-erVQYQsMEo|F zhwRfIk-F+#jcI$;3)nyCo0)8+f?e5_h!-2)vM3> zjj(YHhk4I_7nC3+2EyV<#}UHD*ww#RyWryUxP-2SB@G<1k(?>8QpN%XkqpOZpj7W$ z6bl0&PS}s#_K@dEi~)7f_TN1Cml?bNbUpDM4F`(~9vd)gG`6z32d$QV2>_z%R+Rl7 zlhe8o;L{;{uQi2C7D;8lJWqTQ$D+zJ`zViqU_-*K(Q%43H9RKR_o|d{z>;RQCA^TR zdK3%~HWEc0N&i^+L}g0DzGr%)Efg{`a%D^C%m26lu@A;i1)bh&p143U0U(*KQDoWN zQB$mwx`ka1W(E}E)<+r!GD^hG2d4=|^{Z9)A_?6dEFfw7Pl;FChT;*+0E6Htj)}2Y zi;bXoP@_qg`HCQZ7pF?*={@q%lKUCX99jGWpuU|;;sAT4i*SPwd~T-G zh=UZU?Lfno88n%jt+ZP0aU=EGkfeSYcfDx=-vF=+0Ke*9!XBdHsC-uc-whlpbR*Nl z0m(kz9C|%>358G3U<$YH042mY1{cSZh8H`05_N!$nQ)R&U}}BBHP1 zVM@#GhF3Tx3$0o#)@oo9dN}j1Bbt6q7CA9HoHCL%I6mb@n;C;g%r!2%2^yLU&w z=-M&^1#!Hx>gIk=n2Yx!+p^0&JZzlR9ec+dF22CMkG48eM|uM71@I5?li^Gm;qp3Q z0ez-u@%nIZp)s zd;VSv2~DtQrJO+d9QD#Yz!ymPIg>`>aR($|k-)}`*anlEHE$j4)vzG0x6;?I>f^F2 zFsTrThm#|LcV9Muw)89)yFAk{7AgK&QX>G{=Xy0Ng#ymB>~hxiG`;$**<1<8>5v(7 zFf{^)wp)xRgOkxpBbNgz{>@Td0S#0hHXy*Vshb6h!$I_PEuL4s%W<|paf?p~82>GF z@9K`&XU@O8%=BYY)Ce0(YJ2A}05Rn9n@q9uX;+d~J&nuhOyzfSxk=WmwSO~DcLCgu ziLri)FOOiNk!XL@>2e)?UKC!01vM(yszGI6I0fcyx{OH1z%QP-_x}Y>0Hj@LXY*qG z&CBbWo5LlkvZVh(NePGnTg#K{NW7%t)6y{l_?e{D#8?P0h$nNDjUAY}lA zgQ2AyaBqN^wh3){$EvRvLH}|(8ATV6O;s%wBhQY;BEE>4QjG!$4Q3GHz}06l#R@hy z8@$k3FizWdK%2231iQ0P1g#Q?mea`@WPS&LEdUGdw&u7^u0@K z{rcv}QV*~0L^UNaP^rEfWI zM-*wp(TxeBIr4GdXRtX~KiXbxkgC%m!&~5xU8wx?Je~WC2na-&pKz%{H!*ZAJ>E-o z!zfsOCw+$fYE%LA?h@srXrY9u!zEVT^*bZfN@S0;{cFUqx#)PV4}Mc4LzV(is+NfT z#%i^?z(tp^L=`{J>?v_Vz1fBYbf$sUIl#H?1qXYE9deAI-QhiJZ)8B2e-HdnaWS)~ z2_gk}(f_ppd!Pva-p?{OINX(qMta}OrXlcC=0}!lZVa|Qb=i;d5Vm>l5ehR}uzdt+ zaWy`)wiAXRKYbc!Ulg%daLuvhhO4|Y=YM;Sc6)#-a$E^3pkB{Ikt-V=@IqokrIy{U zznV?~g)Osl)t2uD*zp9ffC!}ajYR-<{fjPC~@BNlB{^@Lz?VYGZzn= zTTM{rnb=*@kTIe4y0Mo1{X-SGLBUO(*=Gg{@kxQzzMZ+)$~K}?YirZ>V|jP4*mTp>-s+!i0cohjm;Un8qdimOOMfO{f0dKJIkVKx1AI;en!#SqWEl~p{mwX2dx$-f|yr z{8#lw6*W>!IfrL=PYD-E>6p-9mSOXP(rCf7GDQug?p9)m?oZ_n!St4L3pUd|76RaG zFJ4VY1+DU#HXf-rnmkiuV{%yJkH3}(boWzOe^gj4aais!p}am{$q&3Jsg{ZQC$OLO z5MphG6cTP7e8lu$Ak7k!RKW%_wqg|3gUj!TD~uDnaP z&I8g{CU5TzEjHQj8@u%VJE4Ybb?!(IFPl7@5iz8lOh^bcG|{PPK@okC@5ipN4v&AS zJ3DXB&XG?288-KHJ^v&c@T}1>iAOR5|hY=(e z%s%dp?sTbhcLohd6ju{gmT-y`VT+{!?5W-QHH^0IshXc{*y-498CX6XW!C4;lR4kB z1e})iqg2aZ-r@eDS&v7%{__FZG*rBzoKgJd*q2_U$MF&+A~5HC#G-t2Jd+WAp~4^yvS^J^wd?~QVP4va6Mv+i4CrX!Zv6_PocE&T%3-JxIGC0iVT zDP}O5&RxKoqM)onz43(|q%1v|QcJJ<< z+t@D1{FIrmVKGPJl>biub)I?u1Y)`?lJ$q}{UCPFhoTLQcO8f?Pv1-Y@g*RJynU=* zAdx8KQlSCt0&N?3jw%tkzTEm$w6@b{`!bQs&VDGeFoj_INSDki zk=!@et`}iGs))loetA}Cyi#j#-xGAT+bNmvMMYlR@F9%jXAB=4gpO9Nh^(-{q%qIr z#2J*(Q!qT8yKU$>r(nCIDy{W&sBQPc8%2#}Ku>y{)8?+MCC>$Jr5^nkk@Er2O7fm7 z?Hg(0>ERa^Sfwg0BRNFqFbV%+bW|aRXPFS?$`2PRlAlN)qoW;O#N$7%=~SKQ&zj8^ zdw10yHt=4Ek@%;K*8w>gi~)~|^eawIq6Tp~n7EzS5mtGi=nqs!>O%E+I3iMr6(VzO zRB%V^9wljg4OaWv5}aD(T*-238utq}AR;5QP*MPqxTVf{r1?|Z^QANwFK@RBSya#E z^yIitOBIVtb2Dkj68>%=>~=z8=h_ga`)HQLLksH<6c7ij&8WfL zk@ceul8qF>a=kuA$$YFc7sq=hzBUgHJD@OGznRlhD=BZ?W7I!;xcXNYE+_LZ3aRa29XK60yxnANh7b-m_5oRPa6Vlyqs)o*JH>dc&@VGWQvfR6WE~xiqihPM 
z9$dhSu`Z*!x#)p{1jF(e`Jxa!=@%u$KPfms$`896hB`Y3WyOt>c*%|S|3et%M2nk0 z-5QvYgVOXD?$1j#? zX~QO1z?MuhS6I}BY1v3%u*vaAKilks*+=V-$mwjU6a!GPC*=1JmP!{h;Ro=D@Mzm^ zUal|`S>|(}2~h156a*E42U!`X?}j7Ir}a9g(8%7{y@4Kn*R?Rcv_I^FOh|PME)ttR z@J39q;*+}9rZ80;9sSZ9qhtNpgWk~ihw1|VEnW463n#Y|YeK_NpPR{=Am)KNa+_G9 zVZVi-(_|oIyT2G^v2zfUtEeQ`d-?y7hX_E4p+^(t+KKNH+KA&ZtE~t&#O+ERcr4h> zA0}k~1_;U#`?J_n4c~7K@j9OyVgchz?=stcJhk%*yXI<7W@!8I zgjzSfRA*lc+c?YOdm+(@6zoWzup!}N7 zB4wQ{|NZa-K(J#=(wj*aS6bi`b8{u{zSw)BTgiktLNUrOw6ggv*KK)Uv^khLveQ^t z2s8VX=fj!HQh<&0ol|Xp*{HUwi|KsaocRve&)-G~=KOByI(-5xnCFSyafZ*3@6uaz zLAYo8h}-#dRe+%~4mdF)$y63p>WRI@6Q&TwlNA*EZk>`4^PRS~ zw!$*XSobezdV+#NI6e?!!rc%^NxjD+@b?N>Fy6$(4rb-UhPGCJqJuWQ)YP)0NJvP2 zr&}Tj^JTw#_b&M7%E#ShZ6gz7+EvW;vnT+$=Jx6f32WeWK|zTH21GdU5NnosbZf$L7YKDeO6ox)wjWHu#78faeXCWD0 zBhL742vjDY$3+yez`RqY61X&m(l}%N%dPF*)3SALW{|8+(D5KluedL>me}mI=LY~1 z7VK(-5Y~CKrj=ID`g?==SzUYxCz+|sE5&b$A(0b`=Sl-5*H>3x?&j6650~*0YHO(q zOGHJhO>6+k5HL~bcwIhsc6Nd@H#jP)??tO#zaWk$4XOM0vc8B}#yjSb?%)a}Ih51T zuz{f=MDR%}D#oA*<*0&jG~x9)s@2i^S_g>g$b6p;A#FKV__zMAzlNL~AMPe-d_PJR zW1j-E`WQXcz+GQo8`S})_Xr4nNzKjMy5Laz#H!TLw;Lam5JXIg`R@et5zLnLE1|0~ zoq#0sfyyZOTrdt!1noYq4|FEWgM)Vc27B%Fx<6xLVq8Skn~8{s;E|EV3I-KjGil7t zqt!G6HwO{2S<2EqJUq|{2#`|q{)@%wu^1o1Dah)6Qc}xob`F%}Mb9qashF^)KHUHQ z1G1s#=XFXqO2Erur1kGH7c;6hfqcWm|M;6TWqofqP;`NrTVP!zc7P8C4o(`9Ro1*( zm3Bg6v!wq1-ennpO0ZD4(CB$8-0i}`!cH41A|fK^ObjS0;-nrR{0`$yZF0{9hqJO1 z8Qu?*E{tJ`kz{+i{|aL{cG=ufi7rQlG*Y*CcJ^p#ofup~JQ#2$Tl=Q2HiwxCjd zediPfJf&YXmhbgAIg{ydmK(gfZ)H#0c_=A;am|n$6|jK_UW*)O55@Mvx#XM}&GR4Np)l(9s24Bw&S=is`-C}IisL`q>IPP$W~ z#iGGOV~EgDv+liY#dBMIn|^w28?!Ip9;R+CYrS2x@Wh2kd%NezoDz-VQ zj&yT%)o7cdaMw2*f>n={mY0{0jg9>pzY~tLv9Y1u;Fj=wX(yT=7kG1oX*6Ggn~+#^ zlH_*G^#HSLv+H)FEh(99jp6C)<~CU&4E2%2w4(>8*COY8BP82B7HTRX-gTPTH)raO zj8Ss2#Jc(V&Obkc3E^OjuK)SD%_RZ?mpw_Ckf7coCBa}8hid3E`??6J99B}f`jYYr+bUm1R>c~3 zZFs1iHHV&kd?0eRHf@4GJ9>Jmtgy4NV7&1-Hn`l-1SBMEX8#5O}96OyC;_qF7uxuPQ49}YFu%U*3xoS7N=!3A^k$fD89)Y1M3O#YRW zl;l|%RLZrOgH~-y+Jr&=_A@Xr5M!nyn`SN|NFHH%U>?+BHYB8kLA?xQaPjLqJ3DnP z?3!0FzK=JD_B)S`9-b{`Xdk5%&8Urqy4`NRjvx2=GwM1#5m`+V3!|A%7NDPOrYb6G z3}hlX&Vv7XWP^(+Mf&}PK#uZYA&Hurv3sf8U*DgzWf7Nycf!^d z8Xto$vxZBdqUz$q2G$6QJ%Znnx_iwd-tBYm4L3C~CC8$clHW5-Z1jA2d7(xZBhcg@ z%$V~<+7Ti|Z)s_zW*z6Gx%CCch%9W94J*QwiR|8{C(>X?{KGE@E$Pkr1Yct|C4NV= zL&&fcrR%C@Y{!11npv}5F&^Kj`|^mT!c&0%Q3}exto&L+*Noap_|yEH+9I!tfxA-r zd(n4?Gv)FkQ+ehqaj~%#OD85JRVd@A8Gnjn<`u9HgsAqi!Ww- zw?8qg^BQWVFq+CIsaEH=X-M(*gX9N;_IM*kbBlY$wm%`KX~il3MMwx=t$D)7#Deuo zjp~IqZ({G43(5~1X&h-0A>BdM9wQ%ThgL(tZ{h|9cFBH-{>_d*CZO(EonCXFo4m8k zC-j{6(N=YJ}m%?=Ogq5YZKO*!h!Fj%RJomqxp*UTLR` zzDoJYRp;+(2^2Rr=-QkqIMR^wrwTL8;o&{ntnkzB&h}1V5RYqiWl{O$KBFd(^JshY0s6sAaY0HwS%fhtuSUCHUHZ}V(Fh6 zi?RE6FU*?{D#b1aGZN;>G?3;6$vD)KlEcYdF>hWE2}hiWB+<)N2r<^5SKe`~&$Svw|}|8eYHFUp6G?CQPkQ&yw6IQf|H zleuNRkDvbJ=)5)}L z&U(9a(xk~InMxv6GLIV;p;m1b9cqKoeqF2kj-yJnJl+T#!JTaCzrt{&Jid!R@xR1# zqmd3LQ;-?k=~|VrG@4b;N;fYRSzIRjPufhbOavBcBwuhk#!pw&n;ocsurM;&c0!IC zORhpn&ELbVd35(?G((tq1=O4OJQpg>{FH06^_LAL85Y4)*j?K05}EU4dG_?&uJG6) zTYe4&FW&m8>3d(Kkzi{wW1Zz}dR|%#8fLc;R8l=UDuY(9)JU)WVu9lGU5sm!yIalA zzWVl;>?;)9Z6iFlatV98qUa8xRqZzIA3l&So+~^!txuUB1y-tccyqeCWsjDCbJhnz zeTeo*{IS|+k2#7C6RST~3he?(W#%o}VqaTbVh$J?IWsuc`0dClk*n$V+UP&|P$OnV zf88`ro(-t8a(V7fIOcIvMsc=#*>wmt+B3)CI7E_`bB!}r%he=nU7|L|uU#*M{V-F` zDDh26qu#7~;KR?q&2DZk4YM40i|k5>#yVpybOFLNYea&-1!;Glh_&m&6V|-)1*WK( zW29l_Mt&uK2t@wiGh(v!-|D`|ok=vt=hgS&X^YtMc-f_2ZZc)&3=O4=X#Hz!d@L)! 
z*QNz{adVefDGlp_;JZyvkNGz_>3=Z8x_&Z8}I9 zv}um*b5G@~;MM6VQQfeYb*@j62pyhz=HCPSu(|2>|M%FWxk z)8#BVg!6GFegzbsAS(!x{aTL2NJ1egKWbu^iANgOJosCiPp{WbBwSJ0-R2yGf^@VR zA$Ha(Z)--KMa(DqP1_zJER4(h0OQlZDr%Nn>?$Eo^5Su)@z8vgce`YHAuhqN){tJ^ zut14e`?gWt@hbhBG0r>)JnU{`y@Jlbw{XnaPI*Vx940^;nhH z6i)gjfm+VI3{QNxWOq>E)^Xgv$^sAw7Y{xY^tAE+Xp zH8!jAwAXp4HYou=k?(;hGY16dii*STNAwH*yIs@hq$%x&0o2q>lQ4+tO?KpI&L=Dp z+3u9#&RVNlZg-pAKieD!WQJkFqs)d08QTg@PyC>u$Ptk7oTCUjEw1*U(1@^m!Yl;m z_E$gW>|&`#&hdEGP3zE*`i(7qOrGB=O(;z#lK#v6Q*v|gCBW8i-ino$hLe_QSx z{wqB|OQk#~ulMsO;7pKfGHM4QQcg~B$7IS)iAUUDFZ`I3+_(8W&rAmw>#J^tN&nVLb%fc=(#LMrb>A~q zu+=IS_o2<@LM*{h_8^M(QrZA1kfEmMnGhe3l_#5K;<}bSmGA2L;uezD!9cAEdFgFc z)-E>EF~#AcAqG>Jg4^J^uOM{z=k#Rp^5ti{l1H_2{R0lPubl);7K27tV+q};Zhy|? zMl@G#Z^ZY??i5NyBZy0#o@o|gyGm9r-v0Z|87X8b#KCw}*XONcW-f>M}S0?k%Q4(uP%JS>K zxXJlRIYZ+gM>HQN(9xIOkJLr{k1C$#nXMBOr1NL89p7J~c39U>36ck>?TxTs3{OLo z5_u&LH8wWc8R<3mT73yz-`mQ?=Va{-->uH_-kp)x$^ImlA6HWnX8uHdLX5R8|2vEL zOOxZyP7v{=>$0Pa%%6&ejw2o>e@1fmULsIO;N;e&vAO}fh5K}SPxg+4^tJZqpf=b?eO%DHWlCUNh0gBVfqL?!`;&1fDSLH!$Y;*>DiXt&c z^uyUg|EMI~Rvw2>)C`f&IU+6mtsf>!X@`it=Kr~CQGW9IH&5|LZ;+zBA{qqP%v9!# zjTy2rm+vcXs-AZlYzdaBm48#TNPK-;uEz;@>a9lzTZij40NY6woACBU~l zPf@-}Rf=z*p+x!kHTc*udo+?BU^XCF`!I;R-`ts(Eop z8l@n}^=qhc4#d|!i9}DQ<_sZ0!QShWAw*O@g&mWlEn@F!{43~!tuuKSQ!4O=!Esh$ z_)7X~ru68R5QNsweQ5%x3+etqWu_MYW?q60UwGg+%Wsy7URj40ZlNB<) z3kuh}T2F{dt=Dc<2>jATBV%ZMmtm2K_J>D(z_Ju&fkvJE&iQ9S@#33v$6+=fU3H0&kSos|Tja!+KPf>Y zoqEc<$Eag?l-b&Ze7>`6v5nhUpd2bKz2Lst5JK#*wCc&%w8}HT_h8m6y>WiD)yDOM*R_!@4VVk(tTln}@ z9+KR*U}J8M&~-WE({^^0m6g}4RKwQUe*6-!edPmZ>*jnT8+Vc)3vB!=dPrF0W?4+` zy!YlW>8SXjxh4&|GY0LiAYA}~F(xLX^w3%Q#|R5v<%v@>9}Z(&Mp{@6-_R9C#gbL9 zT}$2wQy&=I?P9Y@<|yZ6F%?*nPGDE2N6E&OEa?w>`LD*ED<-kGb&S892cqCfT+GRj z=E}-XNZWxe>@1_uL~+&rm}`k$cLNe5>ySTJeDh?dCmN1u9Iwb43umHI4DtEJW5&^= z=UKu$L1SsGFUaZQBE8)uLrF?2Q zd`M*__^_nTTSrBC&3KS`XjI!<=A5829?$D&Ggii-vUt<>yTXQ|0iCy9zdqT4S8z&a zuAve*=%9%n`$f!z;ZAY1apNtxtvIRQoq~kGWNK7rawCe$uzmq=s+>_?uDQ=@;hSdd z=QHtlYGu*}?E<>GARdBs)wa3{ill&R>7@RcMk+D%8-4Z8&Z+!^Wu6cUWc*}@vtD#` zZALn#(y1JH$&sz(>StpDl>FVMeo%I~UPl&M@5N%nq~XL456zWkC7{5eOh}eHoirw9 zMPVv(2A1Pu*}P7Yvk#OlGPpJ#>Tb11@rfkOmO4I#W&{6q(QVCF$2fi{Xs{SQESBPz zkETg0yEK>y@Uu7~U=~Xr^FGE%^tX6uI2Rb#FMKs$B|t^xRkO3#QKSy_kNLFUtD)_H z_11XA5ShW7KS29A;m%H%NGS-Jl>X@RPQs4MJpcY;9&$eGB(Bb7Ca(Rvmw_m890-<}NAnMOG*AQivR5Ftd#0iBwhjGIgko?g$HX@zO z%6{vQb`99RgnfWJKka&B+PHv8d~u?t4C;5gad{h{ETUTif2n@6;+$6!4ic>Np~26( zn5&f%9KTE@;;GD$s!~(Dcu1jq-1{PH$*$ih`x)uBwP(qP-1Hc!m?H3*?tBYs^1m59 zL}|I*Znp*?(zoO9@Ac8z?_CZqH+sB1oylldjBk%XRqWl{uB?G&Jly=kq3TtfF5XBK z8Wy?RouA3p`aJ+9KbeK1gu~?xt8X;3md(hNzrO~=^pu@>LAzPLSs3VVnswj!ORy~$ zYlm#|6;<(ebD#GTM$+hpl(kHsl!VXl?hf0jhpM#P9QI0K5Y2C1!S_+dtxOfc;~h*- zdOeBF?{xI*U!<~?;Uz$c)ciF;GxmHrr{@scJ#a=Ws5`=RV8aD}Ff~`J`nw;?8yvpf zT6!J_bsnI!rr$}aI+zi`5{t~@l_;S=damR>TL(V{P3X(ZbM@aPGMl}fYR8(oC86ei z`;hI)c|3?k#5Vf6TGMjbaj4YyX3W@Fo6U8Xp0$i9{cP`!@_sGk`GQ!qquk{hc3%EM zDX$smw1K{n_-Pw+rdm8@2ybDbh9IBJrs9&?H)j&# zsIeRm7jo7>@}MdRKc_&8vqnV;gPw}(?u66hexyE@ME3?kflD;wkT={-0wm#_Lxb?B z$UC`WZt`P)iOaShRLb@db{d7e+s)@>iXT(Gc`X`kF?`}@ftl3+Yu?(eDaywhi=}_U zP{jmydsS7}u!_PP##6aDRk2D3ctsO>X${>xY$MBW*a->gTv;ZA9|UuLpz2oiM|+UX zXbQaOfK!p)qou{=^mTzf5tC4}hG~SjG}G+8;`9V36;ovUdnnlRz(7>Rn7aM}7BP0K zQqxI^c*+t~-0QC+Eka%f*>Zt|HHTX7tcf{ldKcPrySg9l@vgW^k;}Gsix!S+jYF)a zqX}sR|MaM{!aW=yQKRSb!uuKh76eZ~R;b4lMgEE|3#gFR^{nibDjI{yefPe;bHcS?+^)6Ch~!pX=Q(z z*zJjsE3@$9nXs5vx0KBPaEeuS;<*MH2uFWEm#d4u(TM*kM+$GbM&t^JoXHF!9rviqn=yVJ4TPLI@d@LIbYZ02wmMfw+{c378W|R9$8O0^VXY>)R)t7CRAT# zINKc`bFb81h9WTRZtoM|gcn#p<0&eClUfv$jH)e+4C#+yDf6cg9*?BK-yk75em2FH 
zm57`agEV_GcZnhb)rQ~Bs~#S{82R7yS+Yx28#49;a28O!^cU%1rGMp(>fX3tBD&Rl zJ`$fu72(0dn2yssIjDBhq)mgRZA%mK_U5pBO1QFvWdT|l8X7|N-a4pkYZodSLKKdZ zWifLT3fqYZ=tt>QtXTI72)p^A#Vqg5FnX=vpUcUw)B{ZwF^ix0YRZPXp_Nhb1CCd9 z?83q2xiaMHju*N^YS;|%IX)}gCfc}4QaM{B;9G3qFZI7{I#ZCwp|-yyg3S6oGD426 z=r3xdZ`5&`cHouPmmxk@>qPmYjKG9>w# zM49H|a$7Irz=b}PFbEaYUBQ1$M(z!VRORyWr>HAo?pEDsfu|hEkk32k)+#*T-sO;f zb}_va{W#HXXp0rrj#R|@oABr<7{zcgS9YyP^P|x5(tOY7<)eD?Nod_RoeFd z%?wLpk>?YfmBpAt6$_H%H0QJ3!c{KdOMmW(~*!%n$3cY5#Px0_NE9T|^bh%76P{XO)uEeD)(*Y1%B_h@dGi(0_Yh zo&ApxKylL-9*~P4Oneiok2N^$b(XzJ^G3Y!^JiyM{QajDcT%>X>wU6y8!%)ulEi{az-&1N<>u~Mu=9_*niv9h2Z8;a2 zrx-EKP5QZs);wX2btTtKUWofXmF-CLm`M1B8ni zvih2@?G7P&cPC)!#_AF-sx=GN#0u&&|Fn3QKQ(as^2D9$Hv4T<(_n5Q_n6!3zf}y< z@}6@~d&-cUIBbi#i>b)=WC0Vl1-0^+)D%fm8>WN=fW0M5v|8cMG?aFnxRDUUo6bFg zSM{~fFH@5D726yh`rdUP zR2bv?_k;N4;1Lrgvjdr(5-r^36*-^s@>8bjA|5KP*wn!+U{iH;67j|1au0X~xVteM z@1uPdInofoJnl_v$lIbAABzIegi?QNeo%(Wn)iPLM*p%97qFZOcVsI-eb&d+UmJ)^ zZ?)4?vAKqm0F%sm2WASwMr53al=O0?%~f48YDr}+{6qtrL;|Gavrow)u46S$VsxLz(*;GsNV{|cB@>8on4RHS6` z8Je?WKZ`$cmx~Dvl~VPnOPmixzPP(!z6p9_*UP-Zd2FfA$NgENB@fC%sAh+{aM2>_ z*Rk*%fE;z0#JS+bWBy1e(NQu}{H{EXCmlogrj!}qGBw@n%>8)SHrM2|6aQ1qxGvoxuXVKPn zBDuCukJS!4QaTNat?T-3j{8DyPbLR)%K(y|X=>(kye8Ga8t#QdzxiLy-9iH>*9&isz!y6q`%JTB>=7_Y$&q>9d1&X_wXKBf8w)hUSdTcYRKJX2{ep(A`aUC7rrSPR!EOI~;gSUeixyt-7(yC^ z9DHmK9BQqMx?P3LSY~H86-y~RG9QCbTWZ2pKIVD=@HF@7c4v#&YrM7D%7gFB`d-sY zdh5=L1OGelDD4{>a=3Jnjw13X-nfT65as=wu$pVRq4l#81Q+v^&YOrVr{nd%GsO1y zSG5l9dqicr&g8@%dqa%(4SK_Va(jz4$*5p*bS+! z+-CxeR(qhiEG|GbCHCwCPdIj#cjTw9Qq{7F{yD)|Zogi&AUsW0X%8@h&Hc!BT$4C6 zWbS@XD@1?(z_7Q=Vsrb#lR34WNdI7Z@j27&i7>8=sUVE(45$50U`XIqea?%Afwl(L zs`bz1<+YLMSmyoX{rR@m&orZr-(@4HBwhBZFOVHg@xQ8DD54ZE{n!+%4Tv0t(7JFrPzohmfW`8y{DpvVl(I-{}5 z?>zkfNXp$oG3(H9;*pr<{O`XlMQy`z7% zRrVz>z{kRpHi)R#vz7%GR2dh8sqAH#Xc5d(6MN}C`vw0@?u1m+jOXEz%evXwk(Az0 zU{qO#i%?V|THmmbop^KI?{Coy8SAYbY2*4(2|DbHRV59oa1uLG_eJ!q zR|fgV=oAkSp@sD)!c!=qqygHBg@c>6SmfLn2@MB**-h-N%8mL+PFd|ZMznHPe_Ovh zq4RXbxcu~d)x^?TZ$lCXi)#4v`VyXb2akQZkwcY~`<1k>wBgJj^11l>*EgmDY1~5? 
zID7Uto=*jrGQLz&X9DgUkcew;h(7-)Wbe1SQO|ELSxU7aMsjl-uMWhDP<9ar;n27e zVn(O)Ge1}9^51$liTBl_#(H@yVagva<~F?;l%o(9;1bYX7!QGN`dUwc>(zW-cJbt( z2JNudhM20wbw0?7Jt~jMGyadJs}8H`3$`F2(g^%Sy1SI_knZm8Zjc7)2I=nZ?iT5i zZjkQoe%trG_aA&*?mcJUefF$bYt77QnJx|Kc#pPdboJGszRKIVf8|R}W%FwxVTXyg& zLx-+xcw6!El(IX0A{dmKDPMiO6|Mff_Vn1%y7E5j^%}kDdf(hFk;AZ_gL}zhWfau9 z@1XEmwW==K9t~56g|!O+2!iv0zrcDa1!kvPLDg55p!xd)Acdt>;%a8@kDG2Fz6V4TVKwdM1M zQw?u@A}_A*q+qnkQAg@nq@SSaI4BCt*O>PL2&w#K2Xp!+p#N*VtXQ@TKSVDu#ZR9K ztY#D194yAo6J$)JL`dmqbZFd30V@|6KALNj!3ki5z&t)->}Q;j-Z+Y{*^-#Gm0CUI z3uc&E-AN-l4ju{|_B}pIIkrpLA%9vDoH%d01K_y7*!vxlP15sb0tx8e)on6gU+@%* zBbK5_R&lul04gC-q4R+r|H9vN@Dv&LS1Gzmzx~lEzE}^TX>StAV{l|-j!SXmzw9s8 zaW^>>XqkAmWYi{wX&?!7Q*mNVHQ*52b!7ih5EuMmIe*n6pU(%g>~cwpB9pFnz@e!L zm|UXn^=L_s`SZ)`Y{78o%yeGqfexrxm$0sA1`ss#<&x+D%lR*V0la{#+iC9%3Nlgm zM0DoK^2KI4tfi;BdEMe?Kmilm(Y?KY?d){2olj&V@X@-9N}1pp5GI3*`qBE4@m5tw z01|+H(*FK;v(ce%Fg7@h0d<(Ql$6wkI(B%4h&aPQM4ulf8CV|(22{e<&W3=P9I9ZO z^Hzf>0hz?s&fVDUN&DLk+hwsmo(SUURG?^~6d5yn$orckCi_49BG<>lApV3(rzV5u z43T^-+rIk8?{GQeBtRRD$7YV-)!zV-U$L1QlNuA+yFo}#J=|w(^mWcQF=-1>?Yr7- z98S9tr%hc{z@r}F0Drl z`0Tj2qyfN39Cga;$O}Auf*bwJgH6D|&|v+>?esI8gV8qcSwUP~oAa7h z>}Kj@i!)8Ci15bNjxYdvOaoCHB?r@3OC4!8-W{*|p4wujK%GPX^gLkf1T)QD?8MIQ zbAbjco5eaP=L^`Z6}J&O78^;D^5K{LnG$7c$g8(Dzb*4U7X6^%w{T{l(NGBnU;gq( zx$Kr?IEd#iqO|A55nN-??&vpkSrIsk!n?RWCoH$2{4Ve%5Z`1+E2sU}jvLK~cLh5; z|7HsP%+99XnTA$+umlurmc58vN^4xU&R6UVO^S_!gPz&;BMayNCpA*6uy*dz6Y~E?jEj{c6#RlH zhW4E)!-zRr<{yqicoA;;%2ct|@+j&CqL~ihH=K_Yv^0kA2MFeGisV%lmf)Z_aYPl_ zijiY(z5z!ULlqBaR&#MN_Ru#v?&I(Mw@2XsH~YT60+u;q8tb6dBTPc#15&AAQa-72 zRO>Ktj_C6D_RDjvf3>?s!LYz^5OK7grAm-8u<5eC6(B{Dvv2?+Vg`ynTok11^{!RuwW!CKwH~Kx;`u~tb_$j zNWF(;^NlB~BMySZ1vtWeE41^bg**wF0;Nx^#5LfzbZkq-;ZAyzBXH`@;<(LV4jsmfE={_Z5ut2u?3t_h-ZsX zC~X-)3v*qthJJjQ1{nf=DsyO&>3FdUW-OZj_F<)VYie&$QqsX(2^+pg{BjUtgH>5! 
zB=#g3RcP76L%gyI>PL;FgFMkX&r>aDe8jU!f8Z0*#F?*mn)PaWhNKLK%*y&CgObD91>+3fyr?@Oz&zfA^-d-9wW%0VYi)Cs%zk4TU zwG_`fmWGb`-#>tqqI$hjs(N*$+)=NumfUJX#aQ5|VA-h@%U+6ObHz?iO+{30qm6#j zKf0GmauoJT*xCA$qksZZ6krsv=#pmnEby;UhP>re6z`(?;+v|o@@hp=a zRZ!81N-Bc2Ww?*#jN+^}6BsF!h6AMYvoOzU(%i@H1r?9zEWOG?zAW!O?Q!i<1rid0 zSdQq`uL%V-s;P62f<>}S)-#j|TpHnzL{mNMfF~EQFQJ+GqZSmmH|=&j&h>e&12!`g zpSf5!3#{XAABW?NeHGq_Mlc@C&VLDj8~ai!YpZj=Pr{?hXQ#HyR8L*>(ZB6`0U+Ce zZssH=rgwP0LHhgmj98)8r=m_)x z+zJ=VA=K6=ZZPCkx1Y5M|Dt<8rwM2;i3b0Y=S#g*TNQ@`U|s+Q6N^oIm|-2^NytcN zRAHk^OH7yIG4jDzeGO4Th?u)fwZZGjJaLe@J1atZ~GCP!4Qy9&r&v-B&K)y+J__at@!h#~5 ziP#ObHK55`71+D+$2D)CrX2V3PFlufgU1DjKo}KTW#GbL9edSLtANsHTDK zNzrw1-8ZR-!jVGOq;f?RLf2~1!0pVuB?|a_rG8ZBGs^SU4kBC@T{{y~rSjnj!~fiTp9b+HtY_)V9qgwAvDfFg#viv#y_hvck?@ zkYUf82*0uiLd#=v7D*?e=zfgKlDRuXDE&hbW>agU;s^Ml{Ea*VFp zKY8I`xr79-NNSeKWSIO*_k^tU!p1Od>CLyPAPmXOy9HWuay#0w)agqn5l&!2U_5{) zj8ruAk@yk;_3*~oDCIOFsGUK}{4cZlG?or2re(JN+PLa8L8X3ruw7iJ#k164D8^pdY6icp1FEmp-wg)Hd&|ehG=L*o8jVuqw(u|aQ(~7d~jYuU|_sd53)D6RA z-m!a|64%)b|JF|v7dGIo5imCWO<)o`bg11_;@%ty$@ z`ig#tK3kf;;>Lt6aT@-#fBZo*soCfGo_geH=U@mNNGZJTPP2icOPKq}2`h4V?_r^9 zZ!((7yt0(Nz;jWOl2Yi8V5okF%QyZ$60^LPJTQ1}XUjB`DEAG5MPX5bt}Q)H^F&8L zKuj-t+;Ztd1!hUHBWvw_HO^^FZ;7eYd{j}vLHoS+8APz45UP++Q3&CJz|V9Dp(6C2 z%7x8(Ly7D>sS-O*p4Gp4L@zX)+y-F=<4p#`$#)tU=%`KSg|9Kg#XX68ICOTSrQ^Ls8P zUDh4xB{xTsdEGH0Nk~GYYZ}R$qAbY~z9qdg6H;4kZD0)H%#fM0UHsM0_bOM_a{pfV zJ1*}q$kcP|%Oe2mlEUiFOq6;zz!o*xA-iGzZ$Fwc#{aQ%Dyph^za|Y6NJa)EVYYuR zJtAL$h{M%1K=>OpEt}(Fi39hgO!@)~Rw@QcDuMCVQACZQlRj3zi%4K#oZAlLz|^=e zB^n;SKv%p`DzjPk)$czyRXG_a-?_$c*b3G2L|TtCA--boh$F#$379G-sFs^SH^7$E zUwg+cOof>+o+bdzXUi8RakF9iy>0ckX(=Jg)jJv>ivb5DK)t)CF(k^B1zF#p``DJ7 z96PFiREmqQFj>Ttq3EUUj?t~-Rn2@XIqO>N%!u?k%`UEw2cA}kyTOk(9lG927nr(l zk7Xqc*rGzUmrrNxo{xxRY$G48??_72nfj6TCyLiPlzZlh8{s^n@a8iRHQnd^mN|?A zk1gkuUw(z-aykZqvDp7!h<>-?Px#^A0%g5jh7Wg{;M!`ajh*FADU5Ku+4hV~Yj{%M zFaUYv28Hi@`4!65&HYQYBxe8JneAo3pE1jDm+OS@s-k zNnpExT+E_HjKb{c_tNP%Pbovsjt&m@+w?;!SiPpj(9n>-0PR*>xLRU-3Ic4p@%w1I zlh0AMxQ57HamEqBtDumhQt64i#{kq+g^by3o%V$*<5zJ7p-}J^)=()GG}g9K1IN;b zw$;k75f;c7a=-5Kjm+r{lW|q(Byb0&6KVA?{}L(ND7;DkW{cX83R+x%s=QbN_?w%D zTZ*C`c=pLBysi1D#muZacV&F*4rR+6)U9nAjsjn2R-WmAL?AilEAP40|4*X2%NMgipK+NYBe1poj?YnweXPM|JxnEdN%M2ANU&C3iEF%JYxMxSXEz?PN`l-Cg2LjS zhghr4yI08t!iV6_Ghpd8epLCerf%mY@g*3ep!!miDpdbR6@y@#%X9^jN^@$~gom#6 zUtlOqe3l`O)wfwOjF!(ok0`@$og%uP>+o2tx5Pv)imU#q2NJ=m%9M*pL#gK-Z(d#@ zi{|x}jeo;CE*4F-#>dFW8T}{*x|!f|B?To7*jlaB_#Xwr)JH~(I&m&G+7h_2*~n24{)D8W zQm*uA_y?&0#@5o${xm%Uj7lchJVo|K`BX$YN@`y8+uQq3GFJpJ_}@`*WqA6cDZ_wg zEM_^&wRZTXrf?8gbX>ZYx+m*Q%d`-B3W`p6z0+}o16OEi08A+Y+<^_~LfDzXsc>L- zz9`FAMzjM!Cf&^+AH|~Tshr4^5Xe*(3!$G~J_(N|(i<`ivxivHI4qmvADfD6naLTa zW{}6Woxk#ui%e8mETmDzEBCAK?rrss)jE7?e-(*5+^RfPc?2Cwak)GazebNX&A`yr zb+mDCM@5HjjNRxKUsh{>d)LXkM&D^k%5id_Z+Z@eGF2oPjC~hAeXw1p>%GxY)wy}u zz)gHOYrMNqBZbXeyyT|8LO^XtG2eTn=EzKa(&Y8#o#}amN3Yc)X zJBqZkw-HT~j#WY_UG$ z?ObUU-XUc<1?CQUkmkGF3{##PJ|(>R0tjEPD8>X2UF^&9;a@(BqYV<@k^s-wqa2be{O0dM*PuWfN@&HO&e!f zBsXD2q;~y9`$Mj>=lmNi1_tJ|-XFbmWLRQWXaNVT@53r?#v}(A-6q>9?{2+XgEV)! 
z^MkHc`s#iHD77peC*7_oI_DMDR8$nI$Xp{1dThQj@)S+{m9U}DM&)Rm*-5qQ|6KDN z$HM(`+QunZd`VB!rd^a6`uup>)@tcxx>k0nlrq>yw^ZWwjl0X|+_!|5WN)150=7cq zntAK-&h%uW%Wi^6QZB^vKg%xrx}Tt?>U5T%gh+PDMZ>M1a(}i4$_Jl20O0gMREfx( zyHE1>YZkw#IIYHULuoZJWRzq`MUSeP~zeqgpJRpI2e_y{i3;nq5SCv)VBo%>&)?S zHQMT`@a4U|7o%u*q0_MXs>9#b!+JaI16YfCd9>(}YqE*Ug9*NoBGD-?nIgfxT@;0W zIl3Y<=MZeh_@h8~+m#f<0Y?455Rcab6-x=j4zRo)6z#-{oCz8J6leD8y@fW&^e>NysH#e zhy2bpOoo#8*`-DiF^V%Iz5Wh@9B23(e!dx8^Qr4)jn$hN34}w=Ngt(zh78Qf1iQQ+ zcCg((KP!Ib5dM$y1s{*7X3m0xQb2;!>1?6qSv5|0YP#3onSpzvdpyH=9h}3mdt9gS z^HOkB6`Duv!Z{X|U+Bv3b2*#D4fb++E58=JH-K;Tq4_d=m}fe{s5eK8ZhAUeV`$E0 z&S?DKPV1g>UTz;}VR;gefsiY85EKPlqazxaE-wC>S#%;_^6mhIbxxWjDlo5e;7@>?l_<*+vfVm$ZY;S2`(}$ zmCMRO*xFBv_pRR>B*nklV~JZ~b8N`C$jwO^b6dt~^Y9pQjvdY9*l+cd%gVz5QE>CL zPtvLNM*_aFqM&C%7cd{E8;LSFYFifPay4g8uEEmFOs@Fhw0<>_K{Lc3`0M@h)VA+1 zppDCsc6hmqdb7jg@D96$P5|47Ei9Z&##J7=2&%_~EpmF`wB7$EjrEBAD8e&LuY-3E z+IVNDK7(1^)XBc-6ScdBfOrixTlx0o87#`>VUat0K#z9og(&&_Jr>&Yf2^>(WWw5D)czMTU??m2YH3W(o z3+JRGb>t2Ug1YClWN6f0hp;o<_wW?mEIAoOxS#DJ*@Yb)92Tnep+v82li7uvIH0QZ&bg}E{gcvwuXN!(>+%e=gmtC!kbq$M^% zAeiYAdoLWY{Z#@6oOdPe@`i@hGLO`W~+3Z3NhOGD2ozllWjGcBm z`hPbcl|Z9@dojRCjk0V`V5Vm4e)$W_>CaNC9CAf3#*anx44@427Q#%G^ z?KnvUd0HSAE^jq0M5bNXtp5HoPC_$tu66@MFsHdL5fG%m{r4BhzgY8KeE-zMvkUb0 zHUWw2JeyOb7dvX6j4g9>(!M@>GPe243>6A&SlP_Y?jp;76->8T+(QF=uf_Cwxs(z) zTEyHeUf9vAZWIvUeR2&A3=$Uif}M`~ZJ$u|xPw)e+U)(s3Zt=wt$KmTuq2Fx^t5v` zAKGFZ;9gaE@5evzJ5dUYGk?iyrR(`7iK;{ZSXIGKnCow^!3VDu9=CR6Y^r4CbDMEw z6F{~REIht~pR;-%x*nZhZmOHw_yXt8(6rwYtCbL!z=1RneZgTYu1hBZ?VPW9xe4BXNpIs(2WV_gfiFm1x^zX0iTo5g7iAAS!yqfS4M zx7qhz>zt&&6@lSXSicyOu3onEF?5$xZI7Eg(*%uSbMk&jhN4EUk_BF6q;nYP?Tx1S@y>` zAI(_Z4D;BQ>&m(b2R`z`SZ{7Fcx(n3uU9PIqXU3Yq_6fFll675Hkxk(Pzp{`QvZS? zP*A>j9sNmR8dYz^T_k`JKsCbSPb52BcQ{dknOzbqE@#k~@}|qo{F)IJN|$_o|ITfT zvXqe&HXDVlIWbn9XCHZ9jA2XrW?~kO)%;X(c$?gG4ZagjZTx2 z$C`Z{6-Tb92J$+15}dcxFUfz_;`vzq9c0>EMZzif6LI>9I8XeR!IqTv-+x-8LmQFK zN3-kkw{2d7dbqii6K+oK!6QHEE(cGVrgMaIQ@uLxB2v=r1#|LQKH#JYmf5~2g3E}? 
zl8qbSBE*5+%FRvwY7$|5yRZ3llc(+3o?5WDVaIqjt2%42go=8MADibtMm{O<;hin_ zBjvyPSD-58RNDV@2^Q&3f0#Qmlm=yi_Hmf0Z4KwkC(C3!(zkl{6iILFc7L2+x=Z44 z@2{t+Z~k;4SmG_Q;A>e4ztdhOZ8^*laK7yRpv*R-ca+KF*A8x7kT^iC&1 znnKqZs-eUg@8X>2dhC%Q#X>p#b3*8aMpG2g`6|Qn7Zf|e*{HPAYB19PFvz{*d7>pa7{7t=2!{Z- zaR&w4lIR1KCf&V(VNZyK4CU#ypR^KMP-i+>4u7iqzp;gnlh|*Rpv&~)iQxPKh0m_8 zT+0Hg2d>toU~X%AVm=ARWz{^zOQ(jZB~_+-nx4Y≪7gP>~iN#rHNOs?ge9LVji( zo*e>+o+5*A7^` zGNu|d*;q+rj5a{j%2{oLTy^RK9dLk9lC`8dj>K52y}e#o+G-!lYlxZ%x_JPg$yDuV zs-y8zbE;ET%~0m@xmQPPUi&28LlBL>5@!8z6%d>MA_-^jV}{C$s+^V-$n*adib>dH zp5oK#Di;u0QT}7WQ87+LdW}|~E7vKjwDiMAGOM^M?H3wr#pZ%No#0~tWW^z6#MHh zYo0fx&kq6aE@l%;y4UJ5Wd6}NIZ)3YTQ%e%<*Kp+9f!y&+~PopZ$?s{{ADp#4r&7O z1^idI|^FHyVNs`&JY0 z%Yms==5IfvCSv#GHR zr4;f#o3>4`9LZ%BIR0G!2~dU$dc*Fby9%6j;vOe?r%W+N*9 zO)(g2;x8F#F*-JD^qABg3~QpR9Rw6juio|jCX-AqR*zfm8!%kqY7*7ed<E(;{wiq@db_C(lwmbXD0YBC>)v*jg>!i_+6V{PCwV@Pv zQL;*01T-Nn*pn#_>)LTDEU|`|Mis+0P7lSv-qE`$e*=moC(t!7^<#Yep|KlmTzo?WP#f%baaCH9*vd*QN6)fo^MbAD4z!l zwcBV%{&Yy=tgeG|K+5Zo3L6DSrcOTZ9|2ov)%pfKw?be-Nwgy)Z#3l>2z_%bK4Ct+ z;B%SpE(;7UkmM7{lh^n^%q24mR=0=R?WpSr_72 z&R6OdEe^`hoXuzHOq-s+oONk-ikkDC)X5YLrbfOLZO{F`7XXLD=OXurEx!{cxmgb~ zG8lzI|G0t~mpkD8t6|F1FioMN9SH_67aeH-LKoQo1EU0Qbsz51c30OBTWHGi5%Js8 zZJbOB^BuLH0|;H`lXvT?>MAavX76W9$4=iw7_JT~wBjIr^Py?+PqIg{c`fyFcojoM zRA^^j=BNlIr28pUr%1_pliFJ|0)n$gK}AvPr<%$%^opSye`CMw|By*W_@qZ=&cJ+C~4Ef9li4XN&?|!PFoc`}&OQYdHZtrjLcHYWG~@AQOo zEUEp`hM>P;tH;xAbGv0 zh1urm`zdXTd>dZ{L0gk8ilrn%hgLxLc>Z4>Tj^5X8&7u{+Hv0};K>tm`@ zNx0>;*&g2=S)*h49G*#D_74}6tNakxBBAw>q@&Nc6f=1F=Ok2}VtnE1fAP89;3uo3 zgDtIazllT(4DNrm3lA4PZhwf)6bdvpPJZbL{_vR<7qU0VaDZ+7S4LcSK#9sjohi4b zeBNxB5lZj;N|_)mYgkCxx)bNi0;xEfHCZgb!wm_h!&MqW<*4DXhPtxw1l8+G2t10Q zy&Ff>>Er%8UsJ5*Po%Y(ya`eYpYT3jOf{DT{;ba-OaEw9PCMD*<$-ue)bW8?q{!}U z_U^w~VsZ&G1Ec23$O8f@B_(D3jl_zDw<|aZrI7L{l7XmnUZjF=OKqa1nJ>BdC(Yhy zJGnq2@8zZAdfK0IPpJG2YKRWs3x;9JJu??C5kn^BaVIU| z(!u!FMgb~y6e#we_^Sw7o<4BO=#H(o!&u(3WOWZr_XS=$VGcg~uC_s;F26w&bqLQ? zNn-HwUgEXCM+$PDSfjnc!z?5xkKJ-R%BgZ|)ckgd=kI~(%a7&F>98@Yfivo!l-VENie(4H>Q--TV?NP%{h1Opc$9pRNPFvO3R02p}|RX!gxi zML1x|i1O$3vtI5=atk{xk)ne@cip1H)t z1t4wNj-Nc9ay6D`kV}i)H(wN6md`gqLUwu<-i>JCWe8~%BZ4^q`AIIb29% z%&ehY^eIEZ_=&&2{l*-f(iBbwK0WTy0%LCX3ZT!5M_c=~xq3Y9-j#6Qebx!LK7b?N>wS6h$JlSgyoc`U8{`_q0! 
zL3uY4%7^ZP?d3rlYJ=r5p%Nq5mX;JOR(h=;yct||>u^uMrhfU6Z_j2iH*;|rO1-@< zJl@O~yQ&D&$i9QfO0%E*9ZGlmuI39QnF;)P&K7Sr1gJpuwLT*`4|&W#LZR|TbS z32{7RlODnrU|RdkykOoem=KQGr6bY(+ZRfxzsBcC#WguDZ(>v=Yj}o*nZIn>mlQ6H zAYkp7;=`d(ZDl9qY(s;YQklHAW_Z462_ZirIkiG#mRU9qZ_ zc0`OnDkUwit{td}LNIwE&=85tsWf}ev>FSMD3jr*kA}7L&C4t1hB<4DutJWq+I5N) z9CPQDbSd0eaj-}h@>n~l_^3Z%Q&OyIQP2y}EKSF@joT4!u6k3zzyMXZaIBHfcz%KAVz=gBfv23&SGc-7N2sEK97?0)Q3Z!q@rpvBEKJ|i-<)qRGI zm6)ar!cACMj*Vpl^g6fUc7XY`WU@JWb9BFVYu$*455_^#v8gH`w|}rWI{Iq2 z@y(1`V}A%Ezq#^;XcBm^7HW*X(jo>lL3Y1p`Sg2aoduV+Vjq$t)gr*#(SQA@7>wBe z0|r5z=dQfg4*|G;;l4U*W^-n+34dj&RvGMqlG#wb*R$UFX>_vx651o; zm_!fbl1Mr>wI9Hb4Sw zVcJZYsio*>uKze})hJF&EzU=+&s3pgcTcNvTR0~_-(@4?gRn($;VvuD-{TZO1)u*- zL;4G8lx=-;T-qA^w2`9!vixDJe9cE zVXU3J>>eZ^Z|Vo!HGTMwb!u>BZf|4H>Kf|Of14g z7Z+WV6Z+=-q4jJmhA(R+#xOl_aE^64aW{Mo1XFvC$25OF-+!eEj>RwVBNe~xu2mHK zAfwg%qqLQCPR4fY1d74MWsd33TK{_j2F?CXir@2?!LnI}+e8DIAcmZcC;M_?VWkTf zcD9=o7T$wrome&_B%4sH*;$f7^5ppM#-YU345Z5Lt{i2t9a|;s3b-}{ViNTSRRwjp zNEQFXB~GRJxm@wQp6OV$=S(}SDkPlumIw&98IEi=zb_p3+-hqK(^!iw&G@xRC=_YM zYa0!Uv-r>y@%6?)50}NOr=1bCFE+SZcz88;1BUUc{|Qt-n?^+T?za!LxFxeX6&F{1 zdwI4y-Ibm*Wd1XPkX4}trz+~U)_2&-`Fl*ad$=7OT=x_QtgkIzIHYiiP+4wW!F5dI zT3t~D>@VkMrx`cJzW+oJd2{{k}Wo?!zcI9^F#9&iEn~jTP9% zY)@vKlOl2f^?#e7vpw70h!XYO>GjR^?_%ST7q}G9BBS)S^;ebSh1~J!i4o6!&yB3fhAPT8hBg;@$fJ{TY+l}dzCkEetJ88 zBH*-ox|^($#2o77ec!Xj%MQSu=$fL}hMuLr{l$eD0dB;I%V}=Nu543NO%Dz_IXEPM zflwEZ6C=Foj9)M=m5x|dR4p~laA1mz7|(w58HRSXCC;=h<8hCG7MBlSU~MgzCpvoF zTl1GVJUgf|-nCizy*7dywB~9@z+{F^^CPy*%Vg}=7KNfV;lKf0y2P2BLYYG%;704f zn#z-&aj_e4(d_*Od4%{iUsbl9%@jGh^5i%YC;6r)m{JjNX1%+A%}ikx(aGpnB;G-G z%8-bo{fo7~F1D)O&v=J8I_x*zX-UZJMxtn1TCRZZk&Al~c-eDuPUZ>9vxLr^)E9MN z`MwG=iU0X-Ank?suA?3GBWa9W2(-7f*qp+^g12 zymdYqE?l4X!n5KT&V2AB%hz(5SZ@yn^H?IHqk9LFJ3(MT66NB$cXcKFN)~7>7t560 z{Hrc?Ty22AY?fGn_-VB-Uo+QoBK*66Z?oeym(lK?V8tZi?*Uke0<`~>l?S~aF|0kk z-qeoPQx+WN?1T18bmtbKPcMReoI!3DgHgm<15+;q z(`o;{^{qr{b2O8OvZS~>!Q<}l*+D3H;3zA~g#^s8zP-E%X|X7H#i%L;gqco&^AJ9k z5Oj|Bf6#0^_ZGF@L|bSy`nvS^KtxR|#VVObL`q6AO+h*;w7d2A@|0`I;(XKma885) z@ArupBI`|-`xfL+Vq!9hh*XuHYPC4RUa?y5 zj$--QD0sc2p{HN%jHKU~(kct8HXK5;O6I~mp^!27@52q}cnJf~A1`A>)dGc3|D$=Z z^$U)<4f>;J8j>JE%zbV)MvzdZ=BA?l7E;AfPWP~;2AWgcZ=W=MF&@*HJY!gqu#gWy z_Xh#UO4UwQaTWKTbo*C%l!D>$=&U92DIbJu`W-YC-%ALhdk(?z2~yxU-;Z8T5>af* z&JC2yT>Ng>x*Ydp^yFkYssmB6R|U=O>2y#{6I_7-n`d4gqz~2fYZAF<^Bx<1Ka3hZ z$Pt6Wuh?wA(ewsBwu=o7Pf>i$<@rtZFhct)x%TjL8J@?Qx_SKkM@3!wObpDerht)m zJQVweq0@_u3GhQD=C$>>Z+w5ZjBU4G+#JC^jpk=y$omx9&rd1Xi2EGIvgrymL7z44 zKAWiL#O}TDT}}?M8-!>F|Es3(JSYhorSh1P=>xUA=Ey9D@l!Cx1Qs?R3cr_L>a$jx z%f#Pg@0$eGFTA|?(Y1A;6|4TU!v=BJC|VbX;|r%r6s|AiA^S%&@&DFl?_zA3veKsv!+$%7@qYx}BSpPeOPC`D8=*C;IB=?v-e6#Y?|`^ca#&H>J;Uzc*802Pk8p=s zCXDAB3dXA|eN*GWOH*YCG19_(y;)O_;>RC)9ZqZMUm(-g)E2DXpJN85BV{DUL5XT= z3PHnOu`hgQ(2YXbo$#?y-#CcUoAqbtlW)Uy4;l!o!9MqNt=u5>`0V z{Pm=Jz&oY)+5#>yq@nUJPaFO){j)`6&bDSsSFFt~&8{?XJTUV9L4R1daWyJ3p;DYB za!|&mq6q}$jmfZQEq;;?C03k#{z?}pXVZiL4{i_L^(jvqI787Wv^xcbm+HSe_U=IGhB@`8sx!H{J8b#-Kax1eaD2wA(SPL@$0KSu10#Qf zw`Z8ev5111=Bp%^e22^rzuD zMD}Sa7V3e^kh4rBv=Z_owA+0|(rPC=Ewa2j?dj|BW#dghIY#AF@oet(OOSTYbTetE z{GE#iVMlcEiD8o2pO%w40U=T){A*3kLYu{Zmg~rX%z+Q|b4jk!f>1-ln`Pf_d{~{2Za0}swchJ&}va#ue0VWZo zKR9E~{uClTwN&N^*afdy?sdVP?vmF*l3=7l3xs@d&p19-^Y5J-E>+(msVWX(xo+0+ zL5!#E$-&Tr>s!|5Y)J809sqgO zA0BFdWooh-Mlg%0Gs=qz@9c;*XGENLLj=G;S9_{D3G8I`+K?)Y7*)sw!$vcy?+jQ) z4bdTPXHOp>+KihuXnddidvY2DKF7}4$p~}1pH{Brue%Q8joHaA`K5O=u{}RO9Q%vu zX;4A+DAnTPAOp3?GQ^w7h?%iDoTeqsT48ZYHLiDNv3TgsN1!!gg#-;rTtAXr zGsQ!UN=cmuB$6pi%pE#`uST#sI{J9`2OteNg95t4Zj=9*<|m;%80qCIY`k<9 zoW(&c+l~#2m{c-Z?;Y-c0AC-VP5{@lKUg5K(fzCYn-V;LCK7PIGsP;sLL~t{N4v&R 
ze{jmk^lWUE%7kyj6-t=H%93j;jyRr|rfeB?2<(hMY}v*UeV{RZoSL1yXkis%Afw3p zrX1MA(Tch11$%|G;RJgKJe$Zp72Gjr+eXlcr6h15v=%mb!H0@%zSjN8k8#5=e{$RYLABKTK&RC7Dov+0P>C z%-o`lkL_NQYU;SRC#NRYyGkRR&o2VI{C4*_BY!NZ1P#qad127JuEJ4Y5=_5KNU47_ zK3Jpn2l^Fu^s|=`{lOSvCH%FJO~K@qBuEz-rmZ|R18Ym#VyLdYSB|#ZnM0>oOQUh^ z+zIFt-`<%JdmU0SJh8xcGT(2{9bAHweEqR)NV94j%jGNj>kciNIXT1TjyM7Xn;rI$ zAHSbkop(%x`#38ql_>pMo&4CBkWlWyh2z(26}0b^tN1BMN(27PyAd|a`wfcgrz=q* z?r))s7SnhMGIy8b5FcpuL_^|&7UzKEi`OS572#G-rXZBw3el2g!C;q#ob%b*WYG^4UGdunL@->Kj zS5A!jq^pn{3r8zg<=woALxOFmE3%&t>jQU3=7yD#kTJ~6F>L?Krj%T3?w}0 z>AX7EKj!xQ44@@@r&n5!qQrX+7?{sZ4rC)FyH>W*US%qizenTG@#;Kbj#4abOoHxq z$9vM{^GK_3(nfa%x+V+`#xvw*XMW5~;tx7`#1KxZ5L@wwfOPL~!_8SvaXBtTsDDyb zNI&E>9$BfxcTadXt>iJQ7~1_wJTMuI*jAonDM)mYBe~h)Kb%h+r^gXcM{~sFjA+`w zdWF_&#ErC?_07m^U`(`q&&zj3T6PN1e^jN$$XfE@IImo^CTfKDJouv9!IRB z(O*?Qxw>AW1wo=3&l=f1Uj8?_-j227lv~pH>V0p1q_Wnm-FC)zf8TbNZTN_xw6KS1 zIoCb4l71-$`3B*;DYebk+0@1bP*a4vsgx3_cbBL7^;1rV2MC&w#?%Q;1dhkJr|r76 zh7^e>kDqCZx`LFey67!ne{kAoNe2_v6b-_|tbOLg2gr%?A?@uwVq^=ZFOhcbOPZ}2 z3gkf~SHZU)1WoLvHa1>P2WX7G4?V+9{3fSg<-bal@-b`B#Y<2dspd`o`W?W>Djo|) zJ|jPlrvBR}78b;hosFINUmbVfWq)Jg)46T5T7?x7-5NaLbK8lV5*S;AhuEQGwx4aY z!g!Caum={Cs!TPnGAG8%W_@s+{bo|s)%FeZ{wT;bW^6QcFEFEl#Se__e7&YpvzCT< z=p(y-zK65obp7*}n<)T5X7=wq|t4-RODzHBg$kb=mL-ATal13s7Oi92tQ#_YJa@q_NZOo zsEP{GJpQcljE$318HzU&GG{~xp$6;lv)5;I&@UwbDIpqc^}95-Arrw}KTAz?co^Ez zQRrZ_$3HGa2D!O^ifxR1Cip9Do4=!AB1*^CG<)tL2Qg^FJICpOHo3RhbK{eW-@Tj< zG1eZgnE+YtZ+RC5(%hb#BhBkhOaUn)QX8J_qf*LisI!(e|dq&*haRucL5)4-NZXexW2YUT0zC;HE_Xae@?qEtFKR{#s0%Lu_a~w zf()r#eP&qA$;p}zFrWHPbG*#Ph`s*zFXF{TA@6@zHUl7g==kBz;{UA(vGqatEhF~u zRGwvC|J4f6Ob@;!-2b(ghVQeQ;nlq_z>F9 zM^$J+<(qlBO2WRhI)7=Iq$`;9=|x-|M2VK>fAFwQ@jG z>eFm*i0ysYBVh}Sy$-R-4tQCX&e7~qfM2ANI!C*s$n!}Ho`$QEc=A=>0Qv%h7YmLC%mGQx^keGI%mkc#2mQi$MRa~ENd%zy6qVNd2zS(VEVGhV8c!rZN1 z9tr|KkYdJM5z8eLuD4*glM6FH!N<>i@~kwYef5TNXnsehaQH_7#EcJpof zhg*p`LO@lw@8WQ6L%qdfm&;IWZ(RE=BrG_MQ*(g5w_3l<4=32D<#(eVCNdl@EWJlS zvSj=D&OhbHx7`G$og4!LG4aI1^h8dyhM=vv4}6DBY|O*rQ{GY^A89lv^uxe7?i|;| zc~{rnE%HebrHW{5Er%dL2+@_2(O?h=O%(D?Wl4Zu`i7YjLPEeSDuT3?nvom~2NaCH z6sXUlx*3`O_RdW+5854?sb(j3C zm`nqd-wR~~kRAce76pYa)?opU4-C#j4#URa;GCJBMhr_9EltFfoX#$ud+++d_@DUH z+9(nje^pjpCFLnf`3-{t85;JVwD*xog_)F>+(kNu--B>fS!pj!F@@%=u@YBFNiSE` zs?ZB-gOF<1VD^73fQZ<_<7MZR$N@v6bQAF_4ei#?D4e>-L$%L1q4_FekekRzrs?Rh z@q(Gpm=t7j@2#yfF1Ms}x3ZGIgkg$&OsiZ3uH3S#*8}b#-dui%vQwXWKYLn(qPXsJD&OSxpk8t=#XSa~fI2Itdha$k|f2m;F!;(wn zvP14YlK4L0BAw9J&S`19{rv+F>$h2IiGZ)uvpO+x`0f{kjqTUcFUs>5nB4jr2it-9 z_FKtpUq$_WT-?IK@5l@I`|s2kO{`6n9jwk*%7F!??CySdID!Rq{fn-O%Ifg%--e9u z#24MSfWH;C9qp`>kx2#4B=qLyvhtcD{-&#`aR%nll+I2waY`z!ll3?h-s>S9KT~Sf zjkXXe96D&oi_25%x(>o#lH<$1QbuOxCP_ThxIrGbvt&bQ4BBUI`0o3+Do2;%7GgaM zLqC=r+6wRD?d5m+H#Ww7%V^B@mlGksS0L}T9;>VdUFAXIPxhOJ5^JwZ>bk5 z1s=WU4(i9Cxi1jPv9W>kJ1WHO;rG@_Fx}uFkkBO}cQGHrV`rl+dnZ0>;5Pp2*K4(?O&Zk4FNrF;!6~0I*-Dc^(Ii#88ym1 zchbIyLGypm{SXHJy{&#d;p^&;Fyc%Z?))Z#KIo^z*fCd`=~=(O88e3)B?=9vuz~1c z8eWh47t`;`4pHT%fzhf)wvT=3w?ge<<$59$lDW^M z57*kyZwa0Q9yck0p_$)QL1K|W9L7thY_)J~cBviVcv9)#Uk;oVk(QKyi8~M>Jom;} zm_vgh%>kDzB4dI1o;(%!tR z`aCQKh$Ku*^k3v%9Vmi23dK{uGsf9>=R_oIK6bxA_bg)k{HgR&rR&MX!#x~C1dVzu z0ukY#KxkcHNV%b3ZaIujU!i$E)(zC`Upl(It8=((%TkmrwTYHYljPjkSYl#)?l;Vg z6%}4n?O93j_kcn1c|@dH?@mzvf=na!`!Z|VumYIRmyWX6IV*y@K6r=F?mSi4+zr zgRX8QBJx8XZCWI%P0a6QJ&;U^ma9@$>+|~7`x0fF^M&l$K2DUvwK}~D*RaBsm?PfY z^d3`(?;KJD6f_s)>KVg8`s=2WB!DUOW{Lar5(m#VeEyZ4m^H)DAor=#npt zK&BH@7{9@!ps38^M4*1S;eOyp{rAEF!$^>U6&h)5hpkJW_r!>Gs)>|r1|4_-g8F8U z*w*@?bxz33Hns}-^$Bp~_*41Va9VA^;^2)alKBqfk z7H=QhzBl6oNh5>j3eWcCH>SUW>}JC#7VMAEu&vDpw+^SVc_{F4J0Zoo(N?IR$jZvR 
zR%#*xcEvYu-uS+GBanM*fce5TJx$e=maLm~5)!dE(~lxnLweinTLvYIUu&$xaZfHH zavtPsa&ro3T@Ma!4KTuhc{esT)j-(@)mENb7;9n;pN22BW>YJLb_UP>tVh=l9~b~S zibFegr6IpniA7P@CO?`J8`VYq*e-bP+b*wGsroXAA;t~3biX?P=_ zJ}sq}i*oq*9N@jsyeOL#Y^1&cuS-YUU%HxKph*Wbkf~@B|Ak%#F+*l)n?I~odOwrb zIwU?f*H=M@CSSsB$fM?2!RJ!V1|rk#w;xuh#poCv+b{b^?NhGLe7#*_AS8RpRI~jo zOt2KXb1o%2Eey=+X8j=E6dfK0w?WE(oU^%3uFCoO&b>f1yRdVAsZNBT!7EwJw`1OM zyjEj9iFYCe$rI!wch|V^t1XfExt&EdRo%J5;-J8ilFR3XdOka#%x#sFE&?cg>=aFs z?B;T%mi-aO&Hk{soc5{%ea|=nmEd@f`Z*L#0_7#DS1il7zun#+R9x^0lN;6*_7r2Y zHYF(LsE_s;n#|OxZi_8)X?yHmuyA8sCwrzVKe=DGzhzDnbx$suyCY4Ypa^=KkGF2~n6n>KK@CppjzSLLolM7oz z6jiZLsh)~zP}JSKW`kd^nlwB+$&;m_keq{QnmdmeJ)IF5`JwNzJkK4UfQd=--8@73 z)E7cuM4hn9mdQb7(qNV06OJBx^l(CK)*8_cb+xGDYP(NjqA$tGu85Cbt*?!lx}uX@ zyuFcg>o|fVqWYi+t&*wUU&N;}y+i)Rq=1o0Sa(r)1BG;+Gjmc!AkZlix-kHUnX}%D zl$w$UpZ=&vQn70{|E2eOM(6V+-xK8NS5F`i4_xQTE8*iE z8IuLQ@OosURk~sS1=PD}|5ewdecG+c`d& z9DJ+AY$v{wl|PQC*>*ARSM~Px5e(3w`#hVtJl1;hNXb$Sv>^GKB(eQmPtb8YE$^#h zC8r}=;3HhN@L;^@{v8@NJ#<#gCFAiQQ)8(z>ywcO>_X~AT6+WZv6v9lkaakTAH+La zPa1P}N&2!HR1Z&Yt^Iu_$l+%7tv<;>^fyM&vLlF^SUTs~v_grKMUFUYKurvZwhs%g zebAkKkF64|1wI>wCt+!bEx8f9>Y(Zd@`zKuw{1 zQZh2lEdj@Wom_CHOl&X{Jjm47$4Jk1Bs{;T=NKBdQ(Ku+KJ2<^Udub_gQB$>ESB>; zX!k_#Aqx&RvaVg^=I=PgVtU?3e}C$s%@SlqK!lm@CD%VXV8lP5w{nVXhfk!{Gc zS|4-ZA*KAJeYowq1nC`PUMfqtn}nl=?pp1qwTbKGxZd@i)Rg<7En#6PBTgymJL}r@ z4YN^I?rUwIB+@IDm+=iJP85oW=mA{I-MF;ydF_2t`s#XisrB~#A3hqcKnOaLG)z<(TDvDy+<0=MDl`hh4EXj_Bc??AN~S5qiw!j>h-Hhl#$>Rm?%kmGi+s(s z@9vw^bvp}OLhz~+j+s(NMn(z?SF+2h*7|WXuU?kUVOk$PWQs*E`tnH7F>*0CXIw>g zzE3X0?}mo!*~vLi@XB?tlslO>HmOOs4kp)!>n0@v)1uNG_H+Iy^&N7W9-L(l(};yy zF4_F-?%@qM&jKswO<6dL16u3mIUtEJYnb)+JSS9U&sn`{N6XZ8gqY{v!;Qy;ZIVd% z(M-h;dbtk1iB=r={IpU5dL?n5i|%q%eDZ-qF0UdxU_^9lVD(;_;pSevZvc%m7MM6P z_#Jcb@Qe8S&Gf&9poX3|QRf)f}z6(fwSdRL~q*6vReBF$c-_JDZ^#<#b74cwmXn zXJ8IW8MwsuKWAKJl47ekc)mcx3=yJY`0S%PbVjr!W_R%46n4RJZVR8vHD7j zHf7}S*=(BANw0lK9hbhmKjE)LV`IZ()%iK*0gTq2C81m4^k~i&eH6bl+c{+xeEJZj z&Hs{xM8tf(DTr~?aSi1Yr#{FVFkW6ke?4z?fzqJ%c&j0Y&@Ca0;2W2*E zW@&9eU9x+q4jeB51G&^`&xcs~82?5fCKtUoM{v~>2O40WLSR|eF>6?KcC|-P zXKGh+TA{&{p{rdeOjnS7loD4qX?-R*GF`fBGW@1iQfVnaHNT=QvSnqwnc~u=Iez_$ zr}^L*QBYH7D~Ro>mOITWf0B^(Gbnb_^X^{`I-Nb3cN;(>-G{D%Q)>8&OzPEN&8<(Ezh4rv(~p~*?` zs;y)caK&qDD?DU`Szs?FLUIcB10AUn;#RB?$BcTx*MVjkD^{yCh(YZYyT3~M^hvn* zDwA41;|vJi%%M_FwYr_%hp}FZBBm=>YHTl^J)5$-yIbI~feATFAe9_1bE(HRyj26P zsgq5oL{OjRi3{$;`}x7cdgnOnMJb%$m%*!jBnl=n9&_E{}il%PWU>*53B|O_LWNxCTwY!-c1E6%*<@+ zXuQFHkp$8T1kXC)vItr(nwRkC`Z11~Io10}3JMB=;o$iAMJ)H_$Q#3dv;ZtOZ#v-L z67YXml=im*6{iIUdK08-z>vrJ zpV-^03FHS#GVpT1CXab%%BRVfuY8zh3)@`x-o29^If;l%1IIj?*Q)c~RoHh9BdllB z<2{rgJV?06XZ_geEaHibmX;PsQM;cVB7)!7+}*F*7fgF`V*T?=lgoXtGJyvDZ>Km7 zct!m1{@#w|aD@xxYAYETB^enRwc-07=2U5OiSB;Q;C(^G^J^h0vWV{6Wo*z0T=Scg z=MrG?N%RPJOK)|{3c*h;5sH9)NT$u5r@xZ?_s|94z}VmM)Dqh%^;DS z($7IhHwf-1fx5S?c+^dfUzl9i-s&6yZV23)ey#~ zD}hyN#4|yBUi0?tSj$qrK!2sx-sskVZ@f26i1~*;UYB%eFo;*~vh>NA=BXuo-??A= z3Ia2*)pLS7VFu9^3NSN8rTfQ*4mxE7qvCCx00|E^jmC*EVK4S%rW4%UcgGy10-fd| zwqjf|`?mBWbokiBNiH=>jq1D}0%%hrHN7v`F6Z9)pt8K)9^h9r55}pdO z{1iFBBSC{)S97m$$oODq2QjSVz;M=XA}$j`2oq6bOfRzc_FS%R22d*VtxcT-OXIn- zSx$hNHQB=KPFaG5X5aY9NXdGdRnnUVnwvkXqE7BREmq#(o%_(tPSOw+oyoh8&L!6KDsGtKCn?3anQZFJ9xAfi_ zm~|`Y6sw{nFcLFD3mvHu~APo1Q&f zwB>r7F$Oyu>G8ml(z9LMXB~I?xs())3Io@uwur?qdEXW55Z zz-n{6U72vjQA!%ESe>+LOrHY4a!Iyv2&1MQ_~xx@^VSyh%h)c^uTx42axe~OeS13l zkTsw3TA%B}`|vuMku04XQUt^s7sJ{2gMf*1R_hJYX2G$?qW!5T&YnuRT8W(bxLgwX z^JYI3-hAacK0cl*!Ca-WylkXZadW08%|EVc$2$CG*V5?LK!_;|b}qL{J+bIM=iJKj zN)Lq|YgdbT7kGcDFM08U$`E#T@`bPu;0%$iQ7;q8Lx_^1C*Q<@cgukI!_S{XLPJxx zS0^o39v!=Ms7A_~57O;_9!qBd;Sg3%lc-b69K+>0xD^0t2}j%md}i)+w}Ablf>4_( 
z7w~40llcv@^5YlI|9*=Q*gS(`>+Ti>QCsf2bmnci=C!`fk?4pf`#-M-!lD=Ksk-)o zm81pfjb&MD0e4Xd^%>-9Wr`4!p{!PA9P5n38`((;9>RTwBY_ORA5PkzXBB&(@Ze1w_G~WN+ zztTWc#2A`b`)eqa==9E}5YJsY#v5!3HY}Q>E0(yECpvxk4pITitJ{@WM{ydxRIl0# zYmUxmm~^@>;W<=Q*@^END(~Ltl(FO|U^qBt(DTz5LVMts$@g+-C@E6Y7owD}<6dxq z&s(e@i>}|F00YzeA=;L$ zS*?%5F5SChBZ3EOK&?Gi-d?M#X5o8|lG1Wvuxw>@wd?iY*DtLTKmlash1VH%fDy!2 zxY!$;np%E*aTI7^-@rf|Sk5*f)%y<;a;jH+R|$Rr;aITIxra;z2`aG#zf_rMtRPw; zJs?e&Y6A5;a>&Zsmf^I!Wt**ArhMjxJQi?Ak;|%yROk0PzfcHomM>Q60Bzl0WT~;9 ze(dL%x3V46v_9?$I_7+%jT?Vu;Mtwvx}f%>=8@wi5$*QMow&aDvMQ-*Q(qJBP*BZQ zEX}pW3U$MLA$-b6OM4fm=NtYH|rhF`~KMhZ`Ew#pmGy2%b)ifW4}s zVuHsWp7Gi>ama-Y*gWzpT!!!bMNB0005s|p3KU7BJXky^v|QAutFL3EW&b*->Dcd+ zJ>bC4S0e7djgJ?xyU=`c*bG8vvXTLpI#5c<4s*R6mEcEXR@;QyT_DjAj7Dp~Xc$Q? z|Lmfixp@ZQU{$y%uM9*Di2M2C+CVAdWB4TZh+=x|V&$?bm~2r|(ZL%E56}Db{e6Xz zo8dn7=`(a4e0Dt1t?K4O)+M<95mz?0+>@duooQ3P6wDxIKL%k=vrjLNhsK@1bSVp{ z^DjpED7e5AfBp3iFg_umBm(#Ngn)4gROc=+c%q0X`iTHnS5<6nm$gQ^(iLb@)6#kr zuOZkCZrys8rEVEILUZQKb-%h#m8WmTPK=Eu2X#5k}CR_3>iNV`@4VP!UbP{ ze>G`oU*y`}Qjh zS_LNNUlVRy^cQ9bI!q;$3?pTXjg2pIo4q5cRXo_=CEm-$BGnHAk;du@4RZlH8jkbJ-PY^U@mC_Aif`1 zx$HCc^yA7nK@1hyl@=LsBBy@M<@ZQg2X6T(;#LqckiGvR>h7PDU1oMacz&Yj;nAZ_ zt<79lNi9xvP5!z8qxk^rP=Jx`(M4s2kwC~rgrM-$!ghDb+wtjPIr`Zkg9RxFAu>*d z-^3+54_^hws^EGL|GhsH>O&ECoSwxw>UsVEh!*&54w)Jy&1`Z=4 z5|6XMiEIf_%MLrJ&{rOBmmAru&<9pYuj{?6+d;6W(0M|2)P7GC#Pwy@!(Vw#B=-1J2mTSNLxIIX`L)$nW+o?MIrm7iGU+{&!$q zUDc4FD0)?tq~u)`MBwOYIVt~)SVQCn!9i;(C;z1D*9k&{>%qC5P0qt%;1Tq7Sj3e4 zyGZrD-kqw%r;9~W*RuSJG2TIl795Tua1EMDw7WC!5D$6GvjbsUwub5XZUkDr_n`kk zjXp!@4P~bLY|c&=Uaw3~_y@%#lu)u_-P!hsYYfqxj_Y z>Vq5|8dqK5NwPL9kuEOg6Ik;TCrKT#+H{{S;nUq7+5My7o%R=7Mc+Ktr4~V| zLT2)N>+<>Hg1A~$B;v_WD-jgLRb^e!hplvVUWi8s`{w562GEG4{AcP?@vZy zczBqJg{3mVUPf9PTWqDX5d17Ua`i_ra>MIjDmk~x4o$50iavVWf@$w~q!1Zg_Sos+cPm`crx%i*l&rZ@e zzrj@P36d$_w{L%Fok6r+;p5W)Yc^3!GaHP@!7c>VzfWb&OOQ$d@A5O=|)gfqUHz75{-W{;%l2?K3xrMZ2H2}B$Xbfh~HgFztq_FtKcXZYf z2xP(`Tr-a&>w+&XA)kh_QU9=C|Bb-1R?qi>7|&Df$*pI{oXT~TzI`*1m4X zzk~d4U?Ui?=kCt4={?yE;leX2k+nunzsEr2r~yMoIebu)q3*C-N(j3JpN{nVW8D-R zj7^L)@=z#o_ko2W9-4a6f3~Wb(y7;f0g11HDtR54yUlL>?B73ffn^S1{TPe=swxPz zGZ2RSXWHcKR)hR{-IYS+?f;x-lmk589FUeqQ`b6hL)+z_2RR_Tq6gb{EL!VcE7|J* z$O}9?OeF0E{ut3`#7Ng~;zOI?N2nVJ^mmWdaMos=+o#W}JYN5vl-s$FkHX?WuSyqo zH~N$et$)t~A;5pOn3vo<|4!=pF^BRBaTbVzgQ`&WzDmJAM&Pp9ulIbBpX?-QH?vB5 zR9HHFe#hxw^B#Tv9m={Y(6}Y&fl4RgD?04>^2){vr;xh;KRfl)yqEREqjEqw4BAYSE`&(mynV=N1z~#hx z>~EV&DO*^efszDnkX9~|Q^KiyhzA7jKabJFF$IiKpf0N3U(eMpcgp5{-1`&95tCoW zHia@5nY3QqgQRpkjJaMzM1DUA0C5TkGnI6TA9uXJF1ZG|?`6Awzea<0h=Qgng}~-& zS0amEczKOaPNwJOu~R^?ZkS~kDc>OIHn12+OKMR!O;|I)2CI}<57A0pzxVcQXR3_e zzQpg5ZmGoMd=D@=CdZuL65hCunch!H=|rk+loW-UA^@E2X5QB%geXoLVpi8mTbz@!^z& zo{T;N?sbm8t+BCeGy$PVdGyGUFBU~{%j-Z96Lu_^jI^R62n>o#%=9_g~ z1FMVvPgaJAm~2V*I3k^Ya&4Q(vd63|P3|Hzr%2|tpODpa84sQ5O!d<)ew+awN+lF* z2ud-HbGi4h{;iUsVWJPog(LtP7*>(rPdp~wo>u~jDtXjT%e0H`WW~8}Xr*gss%Nh? 
Date: Thu, 14 Sep 2023 14:22:03 +0200
Subject: [PATCH 062/322] Update quality metrics index docs

---
 doc/modules/qualitymetrics.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/doc/modules/qualitymetrics.rst b/doc/modules/qualitymetrics.rst
index 8c7c0a2cc3..447d83db52 100644
--- a/doc/modules/qualitymetrics.rst
+++ b/doc/modules/qualitymetrics.rst
@@ -25,9 +25,11 @@ For more details about each metric and it's availability and use within SpikeInt
     :glob:
 
     qualitymetrics/amplitude_cutoff
+    qualitymetrics/amplitude_cv
     qualitymetrics/amplitude_median
     qualitymetrics/d_prime
     qualitymetrics/drift
+    qualitymetrics/firing_range
     qualitymetrics/firing_rate
     qualitymetrics/isi_violations
     qualitymetrics/isolation_distance

From f013828bf4cc1363518fdc0e7940cfac07555149 Mon Sep 17 00:00:00 2001
From: Alessio Buccino
Date: Thu, 14 Sep 2023 17:20:49 +0200
Subject: [PATCH 063/322] Allow MergeUnitsSorting to handle tuples

---
 src/spikeinterface/curation/mergeunitssorting.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py
index 264ac3a56d..6baa68b0da 100644
--- a/src/spikeinterface/curation/mergeunitssorting.py
+++ b/src/spikeinterface/curation/mergeunitssorting.py
@@ -12,7 +12,7 @@ class MergeUnitsSorting(BaseSorting):
     ----------
     parent_sorting: Recording
         The sorting object
-    units_to_merge: list of lists
+    units_to_merge: list/tuple of lists/tuples
        A list of lists for every merge group. Each element needs to have at least two elements
        (two units to merge), but it can also have more (merge multiple units at once).
    new_unit_ids: None or list
@@ -24,6 +24,7 @@
        Default: 'keep'
    delta_time_ms: float or None
        Number of ms to consider for duplicated spikes.
None won't check for duplications + Returns ------- sorting: Sorting @@ -33,7 +34,7 @@ class MergeUnitsSorting(BaseSorting): def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties_policy="keep", delta_time_ms=0.4): self._parent_sorting = parent_sorting - if not isinstance(units_to_merge[0], list): + if not isinstance(units_to_merge[0], (list, tuple)): # keep backward compatibility : the previous behavior was only one merge units_to_merge = [units_to_merge] From 8349b90593622af022fa6b80ede0bc021296e5d6 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 14 Sep 2023 21:07:04 +0200 Subject: [PATCH 064/322] implement proof of concept merge_clusters/split_clusters from tridesclous and columbia codes. --- src/spikeinterface/core/globals.py | 2 +- src/spikeinterface/core/job_tools.py | 60 ++- src/spikeinterface/core/node_pipeline.py | 7 +- .../sortingcomponents/clustering/clean.py | 45 ++ .../sortingcomponents/clustering/merge.py | 500 ++++++++++++++++++ .../sortingcomponents/clustering/split.py | 260 +++++++++ .../sortingcomponents/clustering/tools.py | 196 +++++++ .../sortingcomponents/tests/test_split.py | 12 + 8 files changed, 1074 insertions(+), 8 deletions(-) create mode 100644 src/spikeinterface/sortingcomponents/clustering/clean.py create mode 100644 src/spikeinterface/sortingcomponents/clustering/merge.py create mode 100644 src/spikeinterface/sortingcomponents/clustering/split.py create mode 100644 src/spikeinterface/sortingcomponents/clustering/tools.py create mode 100644 src/spikeinterface/sortingcomponents/tests/test_split.py diff --git a/src/spikeinterface/core/globals.py b/src/spikeinterface/core/globals.py index e5581c7a67..d039206296 100644 --- a/src/spikeinterface/core/globals.py +++ b/src/spikeinterface/core/globals.py @@ -96,7 +96,7 @@ def is_set_global_dataset_folder(): ######################################## global global_job_kwargs -global_job_kwargs = dict(n_jobs=1, chunk_duration="1s", progress_bar=True) +global_job_kwargs = dict(n_jobs=1, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) global global_job_kwargs_set global_job_kwargs_set = False diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index c0ee77d2fd..3e25d64983 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -380,10 +380,6 @@ def run(self): self.gather_func(res) else: n_jobs = min(self.n_jobs, len(all_chunks)) - ######## Do you want to limit the number of threads per process? - ######## It has to be done to speed up numpy a lot if multicores - ######## Otherwise, np.dot will be slow. 
How to do that, up to you - ######## This is just a suggestion, but here it adds a dependency # parallel with ProcessPoolExecutor( @@ -436,3 +432,59 @@ def function_wrapper(args): else: with threadpool_limits(limits=max_threads_per_process): return _func(segment_index, start_frame, end_frame, _worker_ctx) + + +# Here some utils + + +class MockFuture: + """A non-concurrent class for mocking the concurrent.futures API.""" + + def __init__(self, f, *args): + self.f = f + self.args = args + + def result(self): + return self.f(*self.args) + + +class MockPoolExecutor: + """A non-concurrent class for mocking the concurrent.futures API.""" + + def __init__( + self, + max_workers=None, + mp_context=None, + initializer=None, + initargs=None, + context=None, + ): + if initializer is not None: + initializer(*initargs) + self.map = map + self.imap = map + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return + + def submit(self, f, *args): + return MockFuture(f, *args) + + +class MockQueue: + """Another helper class for turning off concurrency when debugging.""" + + def __init__(self): + self.q = [] + self.put = self.q.append + self.get = lambda: self.q.pop(0) + + +def get_poolexecutor(n_jobs): + if n_jobs == 1: + return MockPoolExecutor + else: + return ProcessPoolExecutor diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index b11f40a441..cd858da1e1 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -436,6 +436,7 @@ def run_node_pipeline( job_name="pipeline", mp_context=None, gather_mode="memory", + gather_kwargs={}, squeeze_output=True, folder=None, names=None, @@ -452,7 +453,7 @@ def run_node_pipeline( if gather_mode == "memory": gather_func = GatherToMemory() elif gather_mode == "npy": - gather_func = GatherToNpy(folder, names) + gather_func = GatherToNpy(folder, names, **gather_kwargs) else: raise ValueError(f"wrong gather_mode : {gather_mode}") @@ -597,9 +598,9 @@ class GatherToNpy: * create the npy v1.0 header at the end with the correct shape and dtype """ - def __init__(self, folder, names, npy_header_size=1024): + def __init__(self, folder, names, npy_header_size=1024, exist_ok=False): self.folder = Path(folder) - self.folder.mkdir(parents=True, exist_ok=False) + self.folder.mkdir(parents=True, exist_ok=exist_ok) assert names is not None self.names = names self.npy_header_size = npy_header_size diff --git a/src/spikeinterface/sortingcomponents/clustering/clean.py b/src/spikeinterface/sortingcomponents/clustering/clean.py new file mode 100644 index 0000000000..cbded0c49f --- /dev/null +++ b/src/spikeinterface/sortingcomponents/clustering/clean.py @@ -0,0 +1,45 @@ +import numpy as np + +from .tools import FeaturesLoader, compute_template_from_sparse + +# This is work in progress ... 
+ + +def clean_clusters( + peaks, + peak_labels, + recording, + features_dict_or_folder, + peak_sign="neg", +): + total_channels = recording.get_num_channels() + + if isinstance(features_dict_or_folder, dict): + features = features_dict_or_folder + else: + features = FeaturesLoader(features_dict_or_folder) + + clean_labels = peak_labels.copy() + + sparse_wfs = features["sparse_wfs"] + sparse_mask = features["sparse_mask"] + + labels_set = np.setdiff1d(peak_labels, [-1]).tolist() + n = len(labels_set) + + count = np.zeros(n, dtype="int64") + for i, label in enumerate(labels_set): + count[i] = np.sum(peak_labels == label) + print(count) + + templates = compute_template_from_sparse(peaks, peak_labels, labels_set, sparse_wfs, sparse_mask, total_channels) + + if peak_sign == "both": + max_values = np.max(np.abs(templates), axis=(1, 2)) + elif peak_sign == "neg": + max_values = -np.min(templates, axis=(1, 2)) + elif peak_sign == "pos": + max_values = np.max(templates, axis=(1, 2)) + print(max_values) + + return clean_labels diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py new file mode 100644 index 0000000000..2e839ef0fc --- /dev/null +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -0,0 +1,500 @@ +from pathlib import Path +from multiprocessing import get_context +from concurrent.futures import ProcessPoolExecutor +from threadpoolctl import threadpool_limits +from tqdm.auto import tqdm + +import scipy.spatial +from sklearn.decomposition import PCA +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from hdbscan import HDBSCAN + +import numpy as np +import networkx as nx + +from spikeinterface.core.job_tools import get_poolexecutor, fix_job_kwargs + + +from .isocut5 import isocut5 + +from .tools import aggregate_sparse_features, FeaturesLoader, compute_template_from_sparse + + +def merge_clusters( + peaks, + peak_labels, + recording, + features_dict_or_folder, + radius_um=70, + method="waveforms_lda", + method_kwargs={}, + **job_kwargs, +): + """ + Merge cluster using differents methods. + + Parameters + ---------- + peaks: numpy.ndarray 1d + detected peaks (or a subset) + peak_labels: numpy.ndarray 1d + original label before merge + peak_labels.size == peaks.size + recording: Recording object + A recording object + features_dict_or_folder: dict or folder + A dictionary of features precomputed with peak_pipeline or a folder containing npz file for features. + method: str + The method used + method_kwargs: dict + Option for the method. + Returns + ------- + merge_peak_labels: numpy.ndarray 1d + New vectors label after merges. + peak_shifts: numpy.ndarray 1d + A vector of sample shift to be reverse applied on original sample_index on peak detection + Negative shift means too early. + Posituve shift means too late. 
+ So the correction must be applied like this externaly: + final_peaks = peaks.copy() + final_peaks['sample_index'] -= peak_shifts + + """ + + job_kwargs = fix_job_kwargs(job_kwargs) + + features = FeaturesLoader.from_dict_or_folder(features_dict_or_folder) + sparse_wfs = features["sparse_wfs"] + sparse_mask = features["sparse_mask"] + + labels_set, pair_mask, pair_shift, pair_values = find_merge_pairs( + peaks, + peak_labels, + recording, + features_dict_or_folder, + sparse_wfs, + sparse_mask, + radius_um=radius_um, + method=method, + method_kwargs=method_kwargs, + **job_kwargs, + ) + + # merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="partial") + merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full") + + group_shifts = resolve_final_shifts(labels_set, merges, pair_mask, pair_shift) + + # apply final label and shift + merge_peak_labels = peak_labels.copy() + peak_shifts = np.zeros(peak_labels.size, dtype="int64") + for merge, shifts in zip(merges, group_shifts): + label0 = merge[0] + mask = np.in1d(peak_labels, merge) + merge_peak_labels[mask] = label0 + for l, label1 in enumerate(merge): + if l == 0: + # the first label is the reference (shift=0) + continue + peak_shifts[peak_labels == label1] = shifts[l] + + return merge_peak_labels, peak_shifts + + +def resolve_final_shifts(labels_set, merges, pair_mask, pair_shift): + labels_set = list(labels_set) + + group_shifts = [] + for merge in merges: + shifts = np.zeros(len(merge), dtype="int64") + + label_inds = [labels_set.index(label) for label in merge] + + label0 = merge[0] + ind0 = label_inds[0] + + # First find relative shift to label0 (l=0) in the subgraph + local_pair_mask = pair_mask[label_inds, :][:, label_inds] + local_pair_shift = None + G = None + for l, label1 in enumerate(merge): + if l == 0: + # the first label is the reference (shift=0) + continue + ind1 = label_inds[l] + if local_pair_mask[0, l]: + # easy case the pair label0<>label1 was existing + shift = pair_shift[ind0, ind1] + else: + # more complicated case need to find intermediate label and propagate the shift!! + if G is None: + # the the graph only once and only if needed + G = nx.from_numpy_array(local_pair_mask | local_pair_mask.T) + local_pair_shift = pair_shift[label_inds, :][:, label_inds] + local_pair_shift += local_pair_shift.T + + shift_chain = nx.shortest_path(G, source=l, target=0) + shift = 0 + for i in range(len(shift_chain) - 1): + shift += local_pair_shift[shift_chain[i + 1], shift_chain[i]] + shifts[l] = shift + + group_shifts.append(shifts) + + return group_shifts + + +def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full"): + """ + Agglomerate merge pairs into final merge groups. + + The merges are ordered by label. 
+ + """ + + labels_set = np.array(labels_set) + + merges = [] + + graph = nx.from_numpy_matrix(pair_mask | pair_mask.T) + # put real nodes names for debugging + maps = dict(zip(np.arange(labels_set.size), labels_set)) + graph = nx.relabel_nodes(graph, maps) + + groups = list(nx.connected_components(graph)) + for group in groups: + if len(group) == 1: + continue + sub_graph = graph.subgraph(group) + # print(group, sub_graph) + cliques = list(nx.find_cliques(sub_graph)) + if len(cliques) == 1 and len(cliques[0]) == len(group): + # the sub graph is full connected: no ambiguity + # merges.append(labels_set[cliques[0]]) + merges.append(cliques[0]) + elif len(cliques) > 1: + # the subgraph is not fully connected + if connection_mode == "full": + # node merge + pass + elif connection_mode == "partial": + group = list(group) + # merges.append(labels_set[group]) + merges.append(group) + elif connection_mode == "clique": + raise NotImplementedError + else: + raise ValueError + + # DEBUG = True + DEBUG = False + if DEBUG: + import matplotlib.pyplot as plt + + fig = plt.figure() + nx.draw_networkx(sub_graph) + plt.show() + + DEBUG = True + # DEBUG = False + if DEBUG: + import matplotlib.pyplot as plt + + fig = plt.figure() + nx.draw_networkx(graph) + plt.show() + + # ensure ordered label + merges = [np.sort(merge) for merge in merges] + + return merges + + +def find_merge_pairs( + peaks, + peak_labels, + recording, + features_dict_or_folder, + sparse_wfs, + sparse_mask, + radius_um=70, + method="waveforms_lda", + method_kwargs={}, + **job_kwargs + # n_jobs=1, + # mp_context="fork", + # max_threads_per_process=1, + # progress_bar=True, +): + """ + Searh some possible merge 2 by 2. + """ + job_kwargs = fix_job_kwargs(job_kwargs) + + # features_dict_or_folder = Path(features_dict_or_folder) + + # peaks = features_dict_or_folder['peaks'] + total_channels = recording.get_num_channels() + + # sparse_wfs = features['sparse_wfs'] + + labels_set = np.setdiff1d(peak_labels, [-1]).tolist() + n = len(labels_set) + pair_mask = np.triu(np.ones((n, n), dtype="bool")) & ~np.eye(n, dtype="bool") + pair_shift = np.zeros((n, n), dtype="int64") + pair_values = np.zeros((n, n), dtype="float64") + + # compute template (no shift at this step) + + templates = compute_template_from_sparse( + peaks, peak_labels, labels_set, sparse_wfs, sparse_mask, total_channels, peak_shifts=None + ) + + max_chans = np.argmax(np.max(np.abs(templates), axis=1), axis=1) + + channel_locs = recording.get_channel_locations() + template_locs = channel_locs[max_chans, :] + template_dist = scipy.spatial.distance.cdist(template_locs, template_locs, metric="euclidean") + + pair_mask = pair_mask & (template_dist < radius_um) + indices0, indices1 = np.nonzero(pair_mask) + + n_jobs = job_kwargs["n_jobs"] + mp_context = job_kwargs["mp_context"] + max_threads_per_process = job_kwargs["max_threads_per_process"] + progress_bar = job_kwargs["progress_bar"] + + Executor = get_poolexecutor(n_jobs) + + with Executor( + max_workers=n_jobs, + initializer=find_pair_worker_init, + mp_context=get_context(mp_context), + initargs=(recording, features_dict_or_folder, peak_labels, method, method_kwargs, max_threads_per_process), + ) as pool: + jobs = [] + for ind0, ind1 in zip(indices0, indices1): + label0 = labels_set[ind0] + label1 = labels_set[ind1] + jobs.append(pool.submit(find_pair_function_wrapper, label0, label1)) + + if progress_bar: + iterator = tqdm(jobs, desc=f"find_merge_pairs with {method}", total=len(jobs)) + else: + iterator = jobs + + for res in iterator: 
+ is_merge, label0, label1, shift, merge_value = res.result() + ind0 = labels_set.index(label0) + ind1 = labels_set.index(label1) + + pair_mask[ind0, ind1] = is_merge + if is_merge: + pair_shift[ind0, ind1] = shift + pair_values[ind0, ind1] = merge_value + + pair_mask = pair_mask & (template_dist < radius_um) + indices0, indices1 = np.nonzero(pair_mask) + + return labels_set, pair_mask, pair_shift, pair_values + + +def find_pair_worker_init( + recording, features_dict_or_folder, original_labels, method, method_kwargs, max_threads_per_process +): + global _ctx + _ctx = {} + + _ctx["recording"] = recording + _ctx["original_labels"] = original_labels + _ctx["method"] = method + _ctx["method_kwargs"] = method_kwargs + _ctx["method_class"] = find_pair_method_dict[method] + _ctx["max_threads_per_process"] = max_threads_per_process + + # if isinstance(features_dict_or_folder, dict): + # _ctx["features"] = features_dict_or_folder + # else: + # _ctx["features"] = FeaturesLoader(features_dict_or_folder) + + _ctx["features"] = FeaturesLoader.from_dict_or_folder(features_dict_or_folder) + + _ctx["peaks"] = _ctx["features"]["peaks"] + + +def find_pair_function_wrapper(label0, label1): + global _ctx + with threadpool_limits(limits=_ctx["max_threads_per_process"]): + is_merge, label0, label1, shift, merge_value = _ctx["method_class"].merge( + label0, label1, _ctx["original_labels"], _ctx["peaks"], _ctx["features"], **_ctx["method_kwargs"] + ) + return is_merge, label0, label1, shift, merge_value + + +class WaveformsLda: + name = "waveforms_lda" + + @staticmethod + def merge( + label0, + label1, + original_labels, + peaks, + features, + waveforms_sparse_mask=None, + feature_name="sparse_tsvd", + projection="centroid", + criteria="diptest", + threshold_diptest=0.5, + threshold_percentile=80.0, + num_shift=2, + ): + if num_shift > 0: + assert feature_name == "sparse_wfs" + sparse_wfs = features[feature_name] + + assert waveforms_sparse_mask is not None + + (inds0,) = np.nonzero(original_labels == label0) + chans0 = np.unique(peaks["channel_index"][inds0]) + target_chans0 = np.flatnonzero(np.all(waveforms_sparse_mask[chans0, :], axis=0)) + + (inds1,) = np.nonzero(original_labels == label1) + chans1 = np.unique(peaks["channel_index"][inds1]) + target_chans1 = np.flatnonzero(np.all(waveforms_sparse_mask[chans1, :], axis=0)) + + if inds0.size <40 or inds1.size <40: + is_merge = False + merge_value = 0 + final_shift = 0 + return is_merge, label0, label1, final_shift, merge_value + + + target_chans = np.intersect1d(target_chans0, target_chans1) + + inds = np.concatenate([inds0, inds1]) + labels = np.zeros(inds.size, dtype="int") + labels[inds0.size :] = 1 + wfs, out = aggregate_sparse_features(peaks, inds, sparse_wfs, waveforms_sparse_mask, target_chans) + wfs = wfs[~out] + labels = labels[~out] + + cut = np.searchsorted(labels, 1) + wfs0_ = wfs[:cut, :, :] + wfs1_ = wfs[cut:, :, :] + + template0_ = np.mean(wfs0_, axis=0) + template1_ = np.mean(wfs1_, axis=0) + num_samples = template0_.shape[0] + + template0 = template0_[num_shift : num_samples - num_shift, :] + + wfs0 = wfs0_[:, num_shift : num_samples - num_shift, :] + + # best shift strategy 1 = max cosine + # values = [] + # for shift in range(num_shift * 2 + 1): + # template1 = template1_[shift : shift + template0.shape[0], :] + # norm = np.linalg.norm(template0.flatten()) * np.linalg.norm(template1.flatten()) + # value = np.sum(template0.flatten() * template1.flatten()) / norm + # values.append(value) + # best_shift = np.argmax(values) + + # best shift 
strategy 2 = min dist**2 + # values = [] + # for shift in range(num_shift * 2 + 1): + # template1 = template1_[shift : shift + template0.shape[0], :] + # value = np.sum((template1 - template0)**2) + # values.append(value) + # best_shift = np.argmin(values) + + # best shift strategy 3 : average delta argmin between channels + channel_shift = np.argmax(np.abs(template1_), axis=0) - np.argmax(np.abs(template0_), axis=0) + mask = np.abs(channel_shift) <= num_shift + channel_shift = channel_shift[mask] + if channel_shift.size > 0: + best_shift = int(np.round(np.mean(channel_shift))) + num_shift + else: + best_shift = num_shift + + wfs1 = wfs1_[:, best_shift : best_shift + template0.shape[0], :] + template1 = template1_[best_shift : best_shift + template0.shape[0], :] + + if projection == "lda": + wfs_0_1 = np.concatenate([wfs0, wfs1], axis=0) + flat_wfs = wfs_0_1.reshape(wfs_0_1.shape[0], -1) + feat = LinearDiscriminantAnalysis(n_components=1).fit_transform(flat_wfs, labels) + feat = feat[:, 0] + feat0 = feat[:cut] + feat1 = feat[cut:] + + elif projection == "centroid": + vector_0_1 = template1 - template0 + vector_0_1 /= np.sum(vector_0_1**2) + feat0 = np.sum((wfs0 - template0[np.newaxis, :, :]) * vector_0_1[np.newaxis, :, :], axis=(1, 2)) + feat1 = np.sum((wfs1 - template0[np.newaxis, :, :]) * vector_0_1[np.newaxis, :, :], axis=(1, 2)) + # feat = np.sum((wfs_0_1 - template0[np.newaxis, :, :]) * vector_0_1[np.newaxis, :, :], axis=(1, 2)) + feat = np.concatenate([feat0, feat1], axis=0) + + else: + raise ValueError(f"bad projection {projection}") + + if criteria == "diptest": + dipscore, cutpoint = isocut5(feat) + is_merge = dipscore < threshold_diptest + merge_value = dipscore + elif criteria == "percentile": + l0 = np.percentile(feat0, threshold_percentile) + l1 = np.percentile(feat1, 100.0 - threshold_percentile) + is_merge = l0 >= l1 + merge_value = l0 - l1 + else: + raise ValueError(f"bad criteria {criteria}") + + if is_merge: + final_shift = best_shift - num_shift + else: + final_shift = 0 + + DEBUG = True + # DEBUG = False + + if DEBUG and is_merge: + # if DEBUG: + import matplotlib.pyplot as plt + + flatten_wfs0 = wfs0.swapaxes(1, 2).reshape(wfs0.shape[0], -1) + flatten_wfs1 = wfs1.swapaxes(1, 2).reshape(wfs1.shape[0], -1) + + fig, axs = plt.subplots(ncols=2) + ax = axs[0] + ax.plot(flatten_wfs0.T, color="C0", alpha=0.01) + ax.plot(flatten_wfs1.T, color="C1", alpha=0.01) + m0 = np.mean(flatten_wfs0, axis=0) + m1 = np.mean(flatten_wfs1, axis=0) + ax.plot(m0, color="C0", alpha=1, lw=4, label=f"{label0} {inds0.size}") + ax.plot(m1, color="C1", alpha=1, lw=4, label=f"{label1} {inds1.size}") + + ax.legend() + + bins = np.linspace(np.percentile(feat, 1), np.percentile(feat, 99), 100) + + count0, _ = np.histogram(feat0, bins=bins) + count1, _ = np.histogram(feat1, bins=bins) + + ax = axs[1] + ax.plot(bins[:-1], count0, color="C0") + ax.plot(bins[:-1], count1, color="C1") + + ax.set_title(f"{dipscore:.4f} {is_merge}") + plt.show() + + + return is_merge, label0, label1, final_shift, merge_value + + +find_pair_method_list = [ + WaveformsLda, +] +find_pair_method_dict = {e.name: e for e in find_pair_method_list} diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py new file mode 100644 index 0000000000..411d8c2116 --- /dev/null +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -0,0 +1,260 @@ +from multiprocessing import get_context +from threadpoolctl import threadpool_limits +from tqdm.auto import tqdm + 
+from sklearn.decomposition import TruncatedSVD +from hdbscan import HDBSCAN + +import numpy as np + +from spikeinterface.core.job_tools import get_poolexecutor, fix_job_kwargs + +from .tools import aggregate_sparse_features, FeaturesLoader +from .isocut5 import isocut5 + + +def split_clusters( + peak_labels, + recording, + features_dict_or_folder, + method="hdbscan_on_local_pca", + method_kwargs={}, + recursive=False, + recursive_depth=None, + returns_split_count=False, + **job_kwargs, +): + """ + Run recusrsively or not in a multi process pool a local split method. + + Parameters + ---------- + peak_labels: numpy.array + Peak label before split + recording: Recording + Recording object + features_dict_or_folder: dict or folder + A dictionary of features precomputed with peak_pipeline or a folder containing npz file for features. + method: str + The method name + method_kwargs: dict + The method option + recursive: bool Default False + Reccursive or not. + recursive_depth: None or int + If recursive=True, then this is the max split per spikes. + returns_split_count: bool + Optionally return the split count vector. Same size as labels. + + Returns + ------- + new_labels: numpy.ndarray + The labels of peaks after split. + split_count: numpy.ndarray + Optionally returned + """ + + job_kwargs = fix_job_kwargs(job_kwargs) + n_jobs = job_kwargs["n_jobs"] + mp_context = job_kwargs["mp_context"] + progress_bar = job_kwargs["progress_bar"] + max_threads_per_process = job_kwargs["max_threads_per_process"] + + original_labels = peak_labels + peak_labels = peak_labels.copy() + split_count = np.zeros(peak_labels.size, dtype=int) + + Executor = get_poolexecutor(n_jobs) + + with Executor( + max_workers=n_jobs, + initializer=split_worker_init, + mp_context=get_context(mp_context), + initargs=(recording, features_dict_or_folder, original_labels, method, method_kwargs, max_threads_per_process), + ) as pool: + labels_set = np.setdiff1d(peak_labels, [-1]) + current_max_label = np.max(labels_set) + 1 + + jobs = [] + for label in labels_set: + peak_indices = np.flatnonzero(peak_labels == label) + if peak_indices.size > 0: + jobs.append(pool.submit(split_function_wrapper, peak_indices)) + + if progress_bar: + iterator = tqdm(jobs, desc=f"split_clusters with {method}", total=len(labels_set)) + else: + iterator = jobs + + for res in iterator: + is_split, local_labels, peak_indices = res.result() + if not is_split: + continue + + mask = local_labels >= 0 + peak_labels[peak_indices[mask]] = local_labels[mask] + current_max_label + peak_labels[peak_indices[~mask]] = local_labels[~mask] + + split_count[peak_indices] += 1 + + current_max_label += np.max(local_labels[mask]) + 1 + + if recursive: + if recursive_depth is not None: + # stop reccursivity when recursive_depth is reach + extra_ball = np.max(split_count[peak_indices]) < recursive_depth + else: + # reccurssive always + extra_ball = True + + if extra_ball: + new_labels_set = np.setdiff1d(peak_labels[peak_indices], [-1]) + for label in new_labels_set: + peak_indices = np.flatnonzero(peak_labels == label) + if peak_indices.size > 0: + jobs.append(pool.submit(split_function_wrapper, peak_indices)) + if progress_bar: + iterator.total += 1 + + if returns_split_count: + return peak_labels, split_count + else: + return peak_labels + + +global _ctx + + +def split_worker_init( + recording, features_dict_or_folder, original_labels, method, method_kwargs, max_threads_per_process +): + global _ctx + _ctx = {} + + _ctx["recording"] = recording + features_dict_or_folder + 
_ctx["original_labels"] = original_labels + _ctx["method"] = method + _ctx["method_kwargs"] = method_kwargs + _ctx["method_class"] = split_methods_dict[method] + _ctx["max_threads_per_process"] = max_threads_per_process + _ctx["features"] = FeaturesLoader.from_dict_or_folder(features_dict_or_folder) + _ctx["peaks"] = _ctx["features"]["peaks"] + + +def split_function_wrapper(peak_indices): + global _ctx + with threadpool_limits(limits=_ctx["max_threads_per_process"]): + is_split, local_labels = _ctx["method_class"].split( + peak_indices, _ctx["peaks"], _ctx["features"], **_ctx["method_kwargs"] + ) + return is_split, local_labels, peak_indices + + +class HdbscanOnLocalPca: + # @charlie : this is the equivalent of "herding_split()" in DART + # but simplified, flexible and renamed + + name = "hdbscan_on_local_pca" + + @staticmethod + def split( + peak_indices, + peaks, + features, + clusterer="hdbscan", + feature_name="sparse_tsvd", + neighbours_mask=None, + waveforms_sparse_mask=None, + min_size_split=25, + min_cluster_size=25, + min_samples=25, + n_pca_features=2, + ): + local_labels = np.zeros(peak_indices.size, dtype=np.int64) + + # can be sparse_tsvd or sparse_wfs + sparse_features = features[feature_name] + + assert waveforms_sparse_mask is not None + + # target channel subset is done intersect local channels + neighbours + local_chans = np.unique(peaks["channel_index"][peak_indices]) + target_channels = np.flatnonzero(np.all(neighbours_mask[local_chans, :], axis=0)) + + # TODO fix this a better way, this when cluster have too few overlapping channels + minimum_channels = 2 + if target_channels.size < minimum_channels: + return False, None + + aligned_wfs, dont_have_channels = aggregate_sparse_features( + peaks, peak_indices, sparse_features, waveforms_sparse_mask, target_channels + ) + + local_labels[dont_have_channels] = -2 + kept = np.flatnonzero(~dont_have_channels) + if kept.size < min_size_split: + return False, None + + aligned_wfs = aligned_wfs[kept, :, :] + + flatten_features = aligned_wfs.reshape(aligned_wfs.shape[0], -1) + + # final_features = PCA(n_pca_features, whiten=True).fit_transform(flatten_features) + # final_features = PCA(n_pca_features, whiten=False).fit_transform(flatten_features) + final_features = TruncatedSVD(n_pca_features).fit_transform(flatten_features) + + if clusterer == "hdbscan": + clust = HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, allow_single_cluster=True) + clust.fit(final_features) + possible_labels = clust.labels_ + elif clusterer == "isocut5": + dipscore, cutpoint = isocut5(final_features[:, 0]) + possible_labels = np.zeros(final_features.shape[0]) + if dipscore > 1.5: + mask = final_features[:, 0] > cutpoint + if np.sum(mask) > min_cluster_size and np.sum(~mask): + possible_labels[mask] = 1 + else: + return False, None + else: + raise ValueError(f"wrong clusterer {clusterer}") + + is_split = np.setdiff1d(possible_labels, [-1]).size > 1 + + # DEBUG = True + DEBUG = False + if DEBUG: + import matplotlib.pyplot as plt + + labels_set = np.setdiff1d(possible_labels, [-1]) + colors = plt.get_cmap("tab10", len(labels_set)) + colors = {k: colors(i) for i, k in enumerate(labels_set)} + colors[-1] = "k" + fix, axs = plt.subplots(nrows=2) + + flatten_wfs = aligned_wfs.swapaxes(1, 2).reshape(aligned_wfs.shape[0], -1) + + sl = slice(None, None, 10) + for k in np.unique(possible_labels): + mask = possible_labels == k + ax = axs[0] + ax.scatter(final_features[:, 0][mask][sl], final_features[:, 1][mask][sl], s=5, color=colors[k]) + + ax = 
axs[1] + ax.plot(flatten_wfs[mask][sl].T, color=colors[k], alpha=0.5) + + plt.show() + + if not is_split: + return is_split, None + + local_labels[kept] = possible_labels + + return is_split, local_labels + + +split_methods_list = [ + HdbscanOnLocalPca, +] +split_methods_dict = {e.name: e for e in split_methods_list} diff --git a/src/spikeinterface/sortingcomponents/clustering/tools.py b/src/spikeinterface/sortingcomponents/clustering/tools.py new file mode 100644 index 0000000000..9a537ab8a8 --- /dev/null +++ b/src/spikeinterface/sortingcomponents/clustering/tools.py @@ -0,0 +1,196 @@ +from pathlib import Path +from typing import Any +import numpy as np + + +# TODO find a way to attach a a sparse_mask to a given features (waveforms, pca, tsvd ....) + + +class FeaturesLoader: + """ + Feature can be computed in memory or in a folder contaning npy files. + + This class read the folder and behave like a dict of array lazily. + + Parameters + ---------- + feature_folder + + preload + + """ + + def __init__(self, feature_folder, preload=["peaks"]): + self.feature_folder = Path(feature_folder) + + self.file_feature = {} + self.loaded_features = {} + for file in self.feature_folder.glob("*.npy"): + name = file.stem + if name in preload: + self.loaded_features[name] = np.load(file) + else: + self.file_feature[name] = file + + def __getitem__(self, name): + if name in self.loaded_features: + return self.loaded_features[name] + else: + return np.load(self.file_feature[name], mmap_mode="r") + + @staticmethod + def from_dict_or_folder(features_dict_or_folder): + if isinstance(features_dict_or_folder, dict): + return features_dict_or_folder + else: + return FeaturesLoader(features_dict_or_folder) + + +def aggregate_sparse_features(peaks, peak_indices, sparse_feature, sparse_mask, target_channels): + """ + Aggregate sparse features that have unaligned channels and realigned then on target_channels. + + This is usefull to aligned back peaks waveform or pca or tsvd when detected a differents channels. + + + Parameters + ---------- + peaks + + peak_indices + + sparse_feature + + sparse_mask + + target_channels + + Returns + ------- + aligned_features: numpy.array + Aligned features. shape is (local_peaks.size, sparse_feature.shape[1], target_channels.size) + dont_have_channels: numpy.array + Boolean vector to indicate spikes that do not have all target channels to be taken in account + shape is peak_indices.size + """ + local_peaks = peaks[peak_indices] + + aligned_features = np.zeros( + (local_peaks.size, sparse_feature.shape[1], target_channels.size), dtype=sparse_feature.dtype + ) + dont_have_channels = np.zeros(peak_indices.size, dtype=bool) + + for chan in np.unique(local_peaks["channel_index"]): + sparse_chans = np.flatnonzero(sparse_mask[chan, :]) + peak_inds = np.flatnonzero(local_peaks["channel_index"] == chan) + if np.all(np.in1d(target_channels, sparse_chans)): + # peaks feature channel have all target_channels + source_chans = np.flatnonzero(np.in1d(sparse_chans, target_channels)) + aligned_features[peak_inds, :, :] = sparse_feature[peak_indices[peak_inds], :, :][:, :, source_chans] + else: + # some channel are missing, peak are not removde + dont_have_channels[peak_inds] = True + + return aligned_features, dont_have_channels + + +def compute_template_from_sparse( + peaks, labels, labels_set, sparse_waveforms, sparse_mask, total_channels, peak_shifts=None +): + """ + Compute template average from single sparse waveforms buffer. 
+ + Parameters + ---------- + peaks + + labels + + labels_set + + sparse_waveforms + + sparse_mask + + total_channels + + peak_shifts + + Returns + ------- + templates: numpy.array + Templates shape : (len(labels_set), num_samples, total_channels) + """ + n = len(labels_set) + + templates = np.zeros((n, sparse_waveforms.shape[1], total_channels), dtype=sparse_waveforms.dtype) + + for i, label in enumerate(labels_set): + peak_indices = np.flatnonzero(labels == label) + + local_chans = np.unique(peaks["channel_index"][peak_indices]) + target_channels = np.flatnonzero(np.all(sparse_mask[local_chans, :], axis=0)) + + aligned_wfs, dont_have_channels = aggregate_sparse_features( + peaks, peak_indices, sparse_waveforms, sparse_mask, target_channels + ) + + if peak_shifts is not None: + apply_waveforms_shift(aligned_wfs, peak_shifts[peak_indices], inplace=True) + + templates[i, :, :][:, target_channels] = np.mean(aligned_wfs[~dont_have_channels], axis=0) + + return templates + + +def apply_waveforms_shift(waveforms, peak_shifts, inplace=False): + """ + Apply a shift a spike level to realign waveforms buffers. + + This is usefull to compute template after merge when to cluster are shifted. + + A negative shift need the waveforms to be moved toward the right because the trough was too early. + A positive shift need the waveforms to be moved toward the left because the trough was too late. + + Note the border sample are left as before without move. + + Parameters + ---------- + + waveforms + + peak_shifts + + inplace + + Returns + ------- + aligned_waveforms + + + """ + + print("apply_waveforms_shift") + + if inplace: + aligned_waveforms = waveforms + else: + aligned_waveforms = waveforms.copy() + + shift_set = np.unique(peak_shifts) + assert max(np.abs(shift_set)) < aligned_waveforms.shape[1] + + for shift in shift_set: + if shift == 0: + continue + mask = peak_shifts == shift + wfs = waveforms[mask] + + if shift > 0: + aligned_waveforms[mask, :-shift, :] = wfs[:, shift:, :] + else: + aligned_waveforms[mask, -shift:, :] = wfs[:, :-shift, :] + + print("apply_waveforms_shift DONE") + + return aligned_waveforms diff --git a/src/spikeinterface/sortingcomponents/tests/test_split.py b/src/spikeinterface/sortingcomponents/tests/test_split.py new file mode 100644 index 0000000000..ed5e756469 --- /dev/null +++ b/src/spikeinterface/sortingcomponents/tests/test_split.py @@ -0,0 +1,12 @@ +import pytest +import numpy as np + +from spikeinterface.sortingcomponents.clustering.split import split_clusters + + +def test_split(): + pass + + +if __name__ == "__main__": + test_split() From 09c12de7ce912841ebc45acfaf16877dbe888d2f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 14 Sep 2023 21:17:10 +0200 Subject: [PATCH 065/322] back to tridesclous2 --- .../sorters/internal/tridesclous2.py | 294 ++++++++++++++---- 1 file changed, 239 insertions(+), 55 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 7cbf01cf68..fdec7c12b3 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -1,33 +1,50 @@ +import shutil from .si_based import ComponentsBasedSorter -from spikeinterface.core import load_extractor, BaseRecording, get_noise_levels, extract_waveforms, NumpySorting +from spikeinterface.core import (load_extractor, BaseRecording, get_noise_levels, + extract_waveforms, NumpySorting, get_channel_distances) +from spikeinterface.core.waveform_tools import 
extract_waveforms_to_single_buffer from spikeinterface.core.job_tools import fix_job_kwargs + from spikeinterface.preprocessing import bandpass_filter, common_reference, zscore +from spikeinterface.core.basesorting import minimum_spike_dtype import numpy as np +import pickle +import json class Tridesclous2Sorter(ComponentsBasedSorter): sorter_name = "tridesclous2" _default_params = { "apply_preprocessing": True, - "general": {"ms_before": 2.5, "ms_after": 3.5, "radius_um": 100}, + "waveforms" : {"ms_before": 0.5, "ms_after": 1.5, }, "filtering": {"freq_min": 300, "freq_max": 8000.0}, - "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 0.4}, + "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 0.8, "radius_um": 150.}, "hdbscan_kwargs": { "min_cluster_size": 25, "allow_single_cluster": True, "core_dist_n_jobs": -1, "cluster_selection_method": "leaf", }, - "waveforms": {"max_spikes_per_unit": 300}, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, - "localization": {"max_distance_um": 1000, "optimizer": "minimize_with_log_penality"}, + "svd": {"n_components": 6}, + "clustering": { + "split_radius_um": 40., + "merge_radius_um": 40., + }, + "templates": { + "ms_before": 1.5, + "ms_after": 2.5, + # "peak_shift_ms": 0.2, + }, "matching": { "peak_shift_ms": 0.2, + "radius_um": 100. }, - "job_kwargs": {}, + "job_kwargs": {"n_jobs":-1}, + "save_array": True, } @classmethod @@ -40,12 +57,18 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): job_kwargs = fix_job_kwargs(job_kwargs) job_kwargs["progress_bar"] = verbose - # this is importanted only on demand because numba import are too heavy - from spikeinterface.sortingcomponents.peak_detection import detect_peaks - from spikeinterface.sortingcomponents.peak_localization import localize_peaks - from spikeinterface.sortingcomponents.peak_selection import select_peaks - from spikeinterface.sortingcomponents.clustering import find_cluster_from_peaks from spikeinterface.sortingcomponents.matching import find_spikes_from_templates + from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, ExtractSparseWaveforms, PeakRetriever + from spikeinterface.sortingcomponents.peak_detection import detect_peaks, DetectPeakLocallyExclusive + from spikeinterface.sortingcomponents.peak_selection import select_peaks + from spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeGridConvolution + from spikeinterface.sortingcomponents.waveforms.temporal_pca import TemporalPCAProjection + + from spikeinterface.sortingcomponents.clustering.split import split_clusters + from spikeinterface.sortingcomponents.clustering.merge import merge_clusters + from spikeinterface.sortingcomponents.clustering.tools import compute_template_from_sparse + + from sklearn.decomposition import TruncatedSVD import hdbscan @@ -57,6 +80,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # preprocessing if params["apply_preprocessing"]: recording = bandpass_filter(recording_raw, **params["filtering"]) + # TODO what is the best about zscore>common_reference or the reverse recording = common_reference(recording) recording = zscore(recording, dtype="float32") noise_levels = np.ones(num_chans, dtype="float32") @@ -66,83 +90,243 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # detection detection_params = params["detection"].copy() - detection_params["radius_um"] = params["general"]["radius_um"] 
detection_params["noise_levels"] = noise_levels - peaks = detect_peaks(recording, method="locally_exclusive", **detection_params, **job_kwargs) + all_peaks = detect_peaks(recording, method="locally_exclusive", **detection_params, **job_kwargs) if verbose: - print("We found %d peaks in total" % len(peaks)) + print("We found %d peaks in total" % len(all_peaks)) # selection selection_params = params["selection"].copy() - selection_params["n_peaks"] = params["selection"]["n_peaks_per_channel"] * num_chans - selection_params["n_peaks"] = max(selection_params["min_n_peaks"], selection_params["n_peaks"]) - selection_params["noise_levels"] = noise_levels - some_peaks = select_peaks( - peaks, method="smart_sampling_amplitudes", select_per_channel=False, **selection_params - ) + n_peaks = params["selection"]["n_peaks_per_channel"] * num_chans + n_peaks = max(selection_params["min_n_peaks"], n_peaks) + peaks = select_peaks(all_peaks, method="uniform", n_peaks=n_peaks) if verbose: - print("We kept %d peaks for clustering" % len(some_peaks)) + print("We kept %d peaks for clustering" % len(peaks)) + + + # SVD for time compression + few_peaks = select_peaks(peaks, method="uniform", n_peaks=5000) + few_wfs = extract_waveform_at_max_channel(recording, few_peaks, **job_kwargs) + + wfs = few_wfs[:, :, 0] + tsvd = TruncatedSVD(params["svd"]["n_components"]) + tsvd.fit(wfs) + + model_folder = sorter_output_folder / 'tsvd_model' + + model_folder.mkdir(exist_ok=True) + with open(model_folder / "pca_model.pkl", "wb") as f: + pickle.dump(tsvd, f) + + ms_before = params["waveforms"]["ms_before"] + ms_after = params["waveforms"]["ms_after"] + model_params = { + "ms_before": ms_before, + "ms_after": ms_after, + "sampling_frequency": float(sampling_frequency), + } + with open(model_folder / "params.json", "w") as f: + json.dump(model_params, f) + + # features - # localization - localization_params = params["localization"].copy() - localization_params["radius_um"] = params["general"]["radius_um"] - peak_locations = localize_peaks( - recording, some_peaks, method="monopolar_triangulation", **localization_params, **job_kwargs + features_folder = sorter_output_folder / 'features' + node0 = PeakRetriever(recording, peaks) + + # node1 = ExtractDenseWaveforms(rec, parents=[node0], return_output=False, + # ms_before=0.5, + # ms_after=1.5, + # ) + + # node2 = LocalizeCenterOfMass(rec, parents=[node0, node1], return_output=True, + # local_radius_um=75.0, + # feature="ptp", ) + + # node2 = LocalizeGridConvolution(rec, parents=[node0, node1], return_output=True, + # local_radius_um=40., + # upsampling_um=5.0, + # ) + + node3 = ExtractSparseWaveforms(recording, parents=[node0], return_output=True, + ms_before=0.5, + ms_after=1.5, + radius_um=100.0, ) - # ~ print(peak_locations.dtype) + model_folder_path = sorter_output_folder / 'tsvd_model' + + node4 = TemporalPCAProjection(recording, parents=[node0, node3], return_output=True, + model_folder_path=model_folder_path) + + + # pipeline_nodes = [node0, node1, node2, node3, node4] + pipeline_nodes = [node0, node3, node4] - # features = localisations only - peak_features = np.zeros((peak_locations.size, 3), dtype="float64") - for i, dim in enumerate(["x", "y", "z"]): - peak_features[:, i] = peak_locations[dim] + output = run_node_pipeline(recording, pipeline_nodes, job_kwargs, gather_mode="npy", gather_kwargs=dict(exist_ok=True), + folder=features_folder, names=["sparse_wfs", "sparse_tsvd"]) - # clusering is hdbscan + # TODO make this generic in GatherNPY ??? 
+ sparse_mask = node3.neighbours_mask + np.save(features_folder/ 'sparse_mask.npy', sparse_mask) + np.save(features_folder/ 'peaks.npy', peaks) + - out = hdbscan.hdbscan(peak_features, **params["hdbscan_kwargs"]) - peak_labels = out[0] - mask = peak_labels >= 0 - labels = np.unique(peak_labels[mask]) + # Clustering: channel index > split > merge + split_radius_um = params["clustering"]["split_radius_um"] + neighbours_mask = get_channel_distances(recording) < split_radius_um + + original_labels = peaks['channel_index'] + + post_split_label, split_count = split_clusters( + original_labels, + recording, + features_folder, + method="hdbscan_on_local_pca", + method_kwargs=dict( + # clusterer="hdbscan", + clusterer="isocut5", + + feature_name="sparse_tsvd", + # feature_name="sparse_wfs", + + neighbours_mask=neighbours_mask, + waveforms_sparse_mask=sparse_mask, + min_size_split=50, + min_cluster_size=50, + min_samples=50, + n_pca_features=3, + ), + + recursive=True, + recursive_depth=3, + + returns_split_count=True, + **job_kwargs + + ) + + merge_radius_um = params["clustering"]["merge_radius_um"] + + post_merge_label, peak_shifts = merge_clusters( + peaks, + post_split_label, + recording, + features_folder, + radius_um=merge_radius_um, + + method="waveforms_lda", + method_kwargs=dict( + # neighbours_mask=neighbours_mask, + waveforms_sparse_mask=sparse_mask, + + # feature_name="sparse_tsvd", + feature_name="sparse_wfs", + + # projection='lda', + projection='centroid', + + # criteria='diptest', + # threshold_diptest=0.5, + criteria="percentile", + threshold_percentile=80., + + # num_shift=0 + num_shift=2, + + ), + **job_kwargs + ) + + # sparse_wfs = np.load(features_folder / "sparse_wfs.npy", mmap_mode="r") + + + new_peaks = peaks.copy() + new_peaks["sample_index"] -= peak_shifts + + labels_set = np.unique(post_merge_label) + labels_set = labels_set[labels_set >= 0] + mask = post_merge_label >= 0 - # extract waveform for template matching sorting_temp = NumpySorting.from_times_labels( - some_peaks["sample_index"][mask], peak_labels[mask], sampling_frequency + new_peaks["sample_index"][mask], post_merge_label[mask], sampling_frequency, + unit_ids=labels_set, ) sorting_temp = sorting_temp.save(folder=sorter_output_folder / "sorting_temp") - waveforms_params = params["waveforms"].copy() - waveforms_params["ms_before"] = params["general"]["ms_before"] - waveforms_params["ms_after"] = params["general"]["ms_after"] + + ms_before = params["templates"]["ms_before"] + ms_after = params["templates"]["ms_after"] + max_spikes_per_unit = 300 + we = extract_waveforms( - recording, sorting_temp, sorter_output_folder / "waveforms_temp", **waveforms_params, **job_kwargs + recording, sorting_temp, sorter_output_folder / "waveforms_temp", ms_before=ms_before, ms_after=ms_after, + max_spikes_per_unit=max_spikes_per_unit, **job_kwargs ) - ## We launch a OMP matching pursuit by full convolution of the templates and the raw traces matching_params = params["matching"].copy() matching_params["waveform_extractor"] = we matching_params["noise_levels"] = noise_levels matching_params["peak_sign"] = params["detection"]["peak_sign"] matching_params["detect_threshold"] = params["detection"]["detect_threshold"] - matching_params["radius_um"] = params["general"]["radius_um"] - - # TODO: route that params - # ~ 'num_closest' : 5, - # ~ 'sample_shift': 3, - # ~ 'ms_before': 0.8, - # ~ 'ms_after': 1.2, - # ~ 'num_peeler_loop': 2, - # ~ 'num_template_try' : 1, + matching_params["radius_um"] = params["detection"]["radius_um"] spikes = 
find_spikes_from_templates( recording, method="tridesclous", method_kwargs=matching_params, **job_kwargs ) - if verbose: - print("We found %d spikes" % len(spikes)) - sorting = NumpySorting.from_times_labels(spikes["sample_index"], spikes["cluster_index"], sampling_frequency) + if params["save_array"]: + + np.save(sorter_output_folder / 'noise_levels.npy', noise_levels) + np.save(sorter_output_folder / 'all_peaks.npy', all_peaks) + np.save(sorter_output_folder / 'post_split_label.npy', post_split_label) + np.save(sorter_output_folder / 'split_count.npy', split_count) + np.save(sorter_output_folder / 'post_merge_label.npy', post_merge_label) + np.save(sorter_output_folder / 'spikes.npy', spikes) + + final_spikes = np.zeros(spikes.size, dtype=minimum_spike_dtype) + final_spikes["sample_index"] = spikes["sample_index"] + final_spikes["unit_index"] = spikes["cluster_index"] + final_spikes["segment_index"] = spikes["segment_index"] + + + sorting = NumpySorting(final_spikes, sampling_frequency, labels_set) sorting = sorting.save(folder=sorter_output_folder / "sorting") return sorting + + + +def extract_waveform_at_max_channel(rec, peaks, + ms_before=0.5, ms_after=1.5, + **job_kwargs): + """ + Helper function to extractor waveforms at max channel from a peak list + + + """ + n = rec.get_num_channels() + unit_ids = np.arange(n, dtype='int64') + sparsity_mask = np.eye(n, dtype='bool') + + spikes = np.zeros(peaks.size, dtype = [("sample_index", "int64"), ("unit_index", "int64"), ("segment_index", "int64")]) + spikes["sample_index"] = peaks["sample_index"] + spikes["unit_index"] = peaks["channel_index"] + spikes["segment_index"] = peaks["segment_index"] + + nbefore = int(ms_before * rec.sampling_frequency / 1000.) + nafter = int(ms_after * rec.sampling_frequency/ 1000.) 
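+    # note: each channel is treated as a fake "unit" (unit_ids = arange(num_channels)) and
+    # the sparsity mask is the identity, so a peak assigned to its detection channel via
+    # unit_index = channel_index comes back with a single-channel waveform; the buffer
+    # returned below therefore has shape (num_peaks, nbefore + nafter, 1)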
+ + all_wfs = extract_waveforms_to_single_buffer(rec, spikes, unit_ids, nbefore, nafter, + mode="shared_memory", return_scaled=False, + sparsity_mask=sparsity_mask, copy=True, + **job_kwargs, + ) + + return all_wfs + + + + From b78257cf7217764de00be0eac72b56deb499e1bd Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 11:51:55 +0200 Subject: [PATCH 066/322] Speed up searchsorted calls --- src/spikeinterface/core/basesorting.py | 3 +-- src/spikeinterface/core/generate.py | 7 +++---- src/spikeinterface/core/node_pipeline.py | 12 ++++-------- src/spikeinterface/core/numpyextractors.py | 3 +-- src/spikeinterface/core/segmentutils.py | 6 ++---- src/spikeinterface/core/waveform_tools.py | 15 +++++---------- .../curation/remove_duplicated_spikes.py | 3 +-- .../postprocessing/amplitude_scalings.py | 3 +-- .../postprocessing/principal_component.py | 3 +-- .../postprocessing/spike_amplitudes.py | 4 +--- .../postprocessing/spike_locations.py | 3 +-- src/spikeinterface/qualitymetrics/misc_metrics.py | 6 ++---- .../sortingcomponents/motion_interpolation.py | 3 +-- 13 files changed, 24 insertions(+), 47 deletions(-) diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 52f71c2399..eb141abde4 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -473,8 +473,7 @@ def to_spike_vector(self, concatenated=True, extremum_channel_inds=None, use_cac if not concatenated: spikes_ = [] for segment_index in range(self.get_num_segments()): - s0 = np.searchsorted(spikes["segment_index"], segment_index, side="left") - s1 = np.searchsorted(spikes["segment_index"], segment_index + 1, side="left") + s0, s1 = np.searchsorted(spikes["segment_index"], [segment_index, segment_index + 1], side="left") spikes_.append(spikes[s0:s1]) spikes = spikes_ diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..56a2bb4f48 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1109,8 +1109,7 @@ def __init__( num_samples = [num_samples] for segment_index in range(sorting.get_num_segments()): - start = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="left") - end = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="right") + start, end = np.searchsorted(self.spike_vector["segment_index"], [segment_index, segment_index+1], side="left") spikes = self.spike_vector[start:end] amplitude_vec = amplitude_vector[start:end] if amplitude_vector is not None else None upsample_vec = upsample_vector[start:end] if upsample_vector is not None else None @@ -1208,8 +1207,8 @@ def get_traces( else: traces = np.zeros([end_frame - start_frame, n_channels], dtype=self.dtype) - start = np.searchsorted(self.spike_vector["sample_index"], start_frame - self.templates.shape[1], side="left") - end = np.searchsorted(self.spike_vector["sample_index"], end_frame + self.templates.shape[1], side="right") + start, end = np.searchsorted(self.spike_vector["sample_index"], [start_frame - self.templates.shape[1], + end_frame + self.templates.shape[1] + 1], side="left") for i in range(start, end): spike = self.spike_vector[i] diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index b11f40a441..5627eba518 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -111,8 +111,7 @@ def __init__(self, recording, peaks): # precompute segment slice 
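The change applied throughout this commit is mechanical: two scalar np.searchsorted calls on the same sorted array are folded into a single call with a two-element query, which halves the Python-level call overhead while each lookup remains an ordinary binary search. A self-contained sketch of the equivalence, with illustrative values:

    import numpy as np

    sample_index = np.array([10, 50, 120, 400, 900])   # sorted spike frames
    i0, i1 = np.searchsorted(sample_index, [100, 500])
    assert i0 == np.searchsorted(sample_index, 100)     # 2
    assert i1 == np.searchsorted(sample_index, 500)     # 4
    local_spikes = sample_index[i0:i1]                  # frames 120 and 400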
self.segment_slices = [] for segment_index in range(recording.get_num_segments()): - i0 = np.searchsorted(peaks["segment_index"], segment_index) - i1 = np.searchsorted(peaks["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(peaks["segment_index"], [segment_index, segment_index + 1]) self.segment_slices.append(slice(i0, i1)) def get_trace_margin(self): @@ -125,8 +124,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0 = np.searchsorted(peaks_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(peaks_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces @@ -183,8 +181,7 @@ def __init__( # precompute segment slice self.segment_slices = [] for segment_index in range(recording.get_num_segments()): - i0 = np.searchsorted(self.peaks["segment_index"], segment_index) - i1 = np.searchsorted(self.peaks["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(self.peaks["segment_index"], [segment_index, segment_index + 1]) self.segment_slices.append(slice(i0, i1)) def get_trace_margin(self): @@ -197,8 +194,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0 = np.searchsorted(peaks_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(peaks_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 97f22615df..d5663156c7 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -338,8 +338,7 @@ def get_unit_spike_train(self, unit_id, start_frame, end_frame): if self.spikes_in_seg is None: # the slicing of segment is done only once the first time # this fasten the constructor a lot - s0 = np.searchsorted(self.spikes["segment_index"], self.segment_index, side="left") - s1 = np.searchsorted(self.spikes["segment_index"], self.segment_index + 1, side="left") + s0, s1 = np.searchsorted(self.spikes["segment_index"], [self.segment_index, self.segment_index + 1]) self.spikes_in_seg = self.spikes[s0:s1] unit_index = self.unit_ids.index(unit_id) diff --git a/src/spikeinterface/core/segmentutils.py b/src/spikeinterface/core/segmentutils.py index f70c45bfe5..85e36cf7a5 100644 --- a/src/spikeinterface/core/segmentutils.py +++ b/src/spikeinterface/core/segmentutils.py @@ -174,8 +174,7 @@ def get_traces(self, start_frame, end_frame, channel_indices): # Return (0 * num_channels) array of correct dtype return self.parent_segments[0].get_traces(0, 0, channel_indices) - i0 = np.searchsorted(self.cumsum_length, start_frame, side="right") - 1 - i1 = np.searchsorted(self.cumsum_length, end_frame, side="right") - 1 + i0, i1 = np.searchsorted(self.cumsum_length, [start_frame, end_frame], side="right") - 1 # several case: # * come from one segment (i0 == i1) @@ -469,8 +468,7 @@ def get_unit_spike_train( if end_frame is None: end_frame = self.get_num_samples() - i0 = np.searchsorted(self.cumsum_length, start_frame, side="right") - 1 - i1 = np.searchsorted(self.cumsum_length, end_frame, side="right") 
- 1 + i0, i1 = np.searchsorted(self.cumsum_length, [start_frame, end_frame], side="right") - 1 # several case: # * come from one segment (i0 == i1) diff --git a/src/spikeinterface/core/waveform_tools.py b/src/spikeinterface/core/waveform_tools.py index da8e3d64b6..0ac20b9fec 100644 --- a/src/spikeinterface/core/waveform_tools.py +++ b/src/spikeinterface/core/waveform_tools.py @@ -344,15 +344,13 @@ def _worker_distribute_buffers(segment_index, start_frame, end_frame, worker_ctx # take only spikes with the correct segment_index # this is a slice so no copy!! - s0 = np.searchsorted(spikes["segment_index"], segment_index) - s1 = np.searchsorted(spikes["segment_index"], segment_index + 1) + s0, s1 = np.searchsorted(spikes["segment_index"], [segment_index, segment_index + 1]) in_seg_spikes = spikes[s0:s1] # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! # the border of segment are protected by nbefore on left an nafter on the right - i0 = np.searchsorted(in_seg_spikes["sample_index"], max(start_frame, nbefore)) - i1 = np.searchsorted(in_seg_spikes["sample_index"], min(end_frame, seg_size - nafter)) + i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) # slice in absolut in spikes vector l0 = i0 + s0 @@ -562,8 +560,7 @@ def _init_worker_distribute_single_buffer( # prepare segment slices segment_slices = [] for segment_index in range(recording.get_num_segments()): - s0 = np.searchsorted(spikes["segment_index"], segment_index) - s1 = np.searchsorted(spikes["segment_index"], segment_index + 1) + s0, s1 = np.searchsorted(spikes["segment_index"], [segment_index, segment_index + 1]) segment_slices.append((s0, s1)) worker_ctx["segment_slices"] = segment_slices @@ -590,8 +587,7 @@ def _worker_distribute_single_buffer(segment_index, start_frame, end_frame, work # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! 
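The segmentutils hunks above additionally rely on np.searchsorted broadcasting over an array-valued query: the result is an ndarray, so the trailing - 1 is applied elementwise to both bounds at once. A small sketch, with cumsum_length standing in for cumulative segment start frames:

    import numpy as np

    cumsum_length = np.array([0, 30000, 62000, 90000])
    i0, i1 = np.searchsorted(cumsum_length, [35000, 61000], side="right") - 1
    # i0 == i1 == 1: both frames fall inside the second segment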
# the border of segment are protected by nbefore on left an nafter on the right - i0 = np.searchsorted(in_seg_spikes["sample_index"], max(start_frame, nbefore)) - i1 = np.searchsorted(in_seg_spikes["sample_index"], min(end_frame, seg_size - nafter)) + i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) # slice in absolut in spikes vector l0 = i0 + s0 @@ -685,8 +681,7 @@ def has_exceeding_spikes(recording, sorting): """ spike_vector = sorting.to_spike_vector() for segment_index in range(recording.get_num_segments()): - start_seg_ind = np.searchsorted(spike_vector["segment_index"], segment_index) - end_seg_ind = np.searchsorted(spike_vector["segment_index"], segment_index + 1) + start_seg_ind, end_seg_ind = np.searchsorted(spike_vector["segment_index"], [segment_index, segment_index + 1]) spike_vector_seg = spike_vector[start_seg_ind:end_seg_ind] if len(spike_vector_seg) > 0: if spike_vector_seg["sample_index"][-1] > recording.get_num_samples(segment_index=segment_index) - 1: diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 04af69b37a..3badaa9402 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -82,8 +82,7 @@ def get_unit_spike_train( if end_frame == None: end_frame = spike_train[-1] if len(spike_train) > 0 else 0 - start = np.searchsorted(spike_train, start_frame, side="left") - end = np.searchsorted(spike_train, end_frame, side="right") + start, end = np.searchsorted(spike_train, [start_frame, end + 1], side="left") return spike_train[start:end] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..bb97f246d9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -99,8 +99,7 @@ def _run(self, **job_kwargs): # precompute segment slice segment_slices = [] for segment_index in range(we.get_num_segments()): - i0 = np.searchsorted(self.spikes["segment_index"], segment_index) - i1 = np.searchsorted(self.spikes["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(self.spikes["segment_index"], [segment_index, segment_index + 1]) segment_slices.append(slice(i0, i1)) # and run diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py index 233625e09e..ce1c3bd5a0 100644 --- a/src/spikeinterface/postprocessing/principal_component.py +++ b/src/spikeinterface/postprocessing/principal_component.py @@ -600,8 +600,7 @@ def _all_pc_extractor_chunk(segment_index, start_frame, end_frame, worker_ctx): seg_size = recording.get_num_samples(segment_index=segment_index) - i0 = np.searchsorted(spike_times, start_frame) - i1 = np.searchsorted(spike_times, end_frame) + i0, i1 = np.searchsorted(spike_times, [start_frame, end_frame]) if i0 != i1: # protect from spikes on border : spike_time<0 or spike_time>seg_size diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 62a4e2c320..fd6078b9b0 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -218,9 +218,7 @@ def _spike_amplitudes_chunk(segment_index, start_frame, end_frame, worker_ctx): d = np.diff(spike_times) assert np.all(d >= 0) - i0 = 
np.searchsorted(spike_times, start_frame) - i1 = np.searchsorted(spike_times, end_frame) - + i0, i1 = np.searchsorted(spike_times, [start_frame, end_frame]) n_spikes = i1 - i0 amplitudes = np.zeros(n_spikes, dtype=recording.get_dtype()) diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index c6f498f7e8..5f23e25b32 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -77,8 +77,7 @@ def get_data(self, outputs="concatenated"): elif outputs == "by_unit": locations_by_unit = [] for segment_index in range(self.waveform_extractor.get_num_segments()): - i0 = np.searchsorted(self.spikes["segment_index"], segment_index, side="left") - i1 = np.searchsorted(self.spikes["segment_index"], segment_index, side="right") + i0, i1 = np.searchsorted(self.spikes["segment_index"], [segment_index, segment_index + 1], side="left") spikes = self.spikes[i0:i1] locations = self._extension_data["spike_locations"][i0:i1] diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..01701e4f65 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -848,16 +848,14 @@ def compute_drift_metrics( spike_vector = sorting.to_spike_vector() # retrieve spikes in segment - i0 = np.searchsorted(spike_vector["segment_index"], segment_index) - i1 = np.searchsorted(spike_vector["segment_index"], segment_index + 1) + i0, i1 = np.searchsorted(spike_vector["segment_index"], [segment_index, segment_index + 1]) spikes_in_segment = spike_vector[i0:i1] spike_locations_in_segment = spike_locations[i0:i1] # compute median positions (if less than min_spikes_per_interval, median position is 0) median_positions = np.nan * np.zeros((len(unit_ids), num_bin_edges - 1)) for bin_index, (start_frame, end_frame) in enumerate(zip(bins[:-1], bins[1:])): - i0 = np.searchsorted(spikes_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(spikes_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(spikes_in_segment["sample_index"], [start_frame, end_frame]) spikes_in_bin = spikes_in_segment[i0:i1] spike_locations_in_bin = spike_locations_in_segment[i0:i1][direction] diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index b4a44105e4..1f6c348574 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -155,8 +155,7 @@ def interpolate_motion_on_traces( **spatial_interpolation_kwargs, ) - i0 = np.searchsorted(bin_inds, bin_ind, side="left") - i1 = np.searchsorted(bin_inds, bin_ind, side="right") + i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1] side="left") # here we use a simple np.matmul even if dirft_kernel can be super sparse. 
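Several hunks in this commit (the generate.py and spike_locations ones, for instance) also replace a side="left"/side="right" pair on the same key with a [k, k + 1] query using side="left". For integer keys such as segment indices the two are interchangeable, because the first position strictly after k is the first position not before k + 1:

    import numpy as np

    segment_index = np.array([0, 0, 0, 1, 1, 2])
    right_bound = np.searchsorted(segment_index, 1, side="right")           # 5
    left_bound_next = np.searchsorted(segment_index, 1 + 1, side="left")    # 5
    assert right_bound == left_bound_next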
# because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing From 164430c83cf66221bed677198fa8d468a8781c1d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 09:54:35 +0000 Subject: [PATCH 067/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/generate.py | 11 ++++++++--- src/spikeinterface/core/waveform_tools.py | 8 ++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 56a2bb4f48..6f85e76f1f 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1109,7 +1109,9 @@ def __init__( num_samples = [num_samples] for segment_index in range(sorting.get_num_segments()): - start, end = np.searchsorted(self.spike_vector["segment_index"], [segment_index, segment_index+1], side="left") + start, end = np.searchsorted( + self.spike_vector["segment_index"], [segment_index, segment_index + 1], side="left" + ) spikes = self.spike_vector[start:end] amplitude_vec = amplitude_vector[start:end] if amplitude_vector is not None else None upsample_vec = upsample_vector[start:end] if upsample_vector is not None else None @@ -1207,8 +1209,11 @@ def get_traces( else: traces = np.zeros([end_frame - start_frame, n_channels], dtype=self.dtype) - start, end = np.searchsorted(self.spike_vector["sample_index"], [start_frame - self.templates.shape[1], - end_frame + self.templates.shape[1] + 1], side="left") + start, end = np.searchsorted( + self.spike_vector["sample_index"], + [start_frame - self.templates.shape[1], end_frame + self.templates.shape[1] + 1], + side="left", + ) for i in range(start, end): spike = self.spike_vector[i] diff --git a/src/spikeinterface/core/waveform_tools.py b/src/spikeinterface/core/waveform_tools.py index 0ac20b9fec..a2f1296e31 100644 --- a/src/spikeinterface/core/waveform_tools.py +++ b/src/spikeinterface/core/waveform_tools.py @@ -350,7 +350,9 @@ def _worker_distribute_buffers(segment_index, start_frame, end_frame, worker_ctx # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! # the border of segment are protected by nbefore on left an nafter on the right - i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) + i0, i1 = np.searchsorted( + in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)] + ) # slice in absolut in spikes vector l0 = i0 + s0 @@ -587,7 +589,9 @@ def _worker_distribute_single_buffer(segment_index, start_frame, end_frame, work # take only spikes in range [start_frame, end_frame] # this is a slice so no copy!! 
# the border of segment are protected by nbefore on left an nafter on the right - i0, i1 = np.searchsorted(in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)]) + i0, i1 = np.searchsorted( + in_seg_spikes["sample_index"], [max(start_frame, nbefore), min(end_frame, seg_size - nafter)] + ) # slice in absolut in spikes vector l0 = i0 + s0 From 426f395c6cb210b016b119225af540fd968fb30f Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 15 Sep 2023 12:38:50 +0200 Subject: [PATCH 068/322] Removed unnecessary else --- src/spikeinterface/core/waveform_extractor.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 3647e915bf..6881ab3ec5 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -523,20 +523,20 @@ def is_extension(self, extension_name) -> bool: """ if self.folder is None: return extension_name in self._loaded_extensions + + if extension_name in self._loaded_extensions: + # extension already loaded in memory + return True else: - # Extensions already loaded in memory - if extension_name in self._loaded_extensions: - return True - else: - if self.format == "binary": - return (self.folder / extension_name).is_dir() and ( - self.folder / extension_name / "params.json" - ).is_file() - elif self.format == "zarr": - return ( - extension_name in self._waveforms_root.keys() - and "params" in self._waveforms_root[extension_name].attrs.keys() - ) + if self.format == "binary": + return (self.folder / extension_name).is_dir() and ( + self.folder / extension_name / "params.json" + ).is_file() + elif self.format == "zarr": + return ( + extension_name in self._waveforms_root.keys() + and "params" in self._waveforms_root[extension_name].attrs.keys() + ) def load_extension(self, extension_name): """ From 9ad5f56907a848b757977e8dc2316445f867e269 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 15 Sep 2023 13:01:43 +0200 Subject: [PATCH 069/322] Update src/spikeinterface/sortingcomponents/motion_interpolation.py Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- src/spikeinterface/sortingcomponents/motion_interpolation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index 1f6c348574..18bb4f5a99 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -155,7 +155,7 @@ def interpolate_motion_on_traces( **spatial_interpolation_kwargs, ) - i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1] side="left") + i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1], side="left") # here we use a simple np.matmul even if dirft_kernel can be super sparse. 
# because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing From 9c6e6c1cef249d0382c6c441cdd7d2a7b0194cb1 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 13:30:36 +0200 Subject: [PATCH 070/322] Typos while copy/paste --- src/spikeinterface/core/node_pipeline.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/node_pipeline.py b/src/spikeinterface/core/node_pipeline.py index 5627eba518..651804c995 100644 --- a/src/spikeinterface/core/node_pipeline.py +++ b/src/spikeinterface/core/node_pipeline.py @@ -124,7 +124,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) + i0, i1 = np.searchsorted(peaks_in_segment["sample_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces @@ -194,7 +194,7 @@ def compute(self, traces, start_frame, end_frame, segment_index, max_margin): # get local peaks sl = self.segment_slices[segment_index] peaks_in_segment = self.peaks[sl] - i0, i1 = np.searchsorted(peaks_in_segment["segment_index"], [start_frame, end_frame]) + i0, i1 = np.searchsorted(peaks_in_segment["sample_index"], [start_frame, end_frame]) local_peaks = peaks_in_segment[i0:i1] # make sample index local to traces From 646455a1054bf4cebed133c3197e8598ef75e59f Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 13:38:26 +0200 Subject: [PATCH 071/322] Some more searchsorted --- .../postprocessing/amplitude_scalings.py | 15 +++++---------- .../widgets/_legacy_mpl_widgets/activity.py | 3 +-- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index bb97f246d9..73e75870f9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -316,8 +316,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) spikes_in_segment = spikes[segment_slices[segment_index]] - i0 = np.searchsorted(spikes_in_segment["sample_index"], start_frame) - i1 = np.searchsorted(spikes_in_segment["sample_index"], end_frame) + i0, i1 = np.searchsorted(spikes_in_segment["sample_index"], [start_frame, end_frame]) if i0 != i1: local_spikes = spikes_in_segment[i0:i1] @@ -334,8 +333,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) # set colliding spikes apart (if needed) if handle_collisions: # local spikes with margin! 
- i0_margin = np.searchsorted(spikes_in_segment["sample_index"], start_frame - left) - i1_margin = np.searchsorted(spikes_in_segment["sample_index"], end_frame + right) + i0_margin, i1_margin = np.searchsorted(spikes_in_segment["sample_index"], [start_frame - left, end_frame + right]) local_spikes_w_margin = spikes_in_segment[i0_margin:i1_margin] collisions_local = find_collisions( local_spikes, local_spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices @@ -461,14 +459,11 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ spike_index_w_margin = np.where(spikes_w_margin == spike)[0][0] # find the possible spikes per and post within delta_collision_samples - consecutive_window_pre = np.searchsorted( + consecutive_window_pre, consecutive_window_post = np.searchsorted( spikes_w_margin["sample_index"], - spike["sample_index"] - delta_collision_samples, - ) - consecutive_window_post = np.searchsorted( - spikes_w_margin["sample_index"], - spike["sample_index"] + delta_collision_samples, + [spike["sample_index"] - delta_collision_samples, spike["sample_index"] + delta_collision_samples] ) + # exclude the spike itself (it is included in the collision_spikes by construction) pre_possible_consecutive_spike_indices = np.arange(consecutive_window_pre, spike_index_w_margin) post_possible_consecutive_spike_indices = np.arange(spike_index_w_margin + 1, consecutive_window_post) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py index 939475c17d..9715b7ea87 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/activity.py @@ -95,8 +95,7 @@ def plot(self): num_frames = int(duration / self.bin_duration_s) def animate_func(i): - i0 = np.searchsorted(peaks["sample_index"], bin_size * i) - i1 = np.searchsorted(peaks["sample_index"], bin_size * (i + 1)) + i0, i1 = np.searchsorted(peaks["sample_index"], [bin_size * i, bin_size * (i + 1)]) local_peaks = peaks[i0:i1] artists = self._plot_one_bin(rec, probe, local_peaks, self.bin_duration_s) return artists From 4410d6e8d06a8f3db8004846152be90bf04b8615 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 11:40:20 +0000 Subject: [PATCH 072/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/postprocessing/amplitude_scalings.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 73e75870f9..d4446e2289 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -333,7 +333,9 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) # set colliding spikes apart (if needed) if handle_collisions: # local spikes with margin! 
- i0_margin, i1_margin = np.searchsorted(spikes_in_segment["sample_index"], [start_frame - left, end_frame + right]) + i0_margin, i1_margin = np.searchsorted( + spikes_in_segment["sample_index"], [start_frame - left, end_frame + right] + ) local_spikes_w_margin = spikes_in_segment[i0_margin:i1_margin] collisions_local = find_collisions( local_spikes, local_spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices @@ -461,7 +463,7 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ # find the possible spikes per and post within delta_collision_samples consecutive_window_pre, consecutive_window_post = np.searchsorted( spikes_w_margin["sample_index"], - [spike["sample_index"] - delta_collision_samples, spike["sample_index"] + delta_collision_samples] + [spike["sample_index"] - delta_collision_samples, spike["sample_index"] + delta_collision_samples], ) # exclude the spike itself (it is included in the collision_spikes by construction) From 334f178aaafc0cccbc81db9821749691b7d67da6 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 15 Sep 2023 13:52:20 +0200 Subject: [PATCH 073/322] Fix --- src/spikeinterface/curation/remove_duplicated_spikes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index 3badaa9402..d01ca1f6a1 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -82,7 +82,7 @@ def get_unit_spike_train( if end_frame == None: end_frame = spike_train[-1] if len(spike_train) > 0 else 0 - start, end = np.searchsorted(spike_train, [start_frame, end + 1], side="left") + start, end = np.searchsorted(spike_train, [start_frame, end_frame + 1], side="left") return spike_train[start:end] From 1ac47ffd3c2525b4fa406937b7d2391ee759e4ea Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 15 Sep 2023 14:12:04 +0200 Subject: [PATCH 074/322] in1d to isin --- src/spikeinterface/comparison/basecomparison.py | 4 ++-- src/spikeinterface/comparison/comparisontools.py | 2 +- src/spikeinterface/core/baserecording.py | 2 +- src/spikeinterface/core/basesnippets.py | 2 +- src/spikeinterface/core/basesorting.py | 2 +- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_sparsity.py | 2 +- src/spikeinterface/curation/mergeunitssorting.py | 4 ++-- src/spikeinterface/extractors/bids.py | 2 +- .../postprocessing/amplitude_scalings.py | 4 ++-- src/spikeinterface/postprocessing/spike_amplitudes.py | 4 ++-- src/spikeinterface/postprocessing/spike_locations.py | 4 ++-- .../preprocessing/interpolate_bad_channels.py | 2 +- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- src/spikeinterface/qualitymetrics/pca_metrics.py | 10 +++++----- .../sortingcomponents/benchmark/benchmark_matching.py | 4 ++-- .../benchmark/benchmark_peak_selection.py | 8 ++++---- .../sortingcomponents/clustering/clustering_tools.py | 10 +++++----- .../sortingcomponents/clustering/sliding_hdbscan.py | 10 +++++----- .../widgets/unit_waveforms_density_map.py | 2 +- 20 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 79c784491a..6f45f1497d 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -262,11 +262,11 @@ def get_ordered_agreement_scores(self): indexes = 
np.arange(scores.shape[1]) order1 = [] for r in range(scores.shape[0]): - possible = indexes[~np.in1d(indexes, order1)] + possible = indexes[~isin(indexes, order1)] if possible.size > 0: ind = np.argmax(scores.iloc[r, possible].values) order1.append(possible[ind]) - remain = indexes[~np.in1d(indexes, order1)] + remain = indexes[~isin(indexes, order1)] order1.extend(remain) scores = scores.iloc[:, order1] diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index db45e2b25b..eb7b5c703c 100644 --- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -538,7 +538,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun matched_units2 = match_12[match_12 != -1].values unmatched_units1 = match_12[match_12 == -1].index - unmatched_units2 = unit2_ids[~np.in1d(unit2_ids, matched_units2)] + unmatched_units2 = unit2_ids[~isin(unit2_ids, matched_units2)] ordered_units1 = np.hstack([matched_units1, unmatched_units1]) ordered_units2 = np.hstack([matched_units2, unmatched_units2]) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index af4970a4ad..8c4a2941a0 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -592,7 +592,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceRecording - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceRecording(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 737087abc1..7fd0823fc0 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -139,7 +139,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceSnippets - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceSnippets(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 52f71c2399..423f974220 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -346,7 +346,7 @@ def remove_units(self, remove_unit_ids): """ from spikeinterface import UnitsSelectionSorting - new_unit_ids = self.unit_ids[~np.in1d(self.unit_ids, remove_unit_ids)] + new_unit_ids = self.unit_ids[~isin(self.unit_ids, remove_unit_ids)] new_sorting = UnitsSelectionSorting(self, new_unit_ids) return new_sorting diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..44d62818f9 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -166,7 +166,7 @@ def generate_sorting( ) if empty_units is not None: - keep = ~np.in1d(labels, empty_units) + keep = ~isin(labels, empty_units) times = times[keep] labels = labels[keep] @@ -219,7 +219,7 @@ def add_synchrony_to_sorting(sorting, sync_event_ratio=0.3, seed=None): sample_index = spike["sample_index"] if sample_index not in units_used_for_spike: 
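For reference on the in1d-to-isin migration applied in this commit: np.isin is the recommended, shape-preserving replacement for np.in1d and returns identical results for 1-d inputs. Note, however, that the replacements shown here call a bare isin without the np. prefix and, as far as these hunks show, without importing it, so they would raise NameError at runtime; the very next patch in the series reverts the commit. A minimal equivalence check:

    import numpy as np

    a = np.array([1, 2, 3, 4])
    b = np.array([2, 4, 6])
    assert np.array_equal(np.in1d(a, b), np.isin(a, b))   # [False, True, False, True]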
units_used_for_spike[sample_index] = np.array([spike["unit_index"]]) - units_not_used = unit_ids[~np.in1d(unit_ids, units_used_for_spike[sample_index])] + units_not_used = unit_ids[~isin(unit_ids, units_used_for_spike[sample_index])] if len(units_not_used) == 0: continue diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index a6b94c9b84..61c4179652 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -34,7 +34,7 @@ def test_ChannelSparsity(): for key, v in sparsity.unit_id_to_channel_ids.items(): assert key in unit_ids - assert np.all(np.in1d(v, channel_ids)) + assert np.all(isin(v, channel_ids)) for key, v in sparsity.unit_id_to_channel_indices.items(): assert key in unit_ids diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 264ac3a56d..ccbaa32e7b 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -59,7 +59,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties else: # we cannot automatically find new names new_unit_ids = [f"merge{i}" for i in range(num_merge)] - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(isin(new_unit_ids, keep_unit_ids)): raise ValueError( "Unable to find 'new_unit_ids' because it is a string and parents " "already contain merges. Pass a list of 'new_unit_ids' as an argument." @@ -68,7 +68,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties # dtype int new_unit_ids = list(max(parents_unit_ids) + 1 + np.arange(num_merge, dtype=dtype)) else: - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(isin(new_unit_ids, keep_unit_ids)): raise ValueError("'new_unit_ids' already exist in the sorting.unit_ids. 
Provide new ones") assert len(new_unit_ids) == num_merge, "new_unit_ids must have the same size as units_to_merge" diff --git a/src/spikeinterface/extractors/bids.py b/src/spikeinterface/extractors/bids.py index 02e7d5677d..9de272c56e 100644 --- a/src/spikeinterface/extractors/bids.py +++ b/src/spikeinterface/extractors/bids.py @@ -76,7 +76,7 @@ def _read_probe_group(folder, bids_name, recording_channel_ids): contact_ids = channels["contact_id"].values.astype("U") # extracting information of requested channels - keep = np.in1d(channel_ids, recording_channel_ids) + keep = isin(channel_ids, recording_channel_ids) channel_ids = channel_ids[keep] contact_ids = contact_ids[keep] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..af618cf4db 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -47,9 +47,9 @@ def _set_params( def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = isin(self.spikes["unit_index"], unit_inds) new_amplitude_scalings = self._extension_data["amplitude_scalings"][spike_mask] return dict(amplitude_scalings=new_amplitude_scalings) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 62a4e2c320..729dbd12bb 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -28,13 +28,13 @@ def _select_extension_data(self, unit_ids): # load filter and save amplitude files sorting = self.waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) - (keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids)) + (keep_unit_indices,) = np.nonzero(isin(sorting.unit_ids, unit_ids)) new_extension_data = dict() for seg_index in range(sorting.get_num_segments()): amp_data_name = f"amplitude_segment_{seg_index}" amps = self._extension_data[amp_data_name] - filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices) + filtered_idxs = isin(spikes[seg_index]["unit_index"], keep_unit_indices) new_extension_data[amp_data_name] = amps[filtered_idxs] return new_extension_data diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index c6f498f7e8..eb3f1255c8 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -32,9 +32,9 @@ def _set_params(self, ms_before=0.5, ms_after=0.5, method="center_of_mass", meth def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = isin(self.spikes["unit_index"], unit_inds) new_spike_locations = self._extension_data["spike_locations"][spike_mask] return dict(spike_locations=new_spike_locations) diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index e634d55e7f..5773b6a2ef 100644 --- 
a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -49,7 +49,7 @@ def __init__(self, recording, bad_channel_ids, sigma_um=None, p=1.3, weights=Non self.bad_channel_ids = bad_channel_ids self._bad_channel_idxs = recording.ids_to_indices(self.bad_channel_ids) - self._good_channel_idxs = ~np.in1d(np.arange(recording.get_num_channels()), self._bad_channel_idxs) + self._good_channel_idxs = ~isin(np.arange(recording.get_num_channels()), self._bad_channel_idxs) self._bad_channel_idxs.setflags(write=False) if sigma_um is None: diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..a51bfe9164 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -544,7 +544,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue - spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] + spike_complexity = complexity[isin(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index 59000211d4..0702c8f35a 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -152,8 +152,8 @@ def calculate_pc_metrics( neighbor_unit_ids = unit_ids neighbor_channel_indices = we.channel_ids_to_indices(neighbor_channel_ids) - labels = all_labels[np.in1d(all_labels, neighbor_unit_ids)] - pcs = all_pcs[np.in1d(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] + labels = all_labels[isin(all_labels, neighbor_unit_ids)] + pcs = all_pcs[isin(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] pcs_flat = pcs.reshape(pcs.shape[0], -1) func_args = ( @@ -506,7 +506,7 @@ def nearest_neighbors_isolation( other_units_ids = [ unit_id for unit_id in other_units_ids - if np.sum(np.in1d(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) + if np.sum(isin(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) >= (n_channels_target_unit * min_spatial_overlap) ] @@ -536,10 +536,10 @@ def nearest_neighbors_isolation( if waveform_extractor.is_sparse(): # in this case, waveforms are sparse so we need to do some smart indexing waveforms_target_unit_sampled = waveforms_target_unit_sampled[ - :, :, np.in1d(closest_chans_target_unit, common_channel_idxs) + :, :, isin(closest_chans_target_unit, common_channel_idxs) ] waveforms_other_unit_sampled = waveforms_other_unit_sampled[ - :, :, np.in1d(closest_chans_other_unit, common_channel_idxs) + :, :, isin(closest_chans_other_unit, common_channel_idxs) ] else: waveforms_target_unit_sampled = waveforms_target_unit_sampled[:, :, common_channel_idxs] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 07c7db155c..ee8ace42ee 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -502,7 +502,7 @@ def plot_errors_matching(benchmark, comp, unit_id, nb_spikes=200, 
metric="cosine seg_num = 0 # TODO: make compatible with multiple segments idx_1 = np.where(comp.get_labels1(unit_id)[seg_num] == label) idx_2 = benchmark.we.get_sampled_indices(unit_id)["spike_index"] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] if len(intersection) == 0: print(f"No {label}s found for unit {unit_id}") @@ -552,7 +552,7 @@ def plot_errors_matching_all_neurons(benchmark, comp, nb_spikes=200, metric="cos for label in ["TP", "FN"]: idx_1 = np.where(comp.get_labels1(unit_id) == label)[0] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] wfs_sliced = wfs[intersection, :, :] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py index 1514a63dd4..ca18db58d6 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py @@ -133,7 +133,7 @@ def run(self, peaks=None, positions=None, delta=0.2): matches = make_matching_events(times2, spikes1["sample_index"], int(delta * self.sampling_rate / 1000)) self.good_matches = matches["index1"] - garbage_matches = ~np.in1d(np.arange(len(times2)), self.good_matches) + garbage_matches = ~isin(np.arange(len(times2)), self.good_matches) garbage_channels = self.peaks["channel_index"][garbage_matches] garbage_peaks = times2[garbage_matches] nb_garbage = len(garbage_peaks) @@ -365,7 +365,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["full_gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["full_gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.gt_peaks["sample_index"], all_spikes[idx]) + mask = isin(self.gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.gt_peaks["amplitude"][mask]) ax.scatter(self.gt_positions["x"][mask], self.gt_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.gt_positions["x"][mask].mean(), self.gt_positions["y"][mask].mean()) @@ -391,7 +391,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) + mask = isin(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.sliced_gt_peaks["amplitude"][mask]) ax.scatter( self.sliced_gt_positions["x"][mask], self.sliced_gt_positions["y"][mask], c=colors, s=1, alpha=0.5 @@ -420,7 +420,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["garbage"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["garbage"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.garbage_peaks["sample_index"], all_spikes[idx]) + mask = isin(self.garbage_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.garbage_peaks["amplitude"][mask]) ax.scatter(self.garbage_positions["x"][mask], self.garbage_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.garbage_positions["x"][mask].mean(), self.garbage_positions["y"][mask].mean()) diff --git 
a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6edf5af16b..fb45e5fc3a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -30,7 +30,7 @@ def _split_waveforms( local_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(cluster_probability > probability_thr) - local_labels_with_noise[~np.in1d(local_labels_with_noise, persistent_clusters)] = -1 + local_labels_with_noise[~isin(local_labels_with_noise, persistent_clusters)] = -1 # remove super small cluster labels, count = np.unique(local_labels_with_noise[:valid_size], return_counts=True) @@ -43,7 +43,7 @@ def _split_waveforms( to_remove = labels[(count / valid_size) < minimum_cluster_size_ratio] # ~ print('to_remove', to_remove, count / valid_size) if to_remove.size > 0: - local_labels_with_noise[np.in1d(local_labels_with_noise, to_remove)] = -1 + local_labels_with_noise[isin(local_labels_with_noise, to_remove)] = -1 local_labels_with_noise[valid_size:] = -2 @@ -123,7 +123,7 @@ def _split_waveforms_nested( active_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(clustering[2] > probability_thr) - active_labels_with_noise[~np.in1d(active_labels_with_noise, persistent_clusters)] = -1 + active_labels_with_noise[~isin(active_labels_with_noise, persistent_clusters)] = -1 active_labels = active_labels_with_noise[active_ind < valid_size] active_labels_set = np.unique(active_labels) @@ -381,9 +381,9 @@ def auto_clean_clustering( continue wfs0 = wfs_arrays[label0] - wfs0 = wfs0[:, :, np.in1d(channel_inds0, used_chans)] + wfs0 = wfs0[:, :, isin(channel_inds0, used_chans)] wfs1 = wfs_arrays[label1] - wfs1 = wfs1[:, :, np.in1d(channel_inds1, used_chans)] + wfs1 = wfs1[:, :, isin(channel_inds1, used_chans)] # TODO : remove assert wfs0.shape[2] == wfs1.shape[2] diff --git a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py index aeec14158f..0f1d503bdf 100644 --- a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py +++ b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py @@ -198,7 +198,7 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): for chan_ind in prev_local_chan_inds: if total_count[chan_ind] == 0: continue - # ~ inds, = np.nonzero(np.in1d(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) + # ~ inds, = np.nonzero(isin(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) (inds,) = np.nonzero((peaks["channel_index"] == chan_ind) & (peak_labels == 0)) if inds.size <= d["min_spike_on_channel"]: chan_amps[chan_ind] = 0.0 @@ -235,12 +235,12 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # TODO: only for debug, remove later - assert np.all(np.in1d(local_chan_inds, wf_chans)) + assert np.all(isin(local_chan_inds, wf_chans)) # none label spikes wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, local_chan_inds)] + wfs_chan = wfs_chan[:, :, isin(wf_chans, local_chan_inds)] wfs.append(wfs_chan) # put noise to enhance clusters @@ -517,7 +517,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, 
(wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # print('wf_chans', wf_chans) # TODO: only for debug, remove later - assert np.all(np.in1d(wanted_chans, wf_chans)) + assert np.all(isin(wanted_chans, wf_chans)) wfs_chan = wfs_arrays[chan_ind] # TODO: only for debug, remove later @@ -525,7 +525,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, wanted_chans)] + wfs_chan = wfs_chan[:, :, isin(wf_chans, wanted_chans)] wfs.append(wfs_chan) wfs = np.concatenate(wfs, axis=0) diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index e8a6868e92..2515d844eb 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -103,7 +103,7 @@ def __init__( if same_axis and not np.array_equal(chan_inds, shared_chan_inds): # add more channels if necessary wfs_ = np.zeros((wfs.shape[0], wfs.shape[1], shared_chan_inds.size), dtype=float) - mask = np.in1d(shared_chan_inds, chan_inds) + mask = isin(shared_chan_inds, chan_inds) wfs_[:, :, mask] = wfs wfs_[:, :, ~mask] = np.nan wfs = wfs_ From e947e09a9c3d397ceabfd8eae50ba8a5ed345cf5 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 15 Sep 2023 14:20:32 +0200 Subject: [PATCH 075/322] Revert "in1d to isin" This reverts commit 1ac47ffd3c2525b4fa406937b7d2391ee759e4ea. --- src/spikeinterface/comparison/basecomparison.py | 4 ++-- src/spikeinterface/comparison/comparisontools.py | 2 +- src/spikeinterface/core/baserecording.py | 2 +- src/spikeinterface/core/basesnippets.py | 2 +- src/spikeinterface/core/basesorting.py | 2 +- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_sparsity.py | 2 +- src/spikeinterface/curation/mergeunitssorting.py | 4 ++-- src/spikeinterface/extractors/bids.py | 2 +- .../postprocessing/amplitude_scalings.py | 4 ++-- src/spikeinterface/postprocessing/spike_amplitudes.py | 4 ++-- src/spikeinterface/postprocessing/spike_locations.py | 4 ++-- .../preprocessing/interpolate_bad_channels.py | 2 +- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- src/spikeinterface/qualitymetrics/pca_metrics.py | 10 +++++----- .../sortingcomponents/benchmark/benchmark_matching.py | 4 ++-- .../benchmark/benchmark_peak_selection.py | 8 ++++---- .../sortingcomponents/clustering/clustering_tools.py | 10 +++++----- .../sortingcomponents/clustering/sliding_hdbscan.py | 10 +++++----- .../widgets/unit_waveforms_density_map.py | 2 +- 20 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 6f45f1497d..79c784491a 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -262,11 +262,11 @@ def get_ordered_agreement_scores(self): indexes = np.arange(scores.shape[1]) order1 = [] for r in range(scores.shape[0]): - possible = indexes[~isin(indexes, order1)] + possible = indexes[~np.in1d(indexes, order1)] if possible.size > 0: ind = np.argmax(scores.iloc[r, possible].values) order1.append(possible[ind]) - remain = indexes[~isin(indexes, order1)] + remain = indexes[~np.in1d(indexes, order1)] order1.extend(remain) scores = scores.iloc[:, order1] diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index eb7b5c703c..db45e2b25b 
100644 --- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -538,7 +538,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun matched_units2 = match_12[match_12 != -1].values unmatched_units1 = match_12[match_12 == -1].index - unmatched_units2 = unit2_ids[~isin(unit2_ids, matched_units2)] + unmatched_units2 = unit2_ids[~np.in1d(unit2_ids, matched_units2)] ordered_units1 = np.hstack([matched_units1, unmatched_units1]) ordered_units2 = np.hstack([matched_units2, unmatched_units2]) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index 8c4a2941a0..af4970a4ad 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -592,7 +592,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceRecording - new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceRecording(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 7fd0823fc0..737087abc1 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -139,7 +139,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceSnippets - new_channel_ids = self.channel_ids[~isin(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceSnippets(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 423f974220..52f71c2399 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -346,7 +346,7 @@ def remove_units(self, remove_unit_ids): """ from spikeinterface import UnitsSelectionSorting - new_unit_ids = self.unit_ids[~isin(self.unit_ids, remove_unit_ids)] + new_unit_ids = self.unit_ids[~np.in1d(self.unit_ids, remove_unit_ids)] new_sorting = UnitsSelectionSorting(self, new_unit_ids) return new_sorting diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 44d62818f9..401c498f03 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -166,7 +166,7 @@ def generate_sorting( ) if empty_units is not None: - keep = ~isin(labels, empty_units) + keep = ~np.in1d(labels, empty_units) times = times[keep] labels = labels[keep] @@ -219,7 +219,7 @@ def add_synchrony_to_sorting(sorting, sync_event_ratio=0.3, seed=None): sample_index = spike["sample_index"] if sample_index not in units_used_for_spike: units_used_for_spike[sample_index] = np.array([spike["unit_index"]]) - units_not_used = unit_ids[~isin(unit_ids, units_used_for_spike[sample_index])] + units_not_used = unit_ids[~np.in1d(unit_ids, units_used_for_spike[sample_index])] if len(units_not_used) == 0: continue diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index 61c4179652..a6b94c9b84 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -34,7 +34,7 @@ def 
test_ChannelSparsity(): for key, v in sparsity.unit_id_to_channel_ids.items(): assert key in unit_ids - assert np.all(isin(v, channel_ids)) + assert np.all(np.in1d(v, channel_ids)) for key, v in sparsity.unit_id_to_channel_indices.items(): assert key in unit_ids diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index ccbaa32e7b..264ac3a56d 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -59,7 +59,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties else: # we cannot automatically find new names new_unit_ids = [f"merge{i}" for i in range(num_merge)] - if np.any(isin(new_unit_ids, keep_unit_ids)): + if np.any(np.in1d(new_unit_ids, keep_unit_ids)): raise ValueError( "Unable to find 'new_unit_ids' because it is a string and parents " "already contain merges. Pass a list of 'new_unit_ids' as an argument." @@ -68,7 +68,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties # dtype int new_unit_ids = list(max(parents_unit_ids) + 1 + np.arange(num_merge, dtype=dtype)) else: - if np.any(isin(new_unit_ids, keep_unit_ids)): + if np.any(np.in1d(new_unit_ids, keep_unit_ids)): raise ValueError("'new_unit_ids' already exist in the sorting.unit_ids. Provide new ones") assert len(new_unit_ids) == num_merge, "new_unit_ids must have the same size as units_to_merge" diff --git a/src/spikeinterface/extractors/bids.py b/src/spikeinterface/extractors/bids.py index 9de272c56e..02e7d5677d 100644 --- a/src/spikeinterface/extractors/bids.py +++ b/src/spikeinterface/extractors/bids.py @@ -76,7 +76,7 @@ def _read_probe_group(folder, bids_name, recording_channel_ids): contact_ids = channels["contact_id"].values.astype("U") # extracting information of requested channels - keep = isin(channel_ids, recording_channel_ids) + keep = np.in1d(channel_ids, recording_channel_ids) channel_ids = channel_ids[keep] contact_ids = contact_ids[keep] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index af618cf4db..5a0148c5c4 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -47,9 +47,9 @@ def _set_params( def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) - spike_mask = isin(self.spikes["unit_index"], unit_inds) + spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) new_amplitude_scalings = self._extension_data["amplitude_scalings"][spike_mask] return dict(amplitude_scalings=new_amplitude_scalings) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 729dbd12bb..62a4e2c320 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -28,13 +28,13 @@ def _select_extension_data(self, unit_ids): # load filter and save amplitude files sorting = self.waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) - (keep_unit_indices,) = np.nonzero(isin(sorting.unit_ids, unit_ids)) + (keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids)) new_extension_data = dict() for seg_index in range(sorting.get_num_segments()): amp_data_name = 
f"amplitude_segment_{seg_index}" amps = self._extension_data[amp_data_name] - filtered_idxs = isin(spikes[seg_index]["unit_index"], keep_unit_indices) + filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices) new_extension_data[amp_data_name] = amps[filtered_idxs] return new_extension_data diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index eb3f1255c8..c6f498f7e8 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -32,9 +32,9 @@ def _set_params(self, ms_before=0.5, ms_after=0.5, method="center_of_mass", meth def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(isin(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) - spike_mask = isin(self.spikes["unit_index"], unit_inds) + spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) new_spike_locations = self._extension_data["spike_locations"][spike_mask] return dict(spike_locations=new_spike_locations) diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index 5773b6a2ef..e634d55e7f 100644 --- a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -49,7 +49,7 @@ def __init__(self, recording, bad_channel_ids, sigma_um=None, p=1.3, weights=Non self.bad_channel_ids = bad_channel_ids self._bad_channel_idxs = recording.ids_to_indices(self.bad_channel_ids) - self._good_channel_idxs = ~isin(np.arange(recording.get_num_channels()), self._bad_channel_idxs) + self._good_channel_idxs = ~np.in1d(np.arange(recording.get_num_channels()), self._bad_channel_idxs) self._bad_channel_idxs.setflags(write=False) if sigma_um is None: diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index a51bfe9164..ee28485983 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -544,7 +544,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue - spike_complexity = complexity[isin(unique_spike_index, spikes_per_unit["sample_index"])] + spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index 0702c8f35a..59000211d4 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -152,8 +152,8 @@ def calculate_pc_metrics( neighbor_unit_ids = unit_ids neighbor_channel_indices = we.channel_ids_to_indices(neighbor_channel_ids) - labels = all_labels[isin(all_labels, neighbor_unit_ids)] - pcs = all_pcs[isin(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] + labels = all_labels[np.in1d(all_labels, neighbor_unit_ids)] + pcs = all_pcs[np.in1d(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] pcs_flat = pcs.reshape(pcs.shape[0], -1) func_args = ( @@ -506,7 +506,7 @@ def nearest_neighbors_isolation( other_units_ids = [ unit_id for unit_id 
in other_units_ids - if np.sum(isin(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) + if np.sum(np.in1d(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) >= (n_channels_target_unit * min_spatial_overlap) ] @@ -536,10 +536,10 @@ def nearest_neighbors_isolation( if waveform_extractor.is_sparse(): # in this case, waveforms are sparse so we need to do some smart indexing waveforms_target_unit_sampled = waveforms_target_unit_sampled[ - :, :, isin(closest_chans_target_unit, common_channel_idxs) + :, :, np.in1d(closest_chans_target_unit, common_channel_idxs) ] waveforms_other_unit_sampled = waveforms_other_unit_sampled[ - :, :, isin(closest_chans_other_unit, common_channel_idxs) + :, :, np.in1d(closest_chans_other_unit, common_channel_idxs) ] else: waveforms_target_unit_sampled = waveforms_target_unit_sampled[:, :, common_channel_idxs] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index ee8ace42ee..07c7db155c 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -502,7 +502,7 @@ def plot_errors_matching(benchmark, comp, unit_id, nb_spikes=200, metric="cosine seg_num = 0 # TODO: make compatible with multiple segments idx_1 = np.where(comp.get_labels1(unit_id)[seg_num] == label) idx_2 = benchmark.we.get_sampled_indices(unit_id)["spike_index"] - intersection = np.where(isin(idx_2, idx_1))[0] + intersection = np.where(np.in1d(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] if len(intersection) == 0: print(f"No {label}s found for unit {unit_id}") @@ -552,7 +552,7 @@ def plot_errors_matching_all_neurons(benchmark, comp, nb_spikes=200, metric="cos for label in ["TP", "FN"]: idx_1 = np.where(comp.get_labels1(unit_id) == label)[0] - intersection = np.where(isin(idx_2, idx_1))[0] + intersection = np.where(np.in1d(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] wfs_sliced = wfs[intersection, :, :] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py index ca18db58d6..1514a63dd4 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py @@ -133,7 +133,7 @@ def run(self, peaks=None, positions=None, delta=0.2): matches = make_matching_events(times2, spikes1["sample_index"], int(delta * self.sampling_rate / 1000)) self.good_matches = matches["index1"] - garbage_matches = ~isin(np.arange(len(times2)), self.good_matches) + garbage_matches = ~np.in1d(np.arange(len(times2)), self.good_matches) garbage_channels = self.peaks["channel_index"][garbage_matches] garbage_peaks = times2[garbage_matches] nb_garbage = len(garbage_peaks) @@ -365,7 +365,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["full_gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["full_gt"].sorting.get_unit_spike_train(unit_id) - mask = isin(self.gt_peaks["sample_index"], all_spikes[idx]) + mask = np.in1d(self.gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.gt_peaks["amplitude"][mask]) ax.scatter(self.gt_positions["x"][mask], self.gt_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = 
(self.gt_positions["x"][mask].mean(), self.gt_positions["y"][mask].mean()) @@ -391,7 +391,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["gt"].sorting.get_unit_spike_train(unit_id) - mask = isin(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) + mask = np.in1d(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.sliced_gt_peaks["amplitude"][mask]) ax.scatter( self.sliced_gt_positions["x"][mask], self.sliced_gt_positions["y"][mask], c=colors, s=1, alpha=0.5 @@ -420,7 +420,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["garbage"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["garbage"].sorting.get_unit_spike_train(unit_id) - mask = isin(self.garbage_peaks["sample_index"], all_spikes[idx]) + mask = np.in1d(self.garbage_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.garbage_peaks["amplitude"][mask]) ax.scatter(self.garbage_positions["x"][mask], self.garbage_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.garbage_positions["x"][mask].mean(), self.garbage_positions["y"][mask].mean()) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index fb45e5fc3a..6edf5af16b 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -30,7 +30,7 @@ def _split_waveforms( local_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(cluster_probability > probability_thr) - local_labels_with_noise[~isin(local_labels_with_noise, persistent_clusters)] = -1 + local_labels_with_noise[~np.in1d(local_labels_with_noise, persistent_clusters)] = -1 # remove super small cluster labels, count = np.unique(local_labels_with_noise[:valid_size], return_counts=True) @@ -43,7 +43,7 @@ def _split_waveforms( to_remove = labels[(count / valid_size) < minimum_cluster_size_ratio] # ~ print('to_remove', to_remove, count / valid_size) if to_remove.size > 0: - local_labels_with_noise[isin(local_labels_with_noise, to_remove)] = -1 + local_labels_with_noise[np.in1d(local_labels_with_noise, to_remove)] = -1 local_labels_with_noise[valid_size:] = -2 @@ -123,7 +123,7 @@ def _split_waveforms_nested( active_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(clustering[2] > probability_thr) - active_labels_with_noise[~isin(active_labels_with_noise, persistent_clusters)] = -1 + active_labels_with_noise[~np.in1d(active_labels_with_noise, persistent_clusters)] = -1 active_labels = active_labels_with_noise[active_ind < valid_size] active_labels_set = np.unique(active_labels) @@ -381,9 +381,9 @@ def auto_clean_clustering( continue wfs0 = wfs_arrays[label0] - wfs0 = wfs0[:, :, isin(channel_inds0, used_chans)] + wfs0 = wfs0[:, :, np.in1d(channel_inds0, used_chans)] wfs1 = wfs_arrays[label1] - wfs1 = wfs1[:, :, isin(channel_inds1, used_chans)] + wfs1 = wfs1[:, :, np.in1d(channel_inds1, used_chans)] # TODO : remove assert wfs0.shape[2] == wfs1.shape[2] diff --git a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py index 0f1d503bdf..aeec14158f 100644 --- 
a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py +++ b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py @@ -198,7 +198,7 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): for chan_ind in prev_local_chan_inds: if total_count[chan_ind] == 0: continue - # ~ inds, = np.nonzero(isin(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) + # ~ inds, = np.nonzero(np.in1d(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) (inds,) = np.nonzero((peaks["channel_index"] == chan_ind) & (peak_labels == 0)) if inds.size <= d["min_spike_on_channel"]: chan_amps[chan_ind] = 0.0 @@ -235,12 +235,12 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # TODO: only for debug, remove later - assert np.all(isin(local_chan_inds, wf_chans)) + assert np.all(np.in1d(local_chan_inds, wf_chans)) # none label spikes wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, isin(wf_chans, local_chan_inds)] + wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, local_chan_inds)] wfs.append(wfs_chan) # put noise to enhance clusters @@ -517,7 +517,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # print('wf_chans', wf_chans) # TODO: only for debug, remove later - assert np.all(isin(wanted_chans, wf_chans)) + assert np.all(np.in1d(wanted_chans, wf_chans)) wfs_chan = wfs_arrays[chan_ind] # TODO: only for debug, remove later @@ -525,7 +525,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, isin(wf_chans, wanted_chans)] + wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, wanted_chans)] wfs.append(wfs_chan) wfs = np.concatenate(wfs, axis=0) diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index 2515d844eb..e8a6868e92 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -103,7 +103,7 @@ def __init__( if same_axis and not np.array_equal(chan_inds, shared_chan_inds): # add more channels if necessary wfs_ = np.zeros((wfs.shape[0], wfs.shape[1], shared_chan_inds.size), dtype=float) - mask = isin(shared_chan_inds, chan_inds) + mask = np.in1d(shared_chan_inds, chan_inds) wfs_[:, :, mask] = wfs wfs_[:, :, ~mask] = np.nan wfs = wfs_ From 5e420f3a847102c145c705dddfb01b140b318ec3 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 15 Sep 2023 14:21:53 +0200 Subject: [PATCH 076/322] in1d to isin with correct alias (shame on me) --- src/spikeinterface/comparison/basecomparison.py | 4 ++-- src/spikeinterface/comparison/comparisontools.py | 2 +- src/spikeinterface/core/baserecording.py | 2 +- src/spikeinterface/core/basesnippets.py | 2 +- src/spikeinterface/core/basesorting.py | 2 +- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_sparsity.py | 2 +- src/spikeinterface/curation/mergeunitssorting.py | 4 ++-- src/spikeinterface/extractors/bids.py | 2 +- .../postprocessing/amplitude_scalings.py | 4 ++-- src/spikeinterface/postprocessing/spike_amplitudes.py | 4 ++-- src/spikeinterface/postprocessing/spike_locations.py | 4 ++-- .../preprocessing/interpolate_bad_channels.py | 2 +- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- 
src/spikeinterface/qualitymetrics/pca_metrics.py | 10 +++++----- .../sortingcomponents/benchmark/benchmark_matching.py | 4 ++-- .../benchmark/benchmark_peak_selection.py | 8 ++++---- .../sortingcomponents/clustering/clustering_tools.py | 10 +++++----- .../sortingcomponents/clustering/sliding_hdbscan.py | 10 +++++----- .../widgets/unit_waveforms_density_map.py | 2 +- 20 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/basecomparison.py b/src/spikeinterface/comparison/basecomparison.py index 79c784491a..5af20d79b5 100644 --- a/src/spikeinterface/comparison/basecomparison.py +++ b/src/spikeinterface/comparison/basecomparison.py @@ -262,11 +262,11 @@ def get_ordered_agreement_scores(self): indexes = np.arange(scores.shape[1]) order1 = [] for r in range(scores.shape[0]): - possible = indexes[~np.in1d(indexes, order1)] + possible = indexes[~np.isin(indexes, order1)] if possible.size > 0: ind = np.argmax(scores.iloc[r, possible].values) order1.append(possible[ind]) - remain = indexes[~np.in1d(indexes, order1)] + remain = indexes[~np.isin(indexes, order1)] order1.extend(remain) scores = scores.iloc[:, order1] diff --git a/src/spikeinterface/comparison/comparisontools.py b/src/spikeinterface/comparison/comparisontools.py index db45e2b25b..20ee7910b4 100644 --- a/src/spikeinterface/comparison/comparisontools.py +++ b/src/spikeinterface/comparison/comparisontools.py @@ -538,7 +538,7 @@ def do_confusion_matrix(event_counts1, event_counts2, match_12, match_event_coun matched_units2 = match_12[match_12 != -1].values unmatched_units1 = match_12[match_12 == -1].index - unmatched_units2 = unit2_ids[~np.in1d(unit2_ids, matched_units2)] + unmatched_units2 = unit2_ids[~np.isin(unit2_ids, matched_units2)] ordered_units1 = np.hstack([matched_units1, unmatched_units1]) ordered_units2 = np.hstack([matched_units2, unmatched_units2]) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index af4970a4ad..08f187895b 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -592,7 +592,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceRecording - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceRecording(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index 737087abc1..f35bc2b266 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -139,7 +139,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None): def _remove_channels(self, remove_channel_ids): from .channelslice import ChannelSliceSnippets - new_channel_ids = self.channel_ids[~np.in1d(self.channel_ids, remove_channel_ids)] + new_channel_ids = self.channel_ids[~np.isin(self.channel_ids, remove_channel_ids)] sub_recording = ChannelSliceSnippets(self, new_channel_ids) return sub_recording diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index 52f71c2399..056134a24e 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -346,7 +346,7 @@ def remove_units(self, remove_unit_ids): """ from spikeinterface import UnitsSelectionSorting - new_unit_ids = 
self.unit_ids[~np.in1d(self.unit_ids, remove_unit_ids)] + new_unit_ids = self.unit_ids[~np.isin(self.unit_ids, remove_unit_ids)] new_sorting = UnitsSelectionSorting(self, new_unit_ids) return new_sorting diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..07837bcef7 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -166,7 +166,7 @@ def generate_sorting( ) if empty_units is not None: - keep = ~np.in1d(labels, empty_units) + keep = ~np.isin(labels, empty_units) times = times[keep] labels = labels[keep] @@ -219,7 +219,7 @@ def add_synchrony_to_sorting(sorting, sync_event_ratio=0.3, seed=None): sample_index = spike["sample_index"] if sample_index not in units_used_for_spike: units_used_for_spike[sample_index] = np.array([spike["unit_index"]]) - units_not_used = unit_ids[~np.in1d(unit_ids, units_used_for_spike[sample_index])] + units_not_used = unit_ids[~np.isin(unit_ids, units_used_for_spike[sample_index])] if len(units_not_used) == 0: continue diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index a6b94c9b84..75182bf532 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -34,7 +34,7 @@ def test_ChannelSparsity(): for key, v in sparsity.unit_id_to_channel_ids.items(): assert key in unit_ids - assert np.all(np.in1d(v, channel_ids)) + assert np.all(np.isin(v, channel_ids)) for key, v in sparsity.unit_id_to_channel_indices.items(): assert key in unit_ids diff --git a/src/spikeinterface/curation/mergeunitssorting.py b/src/spikeinterface/curation/mergeunitssorting.py index 264ac3a56d..2d20a58453 100644 --- a/src/spikeinterface/curation/mergeunitssorting.py +++ b/src/spikeinterface/curation/mergeunitssorting.py @@ -59,7 +59,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties else: # we cannot automatically find new names new_unit_ids = [f"merge{i}" for i in range(num_merge)] - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(np.isin(new_unit_ids, keep_unit_ids)): raise ValueError( "Unable to find 'new_unit_ids' because it is a string and parents " "already contain merges. Pass a list of 'new_unit_ids' as an argument." @@ -68,7 +68,7 @@ def __init__(self, parent_sorting, units_to_merge, new_unit_ids=None, properties # dtype int new_unit_ids = list(max(parents_unit_ids) + 1 + np.arange(num_merge, dtype=dtype)) else: - if np.any(np.in1d(new_unit_ids, keep_unit_ids)): + if np.any(np.isin(new_unit_ids, keep_unit_ids)): raise ValueError("'new_unit_ids' already exist in the sorting.unit_ids. 
Provide new ones") assert len(new_unit_ids) == num_merge, "new_unit_ids must have the same size as units_to_merge" diff --git a/src/spikeinterface/extractors/bids.py b/src/spikeinterface/extractors/bids.py index 02e7d5677d..8b70722652 100644 --- a/src/spikeinterface/extractors/bids.py +++ b/src/spikeinterface/extractors/bids.py @@ -76,7 +76,7 @@ def _read_probe_group(folder, bids_name, recording_channel_ids): contact_ids = channels["contact_id"].values.astype("U") # extracting information of requested channels - keep = np.in1d(channel_ids, recording_channel_ids) + keep = np.isin(channel_ids, recording_channel_ids) channel_ids = channel_ids[keep] contact_ids = contact_ids[keep] diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..5a3542cdf9 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -47,9 +47,9 @@ def _set_params( def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = np.isin(self.spikes["unit_index"], unit_inds) new_amplitude_scalings = self._extension_data["amplitude_scalings"][spike_mask] return dict(amplitude_scalings=new_amplitude_scalings) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 62a4e2c320..b6f25cda95 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -28,13 +28,13 @@ def _select_extension_data(self, unit_ids): # load filter and save amplitude files sorting = self.waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) - (keep_unit_indices,) = np.nonzero(np.in1d(sorting.unit_ids, unit_ids)) + (keep_unit_indices,) = np.nonzero(np.isin(sorting.unit_ids, unit_ids)) new_extension_data = dict() for seg_index in range(sorting.get_num_segments()): amp_data_name = f"amplitude_segment_{seg_index}" amps = self._extension_data[amp_data_name] - filtered_idxs = np.in1d(spikes[seg_index]["unit_index"], keep_unit_indices) + filtered_idxs = np.isin(spikes[seg_index]["unit_index"], keep_unit_indices) new_extension_data[amp_data_name] = amps[filtered_idxs] return new_extension_data diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index c6f498f7e8..4cbe4d665e 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -32,9 +32,9 @@ def _set_params(self, ms_before=0.5, ms_after=0.5, method="center_of_mass", meth def _select_extension_data(self, unit_ids): old_unit_ids = self.waveform_extractor.sorting.unit_ids - unit_inds = np.flatnonzero(np.in1d(old_unit_ids, unit_ids)) + unit_inds = np.flatnonzero(np.isin(old_unit_ids, unit_ids)) - spike_mask = np.in1d(self.spikes["unit_index"], unit_inds) + spike_mask = np.isin(self.spikes["unit_index"], unit_inds) new_spike_locations = self._extension_data["spike_locations"][spike_mask] return dict(spike_locations=new_spike_locations) diff --git a/src/spikeinterface/preprocessing/interpolate_bad_channels.py b/src/spikeinterface/preprocessing/interpolate_bad_channels.py index e634d55e7f..95ecd0fe52 100644 --- 
a/src/spikeinterface/preprocessing/interpolate_bad_channels.py +++ b/src/spikeinterface/preprocessing/interpolate_bad_channels.py @@ -49,7 +49,7 @@ def __init__(self, recording, bad_channel_ids, sigma_um=None, p=1.3, weights=Non self.bad_channel_ids = bad_channel_ids self._bad_channel_idxs = recording.ids_to_indices(self.bad_channel_ids) - self._good_channel_idxs = ~np.in1d(np.arange(recording.get_num_channels()), self._bad_channel_idxs) + self._good_channel_idxs = ~np.isin(np.arange(recording.get_num_channels()), self._bad_channel_idxs) self._bad_channel_idxs.setflags(write=False) if sigma_um is None: diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index ee28485983..4e871492f8 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -544,7 +544,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue - spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] + spike_complexity = complexity[np.isin(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) diff --git a/src/spikeinterface/qualitymetrics/pca_metrics.py b/src/spikeinterface/qualitymetrics/pca_metrics.py index 59000211d4..ed06f7d738 100644 --- a/src/spikeinterface/qualitymetrics/pca_metrics.py +++ b/src/spikeinterface/qualitymetrics/pca_metrics.py @@ -152,8 +152,8 @@ def calculate_pc_metrics( neighbor_unit_ids = unit_ids neighbor_channel_indices = we.channel_ids_to_indices(neighbor_channel_ids) - labels = all_labels[np.in1d(all_labels, neighbor_unit_ids)] - pcs = all_pcs[np.in1d(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] + labels = all_labels[np.isin(all_labels, neighbor_unit_ids)] + pcs = all_pcs[np.isin(all_labels, neighbor_unit_ids)][:, :, neighbor_channel_indices] pcs_flat = pcs.reshape(pcs.shape[0], -1) func_args = ( @@ -506,7 +506,7 @@ def nearest_neighbors_isolation( other_units_ids = [ unit_id for unit_id in other_units_ids - if np.sum(np.in1d(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) + if np.sum(np.isin(sparsity.unit_id_to_channel_indices[unit_id], closest_chans_target_unit)) >= (n_channels_target_unit * min_spatial_overlap) ] @@ -536,10 +536,10 @@ def nearest_neighbors_isolation( if waveform_extractor.is_sparse(): # in this case, waveforms are sparse so we need to do some smart indexing waveforms_target_unit_sampled = waveforms_target_unit_sampled[ - :, :, np.in1d(closest_chans_target_unit, common_channel_idxs) + :, :, np.isin(closest_chans_target_unit, common_channel_idxs) ] waveforms_other_unit_sampled = waveforms_other_unit_sampled[ - :, :, np.in1d(closest_chans_other_unit, common_channel_idxs) + :, :, np.isin(closest_chans_other_unit, common_channel_idxs) ] else: waveforms_target_unit_sampled = waveforms_target_unit_sampled[:, :, common_channel_idxs] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py index 07c7db155c..772c99bc0a 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_matching.py @@ -502,7 +502,7 @@ def plot_errors_matching(benchmark, comp, unit_id, 
nb_spikes=200, metric="cosine seg_num = 0 # TODO: make compatible with multiple segments idx_1 = np.where(comp.get_labels1(unit_id)[seg_num] == label) idx_2 = benchmark.we.get_sampled_indices(unit_id)["spike_index"] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(np.isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] if len(intersection) == 0: print(f"No {label}s found for unit {unit_id}") @@ -552,7 +552,7 @@ def plot_errors_matching_all_neurons(benchmark, comp, nb_spikes=200, metric="cos for label in ["TP", "FN"]: idx_1 = np.where(comp.get_labels1(unit_id) == label)[0] - intersection = np.where(np.in1d(idx_2, idx_1))[0] + intersection = np.where(np.isin(idx_2, idx_1))[0] intersection = np.random.permutation(intersection)[:nb_spikes] wfs_sliced = wfs[intersection, :, :] diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py index 1514a63dd4..73497a59fd 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_peak_selection.py @@ -133,7 +133,7 @@ def run(self, peaks=None, positions=None, delta=0.2): matches = make_matching_events(times2, spikes1["sample_index"], int(delta * self.sampling_rate / 1000)) self.good_matches = matches["index1"] - garbage_matches = ~np.in1d(np.arange(len(times2)), self.good_matches) + garbage_matches = ~np.isin(np.arange(len(times2)), self.good_matches) garbage_channels = self.peaks["channel_index"][garbage_matches] garbage_peaks = times2[garbage_matches] nb_garbage = len(garbage_peaks) @@ -365,7 +365,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["full_gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["full_gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.gt_peaks["sample_index"], all_spikes[idx]) + mask = np.isin(self.gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.gt_peaks["amplitude"][mask]) ax.scatter(self.gt_positions["x"][mask], self.gt_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.gt_positions["x"][mask].mean(), self.gt_positions["y"][mask].mean()) @@ -391,7 +391,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["gt"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["gt"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) + mask = np.isin(self.sliced_gt_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.sliced_gt_peaks["amplitude"][mask]) ax.scatter( self.sliced_gt_positions["x"][mask], self.sliced_gt_positions["y"][mask], c=colors, s=1, alpha=0.5 @@ -420,7 +420,7 @@ def plot_clusters_amplitudes(self, title=None, show_probe=False, clim=(-100, 0), idx = self.waveforms["garbage"].get_sampled_indices(unit_id)["spike_index"] all_spikes = self.waveforms["garbage"].sorting.get_unit_spike_train(unit_id) - mask = np.in1d(self.garbage_peaks["sample_index"], all_spikes[idx]) + mask = np.isin(self.garbage_peaks["sample_index"], all_spikes[idx]) colors = scalarMap.to_rgba(self.garbage_peaks["amplitude"][mask]) ax.scatter(self.garbage_positions["x"][mask], self.garbage_positions["y"][mask], c=colors, s=1, alpha=0.5) x_mean, y_mean = (self.garbage_positions["x"][mask].mean(), 
self.garbage_positions["y"][mask].mean()) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6edf5af16b..23fdbf1979 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -30,7 +30,7 @@ def _split_waveforms( local_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(cluster_probability > probability_thr) - local_labels_with_noise[~np.in1d(local_labels_with_noise, persistent_clusters)] = -1 + local_labels_with_noise[~np.isin(local_labels_with_noise, persistent_clusters)] = -1 # remove super small cluster labels, count = np.unique(local_labels_with_noise[:valid_size], return_counts=True) @@ -43,7 +43,7 @@ def _split_waveforms( to_remove = labels[(count / valid_size) < minimum_cluster_size_ratio] # ~ print('to_remove', to_remove, count / valid_size) if to_remove.size > 0: - local_labels_with_noise[np.in1d(local_labels_with_noise, to_remove)] = -1 + local_labels_with_noise[np.isin(local_labels_with_noise, to_remove)] = -1 local_labels_with_noise[valid_size:] = -2 @@ -123,7 +123,7 @@ def _split_waveforms_nested( active_labels_with_noise = clustering[0] cluster_probability = clustering[2] (persistent_clusters,) = np.nonzero(clustering[2] > probability_thr) - active_labels_with_noise[~np.in1d(active_labels_with_noise, persistent_clusters)] = -1 + active_labels_with_noise[~np.isin(active_labels_with_noise, persistent_clusters)] = -1 active_labels = active_labels_with_noise[active_ind < valid_size] active_labels_set = np.unique(active_labels) @@ -381,9 +381,9 @@ def auto_clean_clustering( continue wfs0 = wfs_arrays[label0] - wfs0 = wfs0[:, :, np.in1d(channel_inds0, used_chans)] + wfs0 = wfs0[:, :, np.isin(channel_inds0, used_chans)] wfs1 = wfs_arrays[label1] - wfs1 = wfs1[:, :, np.in1d(channel_inds1, used_chans)] + wfs1 = wfs1[:, :, np.isin(channel_inds1, used_chans)] # TODO : remove assert wfs0.shape[2] == wfs1.shape[2] diff --git a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py index aeec14158f..08ce9f6791 100644 --- a/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py +++ b/src/spikeinterface/sortingcomponents/clustering/sliding_hdbscan.py @@ -198,7 +198,7 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): for chan_ind in prev_local_chan_inds: if total_count[chan_ind] == 0: continue - # ~ inds, = np.nonzero(np.in1d(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) + # ~ inds, = np.nonzero(np.isin(peaks['channel_index'], closest_channels[chan_ind]) & (peak_labels==0)) (inds,) = np.nonzero((peaks["channel_index"] == chan_ind) & (peak_labels == 0)) if inds.size <= d["min_spike_on_channel"]: chan_amps[chan_ind] = 0.0 @@ -235,12 +235,12 @@ def _find_clusters(cls, recording, peaks, wfs_arrays, sparsity_mask, noise, d): (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # TODO: only for debug, remove later - assert np.all(np.in1d(local_chan_inds, wf_chans)) + assert np.all(np.isin(local_chan_inds, wf_chans)) # none label spikes wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, local_chan_inds)] + wfs_chan = wfs_chan[:, :, np.isin(wf_chans, local_chan_inds)] wfs.append(wfs_chan) # put noise to enhance clusters @@ -517,7 +517,7 @@ def 
_collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, (wf_chans,) = np.nonzero(sparsity_mask[chan_ind]) # print('wf_chans', wf_chans) # TODO: only for debug, remove later - assert np.all(np.in1d(wanted_chans, wf_chans)) + assert np.all(np.isin(wanted_chans, wf_chans)) wfs_chan = wfs_arrays[chan_ind] # TODO: only for debug, remove later @@ -525,7 +525,7 @@ def _collect_sparse_waveforms(peaks, wfs_arrays, closest_channels, peak_labels, wfs_chan = wfs_chan[inds, :, :] # only some channels - wfs_chan = wfs_chan[:, :, np.in1d(wf_chans, wanted_chans)] + wfs_chan = wfs_chan[:, :, np.isin(wf_chans, wanted_chans)] wfs.append(wfs_chan) wfs = np.concatenate(wfs, axis=0) diff --git a/src/spikeinterface/widgets/unit_waveforms_density_map.py b/src/spikeinterface/widgets/unit_waveforms_density_map.py index e8a6868e92..b3391c0712 100644 --- a/src/spikeinterface/widgets/unit_waveforms_density_map.py +++ b/src/spikeinterface/widgets/unit_waveforms_density_map.py @@ -103,7 +103,7 @@ def __init__( if same_axis and not np.array_equal(chan_inds, shared_chan_inds): # add more channels if necessary wfs_ = np.zeros((wfs.shape[0], wfs.shape[1], shared_chan_inds.size), dtype=float) - mask = np.in1d(shared_chan_inds, chan_inds) + mask = np.isin(shared_chan_inds, chan_inds) wfs_[:, :, mask] = wfs wfs_[:, :, ~mask] = np.nan wfs = wfs_ From 7aa96d3a81c685dfb9d242fc5e3057d352c376dd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 13:31:16 +0000 Subject: [PATCH 077/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/comparison/studytools.py | 5 +- src/spikeinterface/sorters/basesorter.py | 2 +- src/spikeinterface/sorters/launcher.py | 45 ++++++++--------- .../sorters/tests/test_launcher.py | 49 +++++++++---------- 4 files changed, 44 insertions(+), 57 deletions(-) diff --git a/src/spikeinterface/comparison/studytools.py b/src/spikeinterface/comparison/studytools.py index 00119c1586..26d2c1ad6f 100644 --- a/src/spikeinterface/comparison/studytools.py +++ b/src/spikeinterface/comparison/studytools.py @@ -29,9 +29,6 @@ from .paircomparisons import compare_sorter_to_ground_truth - - - # This is deprecated and will be removed def iter_working_folder(working_folder): working_folder = Path(working_folder) @@ -54,6 +51,7 @@ def iter_working_folder(working_folder): continue yield rec_name, sorter_name, output_folder + # This is deprecated and will be removed def iter_sorting_output(working_folder): """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" @@ -63,7 +61,6 @@ def iter_sorting_output(working_folder): yield rec_name, sorter_name, sorting - def setup_comparison_study(study_folder, gt_dict, **job_kwargs): """ Based on a dict of (recording, sorting) create the study folder. 
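The patches above replace np.in1d with np.isin throughout the codebase: the first pass used a bare isin without the np prefix, the revert restored np.in1d, and the follow-up re-applies the change with the correct alias. np.isin is the spelling NumPy recommends over np.in1d, and for 1-D inputs the two return the same boolean mask. A minimal sketch of the idiom used in these hunks, illustrative only and not part of any patch:

import numpy as np

channel_ids = np.arange(5)
remove_channel_ids = np.array([1, 3])

mask_old = np.in1d(channel_ids, remove_channel_ids)   # legacy spelling
mask_new = np.isin(channel_ids, remove_channel_ids)   # recommended spelling, same result for 1-D inputs
assert np.array_equal(mask_old, mask_new)

# the "keep everything except these ids" idiom seen in baserecording/basesorting above
kept_ids = channel_ids[~np.isin(channel_ids, remove_channel_ids)]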
diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index aa76809b58..c7581ba1e1 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -421,4 +421,4 @@ def is_log_ok(output_folder): run_time = log.get("run_time", None) ok = run_time is not None return ok - return False \ No newline at end of file + return False diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index b158eba22d..d04a89fdf1 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -11,7 +11,7 @@ import sys import warnings -from spikeinterface.core import aggregate_units +from spikeinterface.core import aggregate_units from .sorterlist import sorter_dict from .runsorter import run_sorter @@ -28,6 +28,7 @@ _implemented_engine = list(_default_engine_kwargs.keys()) + def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=False): """ Run several :py:func:`run_sorter()` sequentially or in parallel given a list of jobs. @@ -38,18 +39,18 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal for job in job_list: run_sorter(**job) - + The following engines block the I/O: * "loop" * "joblib" * "multiprocessing" * "dask" - + The following engines are *asynchronous*: * "slurm" - + Where *blocking* means that this function is blocking until the results are returned. - This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), + This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be aware when jobs are finish. In this *asynchronous* case, the :py:func:read_sorter_folder() helps to retrieve individual results. @@ -61,7 +62,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal A list a dict that are propagated to run_sorter(...) engine: str "loop", "joblib", "dask", "slurm" The engine to run the list. - * "loop": a simple loop. This engine is + * "loop": a simple loop. This engine is engine_kwargs: dict return_output: bool, dfault False @@ -79,8 +80,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs_.update(_default_engine_kwargs[engine]) engine_kwargs_.update(engine_kwargs) engine_kwargs = engine_kwargs_ - - if return_output: assert engine in ("loop", "joblib", "processpoolexecutor") @@ -109,7 +108,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal max_workers = engine_kwargs["max_workers"] mp_context = engine_kwargs["mp_context"] - + with ProcessPoolExecutor(max_workers=max_workers, mp_context=mp_context) as executor: futures = [] for kwargs in job_list: @@ -173,6 +172,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal return out + _slurm_script = """#! 
{python} from numpy import array from spikeinterface import load_extractor @@ -189,8 +189,6 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal """ - - def run_sorter_by_property( sorter_name, recording, @@ -258,10 +256,10 @@ def run_sorter_by_property( """ if mode_if_folder_exists is not None: warnings.warn( - "run_sorter_by_property(): mode_if_folder_exists is not used anymore", - DeprecationWarning, - stacklevel=2, - ) + "run_sorter_by_property(): mode_if_folder_exists is not used anymore", + DeprecationWarning, + stacklevel=2, + ) working_folder = Path(working_folder).absolute() @@ -269,7 +267,7 @@ def run_sorter_by_property( f"The 'grouping_property' {grouping_property} is not " f"a recording property!" ) recording_dict = recording.split_by(grouping_property) - + job_list = [] for k, rec in recording_dict.items(): job = dict( @@ -279,10 +277,10 @@ def run_sorter_by_property( verbose=verbose, docker_image=docker_image, singularity_image=singularity_image, - **sorter_params + **sorter_params, ) job_list.append(job) - + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=True) unit_groups = [] @@ -298,7 +296,6 @@ def run_sorter_by_property( return aggregate_sorting - # This is deprecated and will be removed def run_sorters( sorter_list, @@ -316,7 +313,7 @@ def run_sorters( """ This function is deprecated and will be removed in version 0.100 Please use run_sorter_jobs() instead. - + Parameters ---------- sorter_list: list of str @@ -401,7 +398,6 @@ def run_sorters( elif mode_if_folder_exists == "overwrite": shutil.rmtree(str(output_folder)) elif mode_if_folder_exists == "keep": - if is_log_ok(output_folder): continue else: @@ -418,14 +414,13 @@ def run_sorters( verbose=verbose, docker_image=docker_image, singularity_image=singularity_image, - **params + **params, ) job_list.append(job) - + sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=with_output) if with_output: - keys = [(rec_name, sorter_name) for rec_name in recording_dict for sorter_name in sorter_list ] + keys = [(rec_name, sorter_name) for rec_name in recording_dict for sorter_name in sorter_list] results = dict(zip(keys, sorting_list)) return results - diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index ecab64ede6..14c938f8ba 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -7,6 +7,7 @@ from pathlib import Path from spikeinterface.core import load_extractor + # from spikeinterface.extractors import toy_example from spikeinterface import generate_ground_truth_recording from spikeinterface.sorters import run_sorter_jobs, run_sorters, run_sorter_by_property @@ -17,12 +18,13 @@ else: cache_folder = Path("cache_folder") / "sorters" -base_output = cache_folder / 'sorter_output' +base_output = cache_folder / "sorter_output" # no need to have many num_recordings = 2 sorters = ["tridesclous2"] + def setup_module(): base_seed = 42 for i in range(num_recordings): @@ -44,16 +46,18 @@ def get_job_list(): for i in range(num_recordings): for sorter_name in sorters: recording = load_extractor(cache_folder / f"toy_rec_{i}") - kwargs = dict(sorter_name=sorter_name, - recording=recording, - output_folder=base_output / f"{sorter_name}_rec{i}", - verbose=True, - raise_error=False, - ) + kwargs = dict( + sorter_name=sorter_name, + recording=recording, + output_folder=base_output / 
f"{sorter_name}_rec{i}", + verbose=True, + raise_error=False, + ) jobs.append(kwargs) - + return jobs + @pytest.fixture(scope="module") def job_list(): return get_job_list() @@ -66,23 +70,24 @@ def test_run_sorter_jobs_loop(job_list): print(sortings) - - def test_run_sorter_jobs_joblib(job_list): if base_output.is_dir(): shutil.rmtree(base_output) - sortings = run_sorter_jobs(job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True) + sortings = run_sorter_jobs( + job_list, engine="joblib", engine_kwargs=dict(n_jobs=2, backend="loky"), return_output=True + ) print(sortings) + def test_run_sorter_jobs_processpoolexecutor(job_list): if base_output.is_dir(): shutil.rmtree(base_output) - sortings = run_sorter_jobs(job_list, engine="processpoolexecutor", engine_kwargs=dict(max_workers=2), return_output=True) + sortings = run_sorter_jobs( + job_list, engine="processpoolexecutor", engine_kwargs=dict(max_workers=2), return_output=True + ) print(sortings) - - @pytest.mark.skipif(True, reason="This is tested locally") def test_run_sorter_jobs_dask(job_list): if base_output.is_dir(): @@ -92,12 +97,13 @@ def test_run_sorter_jobs_dask(job_list): from dask.distributed import Client test_mode = "local" - # test_mode = "client_slurm" + # test_mode = "client_slurm" if test_mode == "local": client = Client() elif test_mode == "client_slurm": from dask_jobqueue import SLURMCluster + cluster = SLURMCluster( processes=1, cores=1, @@ -133,7 +139,7 @@ def test_run_sorter_jobs_slurm(job_list): tmp_script_folder=tmp_script_folder, cpus_per_task=32, mem="32G", - ) + ), ) @@ -165,12 +171,9 @@ def test_run_sorter_by_property(): assert all([g in group_names1 for g in sorting1.get_property("group")]) - # run_sorters is deprecated # This will test will be removed in next release def test_run_sorters_with_list(): - - working_folder = cache_folder / "test_run_sorters_list" if working_folder.is_dir(): shutil.rmtree(working_folder) @@ -185,12 +188,9 @@ def test_run_sorters_with_list(): run_sorters(sorter_list, recording_list, working_folder, engine="loop", verbose=False, with_output=False) - - # run_sorters is deprecated # This will test will be removed in next release def test_run_sorters_with_dict(): - working_folder = cache_folder / "test_run_sorters_dict" if working_folder.is_dir(): shutil.rmtree(working_folder) @@ -232,9 +232,6 @@ def test_run_sorters_with_dict(): ) - - - if __name__ == "__main__": # setup_module() job_list = get_job_list() @@ -251,5 +248,3 @@ def test_run_sorters_with_dict(): # this deprecated # test_run_sorters_with_list() # test_run_sorters_with_dict() - - From 0bd70dd27b23e799696ef966d9b84a4eac3c3b22 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 15 Sep 2023 16:54:36 +0200 Subject: [PATCH 078/322] detect_bad_channels some recording is not ordered. Add more chunk default computation. 
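The diff below makes two changes. It raises the sampling defaults (chunk_duration_s 0.3 -> 0.5 and num_random_chunks 10 -> 100, since many chunks make the estimate more reproducible), and it makes the per-chunk labelling work for recordings whose channels are not already ordered by depth, by sorting channels before calling the IBL detector on each chunk and mapping the labels back with the inverse ordering. A small self-contained sketch of that order / inverse-order pattern, with fabricated data and a stand-in detector rather than the real detect_bad_channels_ibl:

import numpy as np

depths = np.array([40.0, 0.0, 20.0, 60.0])              # one depth per channel (made up)
order_f = np.argsort(depths)                             # original order -> depth order
order_r = np.argsort(order_f, kind="stable")             # depth order -> original order

rng = np.random.default_rng(0)
chunk = rng.standard_normal((1000, depths.size))         # (samples, channels) fake traces
chunk_sorted = chunk[:, order_f]                          # the detector sees depth-ordered channels
labels_sorted = (np.ptp(chunk_sorted, axis=0) < 0.5).astype(np.int8)  # stand-in detector, not the IBL one
labels = labels_sorted[order_r]                           # labels back in the recording's channel order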
--- .../preprocessing/detect_bad_channels.py | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 0f4800c6e8..35ed2c349b 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -17,8 +17,8 @@ def detect_bad_channels( n_neighbors=11, nyquist_threshold=0.8, direction="y", - chunk_duration_s=0.3, - num_random_chunks=10, + chunk_duration_s=.5, + num_random_chunks=100, welch_window_ms=10.0, highpass_filter_cutoff=300, neighborhood_r2_threshold=0.9, @@ -81,9 +81,10 @@ def detect_bad_channels( highpass_filter_cutoff : float If the recording is not filtered, the cutoff frequency of the highpass filter, by default 300 chunk_duration_s : float - Duration of each chunk, by default 0.3 + Duration of each chunk, by default 0.5 num_random_chunks : int - Number of random chunks, by default 10 + Number of random chunks, by default 100 + Having many chunks is important for reproducibility. welch_window_ms : float Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms neighborhood_r2_threshold : float, default 0.95 @@ -174,20 +175,18 @@ def detect_bad_channels( channel_locations = recording.get_channel_locations() dim = ["x", "y", "z"].index(direction) assert dim < channel_locations.shape[1], f"Direction {direction} is wrong" - locs_depth = channel_locations[:, dim] - if np.array_equal(np.sort(locs_depth), locs_depth): + order_f, order_r = order_channels_by_depth(recording=recording, dimensions=("x", "y")) + if np.all(np.diff(order_f) == 1): + # already ordered order_f = None order_r = None - else: - # sort by x, y to avoid ambiguity - order_f, order_r = order_channels_by_depth(recording=recording, dimensions=("x", "y")) # Create empty channel labels and fill with bad-channel detection estimate for each chunk chunk_channel_labels = np.zeros((recording.get_num_channels(), len(random_data)), dtype=np.int8) for i, random_chunk in enumerate(random_data): - random_chunk_sorted = random_chunk[order_f] if order_f is not None else random_chunk - chunk_channel_labels[:, i] = detect_bad_channels_ibl( + random_chunk_sorted = random_chunk[:, order_f] if order_f is not None else random_chunk + chunk_labels = detect_bad_channels_ibl( raw=random_chunk_sorted, fs=recording.sampling_frequency, psd_hf_threshold=psd_hf_threshold, @@ -198,11 +197,10 @@ def detect_bad_channels( nyquist_threshold=nyquist_threshold, welch_window_ms=welch_window_ms, ) + chunk_channel_labels[:, i] = chunk_labels[order_r] if order_r is not None else chunk_labels # Take the mode of the chunk estimates as final result. Convert to binary good / bad channel output. 
mode_channel_labels, _ = scipy.stats.mode(chunk_channel_labels, axis=1, keepdims=False) - if order_r is not None: - mode_channel_labels = mode_channel_labels[order_r] (bad_inds,) = np.where(mode_channel_labels != 0) bad_channel_ids = recording.channel_ids[bad_inds] @@ -306,7 +304,7 @@ def detect_bad_channels_ibl( n_neighbors : int, optional Number of neighbors to compute median fitler, by default 11 nyquist_threshold : float, optional - Threshold on Nyquist frequency to calculate HF noise band, by default 0.8 + Threshold on Nyquist frequency to calcureclate HF noise band, by default 0.8 welch_window_ms: float Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms Returns From 05ad95be8f9811ca86d6905edc13a5b5d4c2251b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 14:55:58 +0000 Subject: [PATCH 079/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/detect_bad_channels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 35ed2c349b..fa61755aba 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -17,7 +17,7 @@ def detect_bad_channels( n_neighbors=11, nyquist_threshold=0.8, direction="y", - chunk_duration_s=.5, + chunk_duration_s=0.5, num_random_chunks=100, welch_window_ms=10.0, highpass_filter_cutoff=300, From bd26723e1cd1a86660abbe23d344cb299f9140ad Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Sun, 17 Sep 2023 09:40:10 -0400 Subject: [PATCH 080/322] fix folder --- .github/workflows/installation-tips-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/installation-tips-test.yml b/.github/workflows/installation-tips-test.yml index 0e522e6baa..b3bf08954d 100644 --- a/.github/workflows/installation-tips-test.yml +++ b/.github/workflows/installation-tips-test.yml @@ -30,4 +30,4 @@ jobs: - name: Test Conda Environment Creation uses: conda-incubator/setup-miniconda@v2.2.0 with: - environment-file: ./installations_tips/full_spikeinterface_environment_${{ matrix.label }}.yml + environment-file: ./installation_tips/full_spikeinterface_environment_${{ matrix.label }}.yml From c57cfa71fae9e0cc4aada7e72435cb8f3667eecf Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 18 Sep 2023 11:03:29 +0200 Subject: [PATCH 081/322] Add an option to flip the order by depth --- src/spikeinterface/core/recording_tools.py | 7 ++++++- src/spikeinterface/core/tests/test_recording_tools.py | 2 ++ src/spikeinterface/preprocessing/depth_order.py | 8 ++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index e5901d7ee0..8236671a3b 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -302,7 +302,7 @@ def get_chunk_with_margin( return traces_chunk, left_margin, right_margin -def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y")): +def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), flip=False): """ Order channels by depth, by first ordering the x-axis, and then the y-axis. 
@@ -316,6 +316,9 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y")): If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') + flip: bool, default False + If flip is False then the order is bottom first (starting from tip of the probe). + If flip is True then the order is upper first. Returns ------- @@ -341,6 +344,8 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y")): assert dim < ndim, "Invalid dimensions!" locations_to_sort += (locations[:, dim],) order_f = np.lexsort(locations_to_sort) + if flip: + order_f = order_f[::-1] order_r = np.argsort(order_f, kind="stable") return order_f, order_r diff --git a/src/spikeinterface/core/tests/test_recording_tools.py b/src/spikeinterface/core/tests/test_recording_tools.py index 6e92d155fe..1d99b192ee 100644 --- a/src/spikeinterface/core/tests/test_recording_tools.py +++ b/src/spikeinterface/core/tests/test_recording_tools.py @@ -138,11 +138,13 @@ def test_order_channels_by_depth(): order_1d, order_r1d = order_channels_by_depth(rec, dimensions="y") order_2d, order_r2d = order_channels_by_depth(rec, dimensions=("x", "y")) locations_rev = locations_copy[order_1d][order_r1d] + order_2d_fliped, order_r2d_fliped = order_channels_by_depth(rec, dimensions=("x", "y"), flip=True) assert np.array_equal(locations[:, 1], locations_copy[order_1d][:, 1]) assert np.array_equal(locations_copy[order_1d][:, 1], locations_copy[order_2d][:, 1]) assert np.array_equal(locations, locations_copy[order_2d]) assert np.array_equal(locations_copy, locations_copy[order_2d][order_r2d]) + assert np.array_equal(order_2d[::-1], order_2d_fliped) if __name__ == "__main__": diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 0b8d8a730b..b9edded883 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -18,13 +18,16 @@ class DepthOrderRecording(ChannelSliceRecording): If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') + flip: bool, default False + If flip is False then the order is bottom first (starting from tip of the probe). + If flip is True then the order is upper first. 
""" name = "depth_order" installed = True - def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y")): - order_f, order_r = order_channels_by_depth(parent_recording, channel_ids=channel_ids, dimensions=dimensions) + def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y"), flip=False): + order_f, order_r = order_channels_by_depth(parent_recording, channel_ids=channel_ids, dimensions=dimensions, flip=flip) reordered_channel_ids = parent_recording.channel_ids[order_f] ChannelSliceRecording.__init__( self, @@ -35,6 +38,7 @@ def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y")): parent_recording=parent_recording, channel_ids=channel_ids, dimensions=dimensions, + flip=flip, ) From ef165cb4a2d43df592a57a2c801c62ebe5ce780b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 09:03:59 +0000 Subject: [PATCH 082/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/depth_order.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index b9edded883..43c43a5843 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -27,7 +27,9 @@ class DepthOrderRecording(ChannelSliceRecording): installed = True def __init__(self, parent_recording, channel_ids=None, dimensions=("x", "y"), flip=False): - order_f, order_r = order_channels_by_depth(parent_recording, channel_ids=channel_ids, dimensions=dimensions, flip=flip) + order_f, order_r = order_channels_by_depth( + parent_recording, channel_ids=channel_ids, dimensions=dimensions, flip=flip + ) reordered_channel_ids = parent_recording.channel_ids[order_f] ChannelSliceRecording.__init__( self, From d431e4ebe817993a74173f414eda139c21a83171 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Mon, 18 Sep 2023 17:49:10 +0200 Subject: [PATCH 083/322] Update src/spikeinterface/preprocessing/detect_bad_channels.py Co-authored-by: Alessio Buccino --- src/spikeinterface/preprocessing/detect_bad_channels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index fa61755aba..3c712946eb 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -304,7 +304,7 @@ def detect_bad_channels_ibl( n_neighbors : int, optional Number of neighbors to compute median fitler, by default 11 nyquist_threshold : float, optional - Threshold on Nyquist frequency to calcureclate HF noise band, by default 0.8 + Threshold on Nyquist frequency to calculate HF noise band, by default 0.8 welch_window_ms: float Window size for the scipy.signal.welch that will be converted to nperseg, by default 10ms Returns From ef0d66e6cfeea0b1f3392c5a0a8758194a9c884d Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 19 Sep 2023 09:27:18 +0200 Subject: [PATCH 084/322] Bringing back right searches --- src/spikeinterface/core/generate.py | 8 +++----- src/spikeinterface/curation/remove_duplicated_spikes.py | 3 ++- src/spikeinterface/postprocessing/spike_locations.py | 3 ++- .../sortingcomponents/motion_interpolation.py | 5 +++-- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git 
a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 6f85e76f1f..33f3dea923 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1209,11 +1209,9 @@ def get_traces( else: traces = np.zeros([end_frame - start_frame, n_channels], dtype=self.dtype) - start, end = np.searchsorted( - self.spike_vector["sample_index"], - [start_frame - self.templates.shape[1], end_frame + self.templates.shape[1] + 1], - side="left", - ) + start = np.searchsorted(self.spike_vector["sample_index"], start_frame - self.templates.shape[1], side="left") + end = np.searchsorted(self.spike_vector["sample_index"], end_frame + self.templates.shape[1], side="right") + for i in range(start, end): spike = self.spike_vector[i] diff --git a/src/spikeinterface/curation/remove_duplicated_spikes.py b/src/spikeinterface/curation/remove_duplicated_spikes.py index d01ca1f6a1..04af69b37a 100644 --- a/src/spikeinterface/curation/remove_duplicated_spikes.py +++ b/src/spikeinterface/curation/remove_duplicated_spikes.py @@ -82,7 +82,8 @@ def get_unit_spike_train( if end_frame == None: end_frame = spike_train[-1] if len(spike_train) > 0 else 0 - start, end = np.searchsorted(spike_train, [start_frame, end_frame + 1], side="left") + start = np.searchsorted(spike_train, start_frame, side="left") + end = np.searchsorted(spike_train, end_frame, side="right") return spike_train[start:end] diff --git a/src/spikeinterface/postprocessing/spike_locations.py b/src/spikeinterface/postprocessing/spike_locations.py index 5f23e25b32..c6f498f7e8 100644 --- a/src/spikeinterface/postprocessing/spike_locations.py +++ b/src/spikeinterface/postprocessing/spike_locations.py @@ -77,7 +77,8 @@ def get_data(self, outputs="concatenated"): elif outputs == "by_unit": locations_by_unit = [] for segment_index in range(self.waveform_extractor.get_num_segments()): - i0, i1 = np.searchsorted(self.spikes["segment_index"], [segment_index, segment_index + 1], side="left") + i0 = np.searchsorted(self.spikes["segment_index"], segment_index, side="left") + i1 = np.searchsorted(self.spikes["segment_index"], segment_index, side="right") spikes = self.spikes[i0:i1] locations = self._extension_data["spike_locations"][i0:i1] diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index 18bb4f5a99..9a4cd688c5 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -155,8 +155,9 @@ def interpolate_motion_on_traces( **spatial_interpolation_kwargs, ) - i0, i1 = np.searchsorted(bin_inds, [bin_ind, bin_ind + 1], side="left") - + i0 = np.searchsorted(bin_inds, bin_ind, side="left") + i1 = np.searchsorted(bin_inds, bin_ind, side="right") + # here we use a simple np.matmul even if dirft_kernel can be super sparse. 
# because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing # in ChunkRecordingExecutor) From f2d702a7e20f7fb6459a18b17dd9a4881c1fe337 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 07:27:40 +0000 Subject: [PATCH 085/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/generate.py | 1 - src/spikeinterface/sortingcomponents/motion_interpolation.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 33f3dea923..9adda4cb2b 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1212,7 +1212,6 @@ def get_traces( start = np.searchsorted(self.spike_vector["sample_index"], start_frame - self.templates.shape[1], side="left") end = np.searchsorted(self.spike_vector["sample_index"], end_frame + self.templates.shape[1], side="right") - for i in range(start, end): spike = self.spike_vector[i] t = spike["sample_index"] diff --git a/src/spikeinterface/sortingcomponents/motion_interpolation.py b/src/spikeinterface/sortingcomponents/motion_interpolation.py index 9a4cd688c5..b4a44105e4 100644 --- a/src/spikeinterface/sortingcomponents/motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/motion_interpolation.py @@ -157,7 +157,7 @@ def interpolate_motion_on_traces( i0 = np.searchsorted(bin_inds, bin_ind, side="left") i1 = np.searchsorted(bin_inds, bin_ind, side="right") - + # here we use a simple np.matmul even if dirft_kernel can be super sparse. # because the speed for a sparse matmul is not so good when we disable multi threaad (due multi processing # in ChunkRecordingExecutor) From 9d07ec2fb467e4bc035f2e36566ea9a2aead772e Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 19 Sep 2023 09:31:02 +0200 Subject: [PATCH 086/322] One more --- src/spikeinterface/core/generate.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 9adda4cb2b..401c498f03 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1109,9 +1109,8 @@ def __init__( num_samples = [num_samples] for segment_index in range(sorting.get_num_segments()): - start, end = np.searchsorted( - self.spike_vector["segment_index"], [segment_index, segment_index + 1], side="left" - ) + start = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="left") + end = np.searchsorted(self.spike_vector["segment_index"], segment_index, side="right") spikes = self.spike_vector[start:end] amplitude_vec = amplitude_vector[start:end] if amplitude_vector is not None else None upsample_vec = upsample_vector[start:end] if upsample_vector is not None else None From 8d9ce49d14df99c1901854a398c2862c13184ceb Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 10:00:38 +0200 Subject: [PATCH 087/322] group in same file CollisionGTComparison and CollisionGTStudy group in same file CorrelogramGTComparison and CorrelogramGTStudy --- .../{collisioncomparison.py => collision.py} | 94 ++++++++++++++++++- .../comparison/collisionstudy.py | 88 ----------------- ...orrelogramcomparison.py => correlogram.py} | 79 +++++++++++++++- .../comparison/correlogramstudy.py | 76 --------------- 4 files changed, 170 insertions(+), 167 deletions(-) rename 
src/spikeinterface/comparison/{collisioncomparison.py => collision.py} (58%) delete mode 100644 src/spikeinterface/comparison/collisionstudy.py rename src/spikeinterface/comparison/{correlogramcomparison.py => correlogram.py} (58%) delete mode 100644 src/spikeinterface/comparison/correlogramstudy.py diff --git a/src/spikeinterface/comparison/collisioncomparison.py b/src/spikeinterface/comparison/collision.py similarity index 58% rename from src/spikeinterface/comparison/collisioncomparison.py rename to src/spikeinterface/comparison/collision.py index 3b279717b7..864809b04b 100644 --- a/src/spikeinterface/comparison/collisioncomparison.py +++ b/src/spikeinterface/comparison/collision.py @@ -1,8 +1,14 @@ -import numpy as np - from .paircomparisons import GroundTruthComparison +from .groundtruthstudy import GroundTruthStudy +from .studytools import iter_computed_sorting ## TODO remove this from .comparisontools import make_collision_events +import numpy as np + + + + + class CollisionGTComparison(GroundTruthComparison): """ @@ -156,3 +162,87 @@ def compute_collision_by_similarity(self, similarity_matrix, unit_ids=None, good pair_names = pair_names[order] return similarities, recall_scores, pair_names + + + +class CollisionGTStudy(GroundTruthStudy): + def run_comparisons(self, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): + self.comparisons = {} + for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): + gt_sorting = self.get_ground_truth(rec_name) + comp = CollisionGTComparison( + gt_sorting, sorting, exhaustive_gt=exhaustive_gt, collision_lag=collision_lag, nbins=nbins + ) + self.comparisons[(rec_name, sorter_name)] = comp + self.exhaustive_gt = exhaustive_gt + self.collision_lag = collision_lag + + def get_lags(self): + fs = self.comparisons[(self.rec_names[0], self.sorter_names[0])].sorting1.get_sampling_frequency() + lags = self.comparisons[(self.rec_names[0], self.sorter_names[0])].bins / fs * 1000 + return lags + + def precompute_scores_by_similarities(self, good_only=True, min_accuracy=0.9): + if not hasattr(self, "_good_only") or self._good_only != good_only: + import sklearn + + similarity_matrix = {} + for rec_name in self.rec_names: + templates = self.get_templates(rec_name) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + + self.all_similarities = {} + self.all_recall_scores = {} + self.good_only = good_only + + for sorter_ind, sorter_name in enumerate(self.sorter_names): + # loop over recordings + all_similarities = [] + all_recall_scores = [] + + for rec_name in self.rec_names: + if (rec_name, sorter_name) in self.comparisons.keys(): + comp = self.comparisons[(rec_name, sorter_name)] + similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( + similarity_matrix[rec_name], good_only=good_only, min_accuracy=min_accuracy + ) + + all_similarities.append(similarities) + all_recall_scores.append(recall_scores) + + self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) + self.all_recall_scores[sorter_name] = np.concatenate(all_recall_scores, axis=0) + + def get_mean_over_similarity_range(self, similarity_range, sorter_name): + idx = (self.all_similarities[sorter_name] >= similarity_range[0]) & ( + self.all_similarities[sorter_name] <= similarity_range[1] + ) + all_similarities = self.all_similarities[sorter_name][idx] + all_recall_scores = self.all_recall_scores[sorter_name][idx] + + order = 
np.argsort(all_similarities) + all_similarities = all_similarities[order] + all_recall_scores = all_recall_scores[order, :] + + mean_recall_scores = np.nanmean(all_recall_scores, axis=0) + + return mean_recall_scores + + def get_lag_profile_over_similarity_bins(self, similarity_bins, sorter_name): + all_similarities = self.all_similarities[sorter_name] + all_recall_scores = self.all_recall_scores[sorter_name] + + order = np.argsort(all_similarities) + all_similarities = all_similarities[order] + all_recall_scores = all_recall_scores[order, :] + + result = {} + + for i in range(similarity_bins.size - 1): + cmin, cmax = similarity_bins[i], similarity_bins[i + 1] + amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) + mean_recall_scores = np.nanmean(all_recall_scores[amin:amax], axis=0) + result[(cmin, cmax)] = mean_recall_scores + + return result diff --git a/src/spikeinterface/comparison/collisionstudy.py b/src/spikeinterface/comparison/collisionstudy.py deleted file mode 100644 index 34a556e8b9..0000000000 --- a/src/spikeinterface/comparison/collisionstudy.py +++ /dev/null @@ -1,88 +0,0 @@ -from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting -from .collisioncomparison import CollisionGTComparison - -import numpy as np - - -class CollisionGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CollisionGTComparison( - gt_sorting, sorting, exhaustive_gt=exhaustive_gt, collision_lag=collision_lag, nbins=nbins - ) - self.comparisons[(rec_name, sorter_name)] = comp - self.exhaustive_gt = exhaustive_gt - self.collision_lag = collision_lag - - def get_lags(self): - fs = self.comparisons[(self.rec_names[0], self.sorter_names[0])].sorting1.get_sampling_frequency() - lags = self.comparisons[(self.rec_names[0], self.sorter_names[0])].bins / fs * 1000 - return lags - - def precompute_scores_by_similarities(self, good_only=True, min_accuracy=0.9): - if not hasattr(self, "_good_only") or self._good_only != good_only: - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_recall_scores = {} - self.good_only = good_only - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_similarities = [] - all_recall_scores = [] - - for rec_name in self.rec_names: - if (rec_name, sorter_name) in self.comparisons.keys(): - comp = self.comparisons[(rec_name, sorter_name)] - similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( - similarity_matrix[rec_name], good_only=good_only, min_accuracy=min_accuracy - ) - - all_similarities.append(similarities) - all_recall_scores.append(recall_scores) - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_recall_scores[sorter_name] = np.concatenate(all_recall_scores, axis=0) - - def get_mean_over_similarity_range(self, similarity_range, sorter_name): - idx = (self.all_similarities[sorter_name] >= similarity_range[0]) & ( - self.all_similarities[sorter_name] <= similarity_range[1] - ) - all_similarities = self.all_similarities[sorter_name][idx] - 
all_recall_scores = self.all_recall_scores[sorter_name][idx] - - order = np.argsort(all_similarities) - all_similarities = all_similarities[order] - all_recall_scores = all_recall_scores[order, :] - - mean_recall_scores = np.nanmean(all_recall_scores, axis=0) - - return mean_recall_scores - - def get_lag_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_recall_scores = self.all_recall_scores[sorter_name] - - order = np.argsort(all_similarities) - all_similarities = all_similarities[order] - all_recall_scores = all_recall_scores[order, :] - - result = {} - - for i in range(similarity_bins.size - 1): - cmin, cmax = similarity_bins[i], similarity_bins[i + 1] - amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) - mean_recall_scores = np.nanmean(all_recall_scores[amin:amax], axis=0) - result[(cmin, cmax)] = mean_recall_scores - - return result diff --git a/src/spikeinterface/comparison/correlogramcomparison.py b/src/spikeinterface/comparison/correlogram.py similarity index 58% rename from src/spikeinterface/comparison/correlogramcomparison.py rename to src/spikeinterface/comparison/correlogram.py index 80e881a152..9c5e1e91cf 100644 --- a/src/spikeinterface/comparison/correlogramcomparison.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -1,8 +1,13 @@ -import numpy as np from .paircomparisons import GroundTruthComparison +from .groundtruthstudy import GroundTruthStudy +from .studytools import iter_computed_sorting ## TODO remove this from spikeinterface.postprocessing import compute_correlograms +import numpy as np + + + class CorrelogramGTComparison(GroundTruthComparison): """ This class is an extension of GroundTruthComparison by focusing @@ -108,3 +113,75 @@ def compute_correlogram_by_similarity(self, similarity_matrix, window_ms=None): errors = errors[order, :] return similarities, errors + + + +class CorrelogramGTStudy(GroundTruthStudy): + def run_comparisons(self, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): + self.comparisons = {} + for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): + gt_sorting = self.get_ground_truth(rec_name) + comp = CorrelogramGTComparison( + gt_sorting, + sorting, + exhaustive_gt=exhaustive_gt, + window_ms=window_ms, + bin_ms=bin_ms, + well_detected_score=well_detected_score, + ) + self.comparisons[(rec_name, sorter_name)] = comp + + self.exhaustive_gt = exhaustive_gt + + @property + def time_bins(self): + for key, value in self.comparisons.items(): + return value.time_bins + + def precompute_scores_by_similarities(self, good_only=True): + if not hasattr(self, "_computed"): + import sklearn + + similarity_matrix = {} + for rec_name in self.rec_names: + templates = self.get_templates(rec_name) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + + self.all_similarities = {} + self.all_errors = {} + self._computed = True + + for sorter_ind, sorter_name in enumerate(self.sorter_names): + # loop over recordings + all_errors = [] + all_similarities = [] + for rec_name in self.rec_names: + try: + comp = self.comparisons[(rec_name, sorter_name)] + similarities, errors = comp.compute_correlogram_by_similarity(similarity_matrix[rec_name]) + all_similarities.append(similarities) + all_errors.append(errors) + except Exception: + pass + + self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) + 
self.all_errors[sorter_name] = np.concatenate(all_errors, axis=0) + + def get_error_profile_over_similarity_bins(self, similarity_bins, sorter_name): + all_similarities = self.all_similarities[sorter_name] + all_errors = self.all_errors[sorter_name] + + order = np.argsort(all_similarities) + all_similarities = all_similarities[order] + all_errors = all_errors[order, :] + + result = {} + + for i in range(similarity_bins.size - 1): + cmin, cmax = similarity_bins[i], similarity_bins[i + 1] + amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) + mean_errors = np.nanmean(all_errors[amin:amax], axis=0) + result[(cmin, cmax)] = mean_errors + + return result diff --git a/src/spikeinterface/comparison/correlogramstudy.py b/src/spikeinterface/comparison/correlogramstudy.py deleted file mode 100644 index fb00c08157..0000000000 --- a/src/spikeinterface/comparison/correlogramstudy.py +++ /dev/null @@ -1,76 +0,0 @@ -from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting -from .correlogramcomparison import CorrelogramGTComparison - -import numpy as np - - -class CorrelogramGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CorrelogramGTComparison( - gt_sorting, - sorting, - exhaustive_gt=exhaustive_gt, - window_ms=window_ms, - bin_ms=bin_ms, - well_detected_score=well_detected_score, - ) - self.comparisons[(rec_name, sorter_name)] = comp - - self.exhaustive_gt = exhaustive_gt - - @property - def time_bins(self): - for key, value in self.comparisons.items(): - return value.time_bins - - def precompute_scores_by_similarities(self, good_only=True): - if not hasattr(self, "_computed"): - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_errors = {} - self._computed = True - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_errors = [] - all_similarities = [] - for rec_name in self.rec_names: - try: - comp = self.comparisons[(rec_name, sorter_name)] - similarities, errors = comp.compute_correlogram_by_similarity(similarity_matrix[rec_name]) - all_similarities.append(similarities) - all_errors.append(errors) - except Exception: - pass - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_errors[sorter_name] = np.concatenate(all_errors, axis=0) - - def get_error_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_errors = self.all_errors[sorter_name] - - order = np.argsort(all_similarities) - all_similarities = all_similarities[order] - all_errors = all_errors[order, :] - - result = {} - - for i in range(similarity_bins.size - 1): - cmin, cmax = similarity_bins[i], similarity_bins[i + 1] - amin, amax = np.searchsorted(all_similarities, [cmin, cmax]) - mean_errors = np.nanmean(all_errors[amin:amax], axis=0) - result[(cmin, cmax)] = mean_errors - - return result From e88b4b5da0b1d848bd910122a385b3f5fb01dc2c Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 11:04:43 +0200 Subject: [PATCH 
088/322] Update src/spikeinterface/preprocessing/depth_order.py --- src/spikeinterface/preprocessing/depth_order.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/depth_order.py b/src/spikeinterface/preprocessing/depth_order.py index 43c43a5843..55e34ba5dd 100644 --- a/src/spikeinterface/preprocessing/depth_order.py +++ b/src/spikeinterface/preprocessing/depth_order.py @@ -18,7 +18,7 @@ class DepthOrderRecording(ChannelSliceRecording): If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') - flip: bool, default False + flip: bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. """ From b202c431a9f5d89bf7a5e92cf62acef64f040241 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 11:05:23 +0200 Subject: [PATCH 089/322] Update src/spikeinterface/core/recording_tools.py --- src/spikeinterface/core/recording_tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/core/recording_tools.py b/src/spikeinterface/core/recording_tools.py index 8236671a3b..ff9cd99389 100644 --- a/src/spikeinterface/core/recording_tools.py +++ b/src/spikeinterface/core/recording_tools.py @@ -316,7 +316,7 @@ def order_channels_by_depth(recording, channel_ids=None, dimensions=("x", "y"), If str, it needs to be 'x', 'y', 'z'. If tuple or list, it sorts the locations in two dimensions using lexsort. This approach is recommended since there is less ambiguity, by default ('x', 'y') - flip: bool, default False + flip: bool, default: False If flip is False then the order is bottom first (starting from tip of the probe). If flip is True then the order is upper first. 
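For reference, the flip behaviour documented in the two patches above can be exercised with a short sketch. This is a minimal illustration, not part of the patch series: it assumes these patches are applied, that generate_recording() attaches a dummy probe with channel locations (true for recent spikeinterface versions), and that DepthOrderRecording is importable from spikeinterface.preprocessing; only order_channels_by_depth and its flip argument come from the diffs themselves.

    import numpy as np
    from spikeinterface.core import generate_recording
    from spikeinterface.core.recording_tools import order_channels_by_depth
    from spikeinterface.preprocessing import DepthOrderRecording

    # toy recording; any recording with channel locations works the same way
    rec = generate_recording(num_channels=8, durations=[1.0])
    locations = rec.get_channel_locations()

    # forward order (bottom of the probe first) and its inverse permutation
    order_f, order_r = order_channels_by_depth(rec, dimensions=("x", "y"))
    assert np.array_equal(locations[order_f][order_r], locations)

    # flip=True simply reverses the forward order (top of the probe first)
    order_flipped, _ = order_channels_by_depth(rec, dimensions=("x", "y"), flip=True)
    assert np.array_equal(order_flipped, order_f[::-1])

    # the same option exposed by the preprocessing wrapper patched above
    rec_top_first = DepthOrderRecording(rec, dimensions=("x", "y"), flip=True)

The inverse permutation order_r is what detect_bad_channels uses to map per-chunk labels computed on the depth-ordered traces back onto the original channel order.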
From 73395fbd5a420be7d21e4017abcafb3d4a91d5ea Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 11:24:18 +0200 Subject: [PATCH 090/322] Update src/spikeinterface/preprocessing/detect_bad_channels.py --- src/spikeinterface/preprocessing/detect_bad_channels.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index 3c712946eb..cc4e8601e2 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -17,7 +17,7 @@ def detect_bad_channels( n_neighbors=11, nyquist_threshold=0.8, direction="y", - chunk_duration_s=0.5, + chunk_duration_s=0.3, num_random_chunks=100, welch_window_ms=10.0, highpass_filter_cutoff=300, From 12fd197859a3bb91099e9f5fb73fc5f74f923847 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 12:56:55 +0200 Subject: [PATCH 091/322] Use sparsity mask and handle right border correctly --- .../postprocessing/amplitude_scalings.py | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 5a0148c5c4..4dab68fdf8 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -90,10 +90,7 @@ def _run(self, **job_kwargs): if self._params["max_dense_channels"] is not None: assert recording.get_num_channels() <= self._params["max_dense_channels"], "" sparsity = ChannelSparsity.create_dense(we) - sparsity_inds = sparsity.unit_id_to_channel_indices - - # easier to use in chunk function as spikes use unit_index instead o id - unit_inds_to_channel_indices = {unit_ind: sparsity_inds[unit_id] for unit_ind, unit_id in enumerate(unit_ids)} + sparsity_mask = sparsity.mask all_templates = we.get_all_templates() # precompute segment slice @@ -113,7 +110,7 @@ def _run(self, **job_kwargs): self.spikes, all_templates, segment_slices, - unit_inds_to_channel_indices, + sparsity_mask, nbefore, nafter, cut_out_before, @@ -262,7 +259,7 @@ def _init_worker_amplitude_scalings( spikes, all_templates, segment_slices, - unit_inds_to_channel_indices, + sparsity_mask, nbefore, nafter, cut_out_before, @@ -282,7 +279,7 @@ def _init_worker_amplitude_scalings( worker_ctx["cut_out_before"] = cut_out_before worker_ctx["cut_out_after"] = cut_out_after worker_ctx["return_scaled"] = return_scaled - worker_ctx["unit_inds_to_channel_indices"] = unit_inds_to_channel_indices + worker_ctx["sparsity_mask"] = sparsity_mask worker_ctx["handle_collisions"] = handle_collisions worker_ctx["delta_collision_samples"] = delta_collision_samples @@ -306,7 +303,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) recording = worker_ctx["recording"] all_templates = worker_ctx["all_templates"] segment_slices = worker_ctx["segment_slices"] - unit_inds_to_channel_indices = worker_ctx["unit_inds_to_channel_indices"] + sparsity_mask = worker_ctx["sparsity_mask"] nbefore = worker_ctx["nbefore"] cut_out_before = worker_ctx["cut_out_before"] cut_out_after = worker_ctx["cut_out_after"] @@ -339,7 +336,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) i1_margin = np.searchsorted(spikes_in_segment["sample_index"], end_frame + right) local_spikes_w_margin = spikes_in_segment[i0_margin:i1_margin] collisions_local = find_collisions( - 
local_spikes, local_spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices + local_spikes, local_spikes_w_margin, delta_collision_samples, sparsity_mask ) else: collisions_local = {} @@ -354,7 +351,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) continue unit_index = spike["unit_index"] sample_index = spike["sample_index"] - sparse_indices = unit_inds_to_channel_indices[unit_index] + sparse_indices = sparsity_mask[unit_index] template = all_templates[unit_index][:, sparse_indices] template = template[nbefore - cut_out_before : nbefore + cut_out_after] sample_centered = sample_index - start_frame @@ -393,7 +390,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) right, nbefore, all_templates, - unit_inds_to_channel_indices, + sparsity_mask, cut_out_before, cut_out_after, ) @@ -410,14 +407,14 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) ### Collision handling ### -def _are_unit_indices_overlapping(unit_inds_to_channel_indices, i, j): +def _are_unit_indices_overlapping(sparsity_mask, i, j): """ Returns True if the unit indices i and j are overlapping, False otherwise Parameters ---------- - unit_inds_to_channel_indices: dict - A dictionary mapping unit indices to channel indices + sparsity_mask: boolean mask + The sparsity mask i: int The first unit index j: int @@ -428,13 +425,13 @@ def _are_unit_indices_overlapping(unit_inds_to_channel_indices, i, j): bool True if the unit indices i and j are overlapping, False otherwise """ - if len(np.intersect1d(unit_inds_to_channel_indices[i], unit_inds_to_channel_indices[j])) > 0: + if np.sum(np.logical_and(sparsity_mask[i], sparsity_mask[j])) > 0: return True else: return False -def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_to_channel_indices): +def find_collisions(spikes, spikes_w_margin, delta_collision_samples, sparsity_mask): """ Finds the collisions between spikes. @@ -446,8 +443,8 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ An array of spikes within the added margin delta_collision_samples: int The maximum number of samples between two spikes to consider them as overlapping - unit_inds_to_channel_indices: dict - A dictionary mapping unit indices to channel indices + sparsity_mask: boolean mask + The sparsity mask Returns ------- @@ -480,7 +477,7 @@ def find_collisions(spikes, spikes_w_margin, delta_collision_samples, unit_inds_ # find the overlapping spikes in space as well for possible_overlapping_spike_index in possible_overlapping_spike_indices: if _are_unit_indices_overlapping( - unit_inds_to_channel_indices, + sparsity_mask, spike["unit_index"], spikes_w_margin[possible_overlapping_spike_index]["unit_index"], ): @@ -501,7 +498,7 @@ def fit_collision( right, nbefore, all_templates, - unit_inds_to_channel_indices, + sparsity_mask, cut_out_before, cut_out_after, ): @@ -528,8 +525,8 @@ def fit_collision( The number of samples before the spike to consider for the fit. all_templates: np.ndarray A numpy array of shape (n_units, n_samples, n_channels) containing the templates. - unit_inds_to_channel_indices: dict - A dictionary mapping unit indices to channel indices. + sparsity_mask: boolean mask + The sparsity mask cut_out_before: int The number of samples to cut out before the spike. 
cut_out_after: int @@ -547,14 +544,15 @@ def fit_collision( sample_last_centered = np.max(collision["sample_index"]) - (start_frame - left) # construct sparsity as union between units' sparsity - sparse_indices = np.array([], dtype="int") + sparse_indices = np.zeros(sparsity_mask.shape[1], dtype="int") for spike in collision: - sparse_indices_i = unit_inds_to_channel_indices[spike["unit_index"]] - sparse_indices = np.union1d(sparse_indices, sparse_indices_i) + sparse_indices_i = sparsity_mask[spike["unit_index"]] + sparse_indices = np.logical_or(sparse_indices, sparse_indices_i) local_waveform_start = max(0, sample_first_centered - cut_out_before) local_waveform_end = min(traces_with_margin.shape[0], sample_last_centered + cut_out_after) local_waveform = traces_with_margin[local_waveform_start:local_waveform_end, sparse_indices] + num_samples_local_waveform = local_waveform.shape[0] y = local_waveform.T.flatten() X = np.zeros((len(y), len(collision))) @@ -567,8 +565,10 @@ def fit_collision( # deal with borders if sample_centered - cut_out_before < 0: full_template[: sample_centered + cut_out_after] = template_cut[cut_out_before - sample_centered :] - elif sample_centered + cut_out_after > end_frame + right: - full_template[sample_centered - cut_out_before :] = template_cut[: -cut_out_after - (end_frame + right)] + elif sample_centered + cut_out_after > num_samples_local_waveform: + full_template[sample_centered - cut_out_before :] = template_cut[ + : -(cut_out_after + sample_centered - num_samples_local_waveform) + ] else: full_template[sample_centered - cut_out_before : sample_centered + cut_out_after] = template_cut X[:, i] = full_template.T.flatten() From b1297e6aef50aa507415359b773f1c5611230b1f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 13:08:36 +0200 Subject: [PATCH 092/322] Update CollisionGTStudy and CorrelogramGTStudy --- src/spikeinterface/comparison/__init__.py | 9 +- src/spikeinterface/comparison/collision.py | 96 +++++++++---------- src/spikeinterface/comparison/correlogram.py | 85 +++++++--------- .../comparison/groundtruthstudy.py | 4 +- .../_legacy_mpl_widgets/collisioncomp.py | 2 +- 5 files changed, 83 insertions(+), 113 deletions(-) diff --git a/src/spikeinterface/comparison/__init__.py b/src/spikeinterface/comparison/__init__.py index a390bb7689..7ac5b29aa2 100644 --- a/src/spikeinterface/comparison/__init__.py +++ b/src/spikeinterface/comparison/__init__.py @@ -28,12 +28,11 @@ compare_multiple_templates, MultiTemplateComparison, ) -from .collisioncomparison import CollisionGTComparison -from .correlogramcomparison import CorrelogramGTComparison + from .groundtruthstudy import GroundTruthStudy -from .collisionstudy import CollisionGTStudy -from .correlogramstudy import CorrelogramGTStudy -from .studytools import aggregate_performances_table +from .collision import CollisionGTComparison, CollisionGTStudy +from .correlogram import CorrelogramGTComparison, CorrelogramGTStudy +# from .studytools import aggregate_performances_table from .hybrid import ( HybridSpikesRecording, HybridUnitsRecording, diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py index 864809b04b..c526c22ae4 100644 --- a/src/spikeinterface/comparison/collision.py +++ b/src/spikeinterface/comparison/collision.py @@ -12,8 +12,9 @@ class CollisionGTComparison(GroundTruthComparison): """ - This class is an extension of GroundTruthComparison by focusing - to benchmark spike in collision + This class is an extension of GroundTruthComparison 
by focusing to benchmark spike in collision. + + This class needs maintenance and need a bit of refactoring. collision_lag: float @@ -166,60 +167,49 @@ def compute_collision_by_similarity(self, similarity_matrix, unit_ids=None, good class CollisionGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CollisionGTComparison( - gt_sorting, sorting, exhaustive_gt=exhaustive_gt, collision_lag=collision_lag, nbins=nbins - ) - self.comparisons[(rec_name, sorter_name)] = comp + def run_comparisons(self, case_keys=None, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): + _kwargs = dict() + _kwargs.update(kwargs) + _kwargs["exhaustive_gt"] = exhaustive_gt + _kwargs["collision_lag"] = collision_lag + _kwargs["nbins"] = nbins + GroundTruthStudy.run_comparisons(self, case_keys=None, comparison_class=CollisionGTComparison, **_kwargs) self.exhaustive_gt = exhaustive_gt self.collision_lag = collision_lag - def get_lags(self): - fs = self.comparisons[(self.rec_names[0], self.sorter_names[0])].sorting1.get_sampling_frequency() - lags = self.comparisons[(self.rec_names[0], self.sorter_names[0])].bins / fs * 1000 + def get_lags(self, key): + comp = self.comparisons[key] + fs = comp.sorting1.get_sampling_frequency() + lags = comp.bins / fs * 1000. return lags - def precompute_scores_by_similarities(self, good_only=True, min_accuracy=0.9): - if not hasattr(self, "_good_only") or self._good_only != good_only: - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_recall_scores = {} - self.good_only = good_only - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_similarities = [] - all_recall_scores = [] - - for rec_name in self.rec_names: - if (rec_name, sorter_name) in self.comparisons.keys(): - comp = self.comparisons[(rec_name, sorter_name)] - similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( - similarity_matrix[rec_name], good_only=good_only, min_accuracy=min_accuracy - ) - - all_similarities.append(similarities) - all_recall_scores.append(recall_scores) - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_recall_scores[sorter_name] = np.concatenate(all_recall_scores, axis=0) - - def get_mean_over_similarity_range(self, similarity_range, sorter_name): - idx = (self.all_similarities[sorter_name] >= similarity_range[0]) & ( - self.all_similarities[sorter_name] <= similarity_range[1] + def precompute_scores_by_similarities(self, case_keys=None, good_only=False, min_accuracy=0.9): + import sklearn + if case_keys is None: + case_keys = self.cases.keys() + + self.all_similarities = {} + self.all_recall_scores = {} + self.good_only = good_only + + for key in case_keys: + templates = self.get_templates(key) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + comp = self.comparisons[key] + similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( + similarity, good_only=good_only, min_accuracy=min_accuracy + ) + 
self.all_similarities[key] = similarities + self.all_recall_scores[key] = recall_scores + + + def get_mean_over_similarity_range(self, similarity_range, key): + idx = (self.all_similarities[key] >= similarity_range[0]) & ( + self.all_similarities[key] <= similarity_range[1] ) - all_similarities = self.all_similarities[sorter_name][idx] - all_recall_scores = self.all_recall_scores[sorter_name][idx] + all_similarities = self.all_similarities[key][idx] + all_recall_scores = self.all_recall_scores[key][idx] order = np.argsort(all_similarities) all_similarities = all_similarities[order] @@ -229,9 +219,9 @@ def get_mean_over_similarity_range(self, similarity_range, sorter_name): return mean_recall_scores - def get_lag_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_recall_scores = self.all_recall_scores[sorter_name] + def get_lag_profile_over_similarity_bins(self, similarity_bins, key): + all_similarities = self.all_similarities[key] + all_recall_scores = self.all_recall_scores[key] order = np.argsort(all_similarities) all_similarities = all_similarities[order] diff --git a/src/spikeinterface/comparison/correlogram.py b/src/spikeinterface/comparison/correlogram.py index 9c5e1e91cf..b2376cb52d 100644 --- a/src/spikeinterface/comparison/correlogram.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -11,11 +11,9 @@ class CorrelogramGTComparison(GroundTruthComparison): """ This class is an extension of GroundTruthComparison by focusing - to benchmark correlation reconstruction + to benchmark correlation reconstruction. - - collision_lag: float - Collision lag in ms. + This class needs maintenance and need a bit of refactoring. """ @@ -110,27 +108,21 @@ def compute_correlogram_by_similarity(self, similarity_matrix, window_ms=None): order = np.argsort(similarities) similarities = similarities[order] - errors = errors[order, :] + errors = errors[order] return similarities, errors class CorrelogramGTStudy(GroundTruthStudy): - def run_comparisons(self, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): - self.comparisons = {} - for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder): - gt_sorting = self.get_ground_truth(rec_name) - comp = CorrelogramGTComparison( - gt_sorting, - sorting, - exhaustive_gt=exhaustive_gt, - window_ms=window_ms, - bin_ms=bin_ms, - well_detected_score=well_detected_score, - ) - self.comparisons[(rec_name, sorter_name)] = comp - + def run_comparisons(self, case_keys=None, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): + _kwargs = dict() + _kwargs.update(kwargs) + _kwargs["exhaustive_gt"] = exhaustive_gt + _kwargs["window_ms"] = window_ms + _kwargs["bin_ms"] = bin_ms + _kwargs["well_detected_score"] = well_detected_score + GroundTruthStudy.run_comparisons(self, case_keys=None, comparison_class=CorrelogramGTComparison, **_kwargs) self.exhaustive_gt = exhaustive_gt @property @@ -138,39 +130,28 @@ def time_bins(self): for key, value in self.comparisons.items(): return value.time_bins - def precompute_scores_by_similarities(self, good_only=True): - if not hasattr(self, "_computed"): - import sklearn - - similarity_matrix = {} - for rec_name in self.rec_names: - templates = self.get_templates(rec_name) - flat_templates = templates.reshape(templates.shape[0], -1) - similarity_matrix[rec_name] = sklearn.metrics.pairwise.cosine_similarity(flat_templates) - - self.all_similarities = {} - self.all_errors = {} - 
self._computed = True - - for sorter_ind, sorter_name in enumerate(self.sorter_names): - # loop over recordings - all_errors = [] - all_similarities = [] - for rec_name in self.rec_names: - try: - comp = self.comparisons[(rec_name, sorter_name)] - similarities, errors = comp.compute_correlogram_by_similarity(similarity_matrix[rec_name]) - all_similarities.append(similarities) - all_errors.append(errors) - except Exception: - pass - - self.all_similarities[sorter_name] = np.concatenate(all_similarities, axis=0) - self.all_errors[sorter_name] = np.concatenate(all_errors, axis=0) - - def get_error_profile_over_similarity_bins(self, similarity_bins, sorter_name): - all_similarities = self.all_similarities[sorter_name] - all_errors = self.all_errors[sorter_name] + def precompute_scores_by_similarities(self, case_keys=None, good_only=True): + import sklearn.metrics + + if case_keys is None: + case_keys = self.cases.keys() + + self.all_similarities = {} + self.all_errors = {} + + for key in case_keys: + templates = self.get_templates(key) + flat_templates = templates.reshape(templates.shape[0], -1) + similarity = sklearn.metrics.pairwise.cosine_similarity(flat_templates) + comp = self.comparisons[key] + similarities, errors = comp.compute_correlogram_by_similarity(similarity) + + self.all_similarities[key] = similarities + self.all_errors[key] = errors + + def get_error_profile_over_similarity_bins(self, similarity_bins, key): + all_similarities = self.all_similarities[key] + all_errors = self.all_errors[key] order = np.argsort(all_similarities) all_similarities = all_similarities[order] diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 9f0039b9cb..0c08318ef4 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -155,7 +155,7 @@ def scan_folder(self): def __repr__(self): - t = f"GroundTruthStudy {self.folder.stem} \n" + t = f"{self.__class__.__name__} {self.folder.stem} \n" t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" t += f" cases: {len(self.cases)} {list(self.cases.keys())}\n" num_computed = sum([1 for sorting in self.sortings.values() if sorting is not None]) @@ -303,7 +303,7 @@ def get_waveform_extractor(self, key): we.set_recording(recording) return we - def get_templates(self, key, mode="mean"): + def get_templates(self, key, mode="average"): we = self.get_waveform_extractor(key) templates = we.get_all_templates(mode=mode) return templates diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py index 6d981e1fd4..096a5f3933 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py @@ -1,7 +1,7 @@ import numpy as np from .basewidget import BaseWidget -from spikeinterface.comparison.collisioncomparison import CollisionGTComparison +from spikeinterface.comparison import CollisionGTComparison class ComparisonCollisionPairByPairWidget(BaseWidget): From 9add5def54fe63fd23f32d3cde5c2177f7eb1d09 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 13:19:33 +0200 Subject: [PATCH 093/322] Deprecate multicomparison save/load functions in favor of pickle --- src/spikeinterface/comparison/multicomparisons.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/spikeinterface/comparison/multicomparisons.py 
b/src/spikeinterface/comparison/multicomparisons.py index 9e02fd5b2d..d1193907eb 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -1,6 +1,7 @@ from pathlib import Path import json import pickle +import warnings import numpy as np @@ -180,6 +181,11 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou return sorting def save_to_folder(self, save_folder): + warnings.warn( + "save_to_folder() is deprecated. You should save and load the multi sorting comparison object using pickle.\n>>> pickle.dump(mcmp, open('mcmp.pkl', 'wb')))))\n>>> mcmp_loaded = pickle.load(open('mcmp.pkl', 'rb'))", + DeprecationWarning, + stacklevel=2, + ) for sorting in self.object_list: assert ( sorting.check_if_json_serializable() @@ -205,6 +211,11 @@ def save_to_folder(self, save_folder): @staticmethod def load_from_folder(folder_path): + warnings.warn( + "load_from_folder() is deprecated. You should save and load the multi sorting comparison object using pickle.\n>>> pickle.dump(mcmp, open('mcmp.pkl', 'wb')))))\n>>> mcmp_loaded = pickle.load(open('mcmp.pkl', 'rb'))", + DeprecationWarning, + stacklevel=2, + ) folder_path = Path(folder_path) with (folder_path / "kwargs.json").open() as f: kwargs = json.load(f) From 3d2f41c0773a1e3b499f42918d582619e1fd0dba Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 13:20:48 +0200 Subject: [PATCH 094/322] Formatting --- src/spikeinterface/comparison/multicomparisons.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index d1193907eb..d418b92ab8 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -182,7 +182,9 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou def save_to_folder(self, save_folder): warnings.warn( - "save_to_folder() is deprecated. You should save and load the multi sorting comparison object using pickle.\n>>> pickle.dump(mcmp, open('mcmp.pkl', 'wb')))))\n>>> mcmp_loaded = pickle.load(open('mcmp.pkl', 'rb'))", + "save_to_folder() is deprecated. " + "You should save and load the multi sorting comparison object using pickle." + "\n>>> pickle.dump(mcmp, open('mcmp.pkl', 'wb')))))\n>>> mcmp_loaded = pickle.load(open('mcmp.pkl', 'rb'))", DeprecationWarning, stacklevel=2, ) @@ -212,7 +214,9 @@ def save_to_folder(self, save_folder): @staticmethod def load_from_folder(folder_path): warnings.warn( - "load_from_folder() is deprecated. You should save and load the multi sorting comparison object using pickle.\n>>> pickle.dump(mcmp, open('mcmp.pkl', 'wb')))))\n>>> mcmp_loaded = pickle.load(open('mcmp.pkl', 'rb'))", + "load_from_folder() is deprecated. " + "You should save and load the multi sorting comparison object using pickle." 
+ "\n>>> pickle.dump(mcmp, open('mcmp.pkl', 'wb')))))\n>>> mcmp_loaded = pickle.load(open('mcmp.pkl', 'rb'))", DeprecationWarning, stacklevel=2, ) From 8a7a90e130e3007ad73ae840ee4e889c9a6b146f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 13:35:50 +0200 Subject: [PATCH 095/322] wip --- src/spikeinterface/comparison/groundtruthstudy.py | 5 +---- .../widgets/_legacy_mpl_widgets/collisioncomp.py | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 0c08318ef4..6898f381b6 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -17,10 +17,7 @@ from .paircomparisons import compare_sorter_to_ground_truth, GroundTruthComparison -# TODO : save comparison in folders when COmparison object will be able to serialize -# TODO ??: make an internal optional binary copy when running several external sorter -# on the same dataset to avoid multiple save binary ? even when the recording is float32 (ks need int16) - +# TODO later : save comparison in folders when comparison object will be able to serialize # This is to separate names when the key are tuples when saving folders diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py index 096a5f3933..d25f1ea97b 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py @@ -1,7 +1,6 @@ import numpy as np from .basewidget import BaseWidget -from spikeinterface.comparison import CollisionGTComparison class ComparisonCollisionPairByPairWidget(BaseWidget): From fe6f60f45b8ee1f50e81c8d7b5b209965507c1df Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 13:39:31 +0200 Subject: [PATCH 096/322] Re move studytools.py. Not needed anymore. --- src/spikeinterface/comparison/__init__.py | 2 +- src/spikeinterface/comparison/studytools.py | 352 -------------------- 2 files changed, 1 insertion(+), 353 deletions(-) delete mode 100644 src/spikeinterface/comparison/studytools.py diff --git a/src/spikeinterface/comparison/__init__.py b/src/spikeinterface/comparison/__init__.py index 7ac5b29aa2..bff85dde4a 100644 --- a/src/spikeinterface/comparison/__init__.py +++ b/src/spikeinterface/comparison/__init__.py @@ -32,7 +32,7 @@ from .groundtruthstudy import GroundTruthStudy from .collision import CollisionGTComparison, CollisionGTStudy from .correlogram import CorrelogramGTComparison, CorrelogramGTStudy -# from .studytools import aggregate_performances_table + from .hybrid import ( HybridSpikesRecording, HybridUnitsRecording, diff --git a/src/spikeinterface/comparison/studytools.py b/src/spikeinterface/comparison/studytools.py deleted file mode 100644 index 00119c1586..0000000000 --- a/src/spikeinterface/comparison/studytools.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -High level tools to run many ground-truth comparison with -many sorter on many recordings and then collect and aggregate results -in an easy way. 
- -The all mechanism is based on an intrinsic organization -into a "study_folder" with several subfolder: - * raw_files : contain a copy in binary format of recordings - * sorter_folders : contains output of sorters - * ground_truth : contains a copy of sorting ground in npz format - * sortings: contains light copy of all sorting in npz format - * tables: some table in cvs format -""" - -from pathlib import Path -import shutil -import json -import os - - -from spikeinterface.core import load_extractor -from spikeinterface.core.job_tools import fix_job_kwargs -from spikeinterface.extractors import NpzSortingExtractor -from spikeinterface.sorters import sorter_dict -from spikeinterface.sorters.basesorter import is_log_ok - - -from .comparisontools import _perf_keys -from .paircomparisons import compare_sorter_to_ground_truth - - - - - -# This is deprecated and will be removed -def iter_working_folder(working_folder): - working_folder = Path(working_folder) - for rec_folder in working_folder.iterdir(): - if not rec_folder.is_dir(): - continue - for output_folder in rec_folder.iterdir(): - if (output_folder / "spikeinterface_job.json").is_file(): - with open(output_folder / "spikeinterface_job.json", "r") as f: - job_dict = json.load(f) - rec_name = job_dict["rec_name"] - sorter_name = job_dict["sorter_name"] - yield rec_name, sorter_name, output_folder - else: - rec_name = rec_folder.name - sorter_name = output_folder.name - if not output_folder.is_dir(): - continue - if not is_log_ok(output_folder): - continue - yield rec_name, sorter_name, output_folder - -# This is deprecated and will be removed -def iter_sorting_output(working_folder): - """Iterator over output_folder to retrieve all triplets of (rec_name, sorter_name, sorting).""" - for rec_name, sorter_name, output_folder in iter_working_folder(working_folder): - SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder) - yield rec_name, sorter_name, sorting - - - -def setup_comparison_study(study_folder, gt_dict, **job_kwargs): - """ - Based on a dict of (recording, sorting) create the study folder. - - Parameters - ---------- - study_folder: str - The study folder. - gt_dict : a dict of tuple (recording, sorting_gt) - Dict of tuple that contain recording and sorting ground truth - """ - job_kwargs = fix_job_kwargs(job_kwargs) - study_folder = Path(study_folder) - assert not study_folder.is_dir(), "'study_folder' already exists. Please remove it" - - study_folder.mkdir(parents=True, exist_ok=True) - sorting_folders = study_folder / "sortings" - log_folder = sorting_folders / "run_log" - log_folder.mkdir(parents=True, exist_ok=True) - tables_folder = study_folder / "tables" - tables_folder.mkdir(parents=True, exist_ok=True) - - for rec_name, (recording, sorting_gt) in gt_dict.items(): - # write recording using save with binary - folder = study_folder / "ground_truth" / rec_name - sorting_gt.save(folder=folder, format="numpy_folder") - folder = study_folder / "raw_files" / rec_name - recording.save(folder=folder, format="binary", **job_kwargs) - - # make an index of recording names - with open(study_folder / "names.txt", mode="w", encoding="utf8") as f: - for rec_name in gt_dict: - f.write(rec_name + "\n") - - -def get_rec_names(study_folder): - """ - Get list of keys of recordings. - Read from the 'names.txt' file in study folder. - - Parameters - ---------- - study_folder: str - The study folder. - - Returns - ------- - rec_names: list - List of names. 
- """ - study_folder = Path(study_folder) - with open(study_folder / "names.txt", mode="r", encoding="utf8") as f: - rec_names = f.read()[:-1].split("\n") - return rec_names - - -def get_recordings(study_folder): - """ - Get ground recording as a dict. - - They are read from the 'raw_files' folder with binary format. - - Parameters - ---------- - study_folder: str - The study folder. - - Returns - ------- - recording_dict: dict - Dict of recording. - """ - study_folder = Path(study_folder) - - rec_names = get_rec_names(study_folder) - recording_dict = {} - for rec_name in rec_names: - rec = load_extractor(study_folder / "raw_files" / rec_name) - recording_dict[rec_name] = rec - - return recording_dict - - -def get_ground_truths(study_folder): - """ - Get ground truth sorting extractor as a dict. - - They are read from the 'ground_truth' folder with npz format. - - Parameters - ---------- - study_folder: str - The study folder. - - Returns - ------- - ground_truths: dict - Dict of sorting_gt. - """ - study_folder = Path(study_folder) - rec_names = get_rec_names(study_folder) - ground_truths = {} - for rec_name in rec_names: - sorting = load_extractor(study_folder / "ground_truth" / rec_name) - ground_truths[rec_name] = sorting - return ground_truths - - -def iter_computed_names(study_folder): - sorting_folder = Path(study_folder) / "sortings" - for filename in os.listdir(sorting_folder): - if filename.endswith(".npz") and "[#]" in filename: - rec_name, sorter_name = filename.replace(".npz", "").split("[#]") - yield rec_name, sorter_name - - -def iter_computed_sorting(study_folder): - """ - Iter over sorting files. - """ - sorting_folder = Path(study_folder) / "sortings" - for filename in os.listdir(sorting_folder): - if filename.endswith(".npz") and "[#]" in filename: - rec_name, sorter_name = filename.replace(".npz", "").split("[#]") - sorting = NpzSortingExtractor(sorting_folder / filename) - yield rec_name, sorter_name, sorting - - -def collect_run_times(study_folder): - """ - Collect run times in a working folder and store it in CVS files. - - The output is list of (rec_name, sorter_name, run_time) - """ - import pandas as pd - - study_folder = Path(study_folder) - sorting_folders = study_folder / "sortings" - log_folder = sorting_folders / "run_log" - tables_folder = study_folder / "tables" - - tables_folder.mkdir(parents=True, exist_ok=True) - - run_times = [] - for filename in os.listdir(log_folder): - if filename.endswith(".json") and "[#]" in filename: - rec_name, sorter_name = filename.replace(".json", "").split("[#]") - with open(log_folder / filename, encoding="utf8", mode="r") as logfile: - log = json.load(logfile) - run_time = log.get("run_time", None) - run_times.append((rec_name, sorter_name, run_time)) - - run_times = pd.DataFrame(run_times, columns=["rec_name", "sorter_name", "run_time"]) - run_times = run_times.set_index(["rec_name", "sorter_name"]) - - return run_times - - -def aggregate_sorting_comparison(study_folder, exhaustive_gt=False): - """ - Loop over output folder in a tree to collect sorting output and run - ground_truth_comparison on them. - - Parameters - ---------- - study_folder: str - The study folder. - exhaustive_gt: bool (default True) - Tell if the ground true is "exhaustive" or not. In other world if the - GT have all possible units. It allows more performance measurement. 
- For instance, MEArec simulated dataset have exhaustive_gt=True - - Returns - ---------- - comparisons: a dict of SortingComparison - - """ - - study_folder = Path(study_folder) - - ground_truths = get_ground_truths(study_folder) - results = collect_study_sorting(study_folder) - - comparisons = {} - for (rec_name, sorter_name), sorting in results.items(): - gt_sorting = ground_truths[rec_name] - sc = compare_sorter_to_ground_truth(gt_sorting, sorting, exhaustive_gt=exhaustive_gt) - comparisons[(rec_name, sorter_name)] = sc - - return comparisons - - -def aggregate_performances_table(study_folder, exhaustive_gt=False, **karg_thresh): - """ - Aggregate some results into dataframe to have a "study" overview on all recordingXsorter. - - Tables are: - * run_times: run times per recordingXsorter - * perf_pooled_with_sum: GroundTruthComparison.see get_performance - * perf_pooled_with_average: GroundTruthComparison.see get_performance - * count_units: given some threshold count how many units : 'well_detected', 'redundant', 'false_postive_units, 'bad' - - Parameters - ---------- - study_folder: str - The study folder. - karg_thresh: dict - Threshold parameters used for the "count_units" table. - - Returns - ------- - dataframes: a dict of DataFrame - Return several useful DataFrame to compare all results. - Note that count_units depend on karg_thresh. - """ - import pandas as pd - - study_folder = Path(study_folder) - sorter_folders = study_folder / "sorter_folders" - tables_folder = study_folder / "tables" - - comparisons = aggregate_sorting_comparison(study_folder, exhaustive_gt=exhaustive_gt) - ground_truths = get_ground_truths(study_folder) - results = collect_study_sorting(study_folder) - - study_folder = Path(study_folder) - - dataframes = {} - - # get run times: - run_times = pd.read_csv(str(tables_folder / "run_times.csv"), sep="\t") - run_times.columns = ["rec_name", "sorter_name", "run_time"] - run_times = run_times.set_index( - [ - "rec_name", - "sorter_name", - ] - ) - dataframes["run_times"] = run_times - - perf_pooled_with_sum = pd.DataFrame(index=run_times.index, columns=_perf_keys) - dataframes["perf_pooled_with_sum"] = perf_pooled_with_sum - - perf_pooled_with_average = pd.DataFrame(index=run_times.index, columns=_perf_keys) - dataframes["perf_pooled_with_average"] = perf_pooled_with_average - - count_units = pd.DataFrame( - index=run_times.index, columns=["num_gt", "num_sorter", "num_well_detected", "num_redundant"] - ) - dataframes["count_units"] = count_units - if exhaustive_gt: - count_units["num_false_positive"] = None - count_units["num_bad"] = None - - perf_by_spiketrain = [] - - for (rec_name, sorter_name), comp in comparisons.items(): - gt_sorting = ground_truths[rec_name] - sorting = results[(rec_name, sorter_name)] - - perf = comp.get_performance(method="pooled_with_sum", output="pandas") - perf_pooled_with_sum.loc[(rec_name, sorter_name), :] = perf - - perf = comp.get_performance(method="pooled_with_average", output="pandas") - perf_pooled_with_average.loc[(rec_name, sorter_name), :] = perf - - perf = comp.get_performance(method="by_spiketrain", output="pandas") - perf["rec_name"] = rec_name - perf["sorter_name"] = sorter_name - perf = perf.reset_index() - - perf_by_spiketrain.append(perf) - - count_units.loc[(rec_name, sorter_name), "num_gt"] = len(gt_sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_sorter"] = len(sorting.get_unit_ids()) - count_units.loc[(rec_name, sorter_name), "num_well_detected"] = 
comp.count_well_detected_units(**karg_thresh) - count_units.loc[(rec_name, sorter_name), "num_redundant"] = comp.count_redundant_units() - if exhaustive_gt: - count_units.loc[(rec_name, sorter_name), "num_false_positive"] = comp.count_false_positive_units() - count_units.loc[(rec_name, sorter_name), "num_bad"] = comp.count_bad_units() - - perf_by_spiketrain = pd.concat(perf_by_spiketrain) - perf_by_spiketrain = perf_by_spiketrain.set_index(["rec_name", "sorter_name", "gt_unit_id"]) - dataframes["perf_by_spiketrain"] = perf_by_spiketrain - - return dataframes From 77505adc76fce228d66347d0aeb66bacce94cc8c Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 13:40:53 +0200 Subject: [PATCH 097/322] rm studytools part2 --- src/spikeinterface/comparison/collision.py | 1 - src/spikeinterface/comparison/correlogram.py | 1 - .../comparison/tests/test_studytools.py | 59 ------------------- 3 files changed, 61 deletions(-) delete mode 100644 src/spikeinterface/comparison/tests/test_studytools.py diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py index c526c22ae4..01626b34b8 100644 --- a/src/spikeinterface/comparison/collision.py +++ b/src/spikeinterface/comparison/collision.py @@ -1,6 +1,5 @@ from .paircomparisons import GroundTruthComparison from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting ## TODO remove this from .comparisontools import make_collision_events import numpy as np diff --git a/src/spikeinterface/comparison/correlogram.py b/src/spikeinterface/comparison/correlogram.py index b2376cb52d..150f5afe55 100644 --- a/src/spikeinterface/comparison/correlogram.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -1,6 +1,5 @@ from .paircomparisons import GroundTruthComparison from .groundtruthstudy import GroundTruthStudy -from .studytools import iter_computed_sorting ## TODO remove this from spikeinterface.postprocessing import compute_correlograms diff --git a/src/spikeinterface/comparison/tests/test_studytools.py b/src/spikeinterface/comparison/tests/test_studytools.py deleted file mode 100644 index dbc39d5e1d..0000000000 --- a/src/spikeinterface/comparison/tests/test_studytools.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import shutil -from pathlib import Path - -import pytest - -from spikeinterface.extractors import toy_example -from spikeinterface.comparison.studytools import ( - setup_comparison_study, - iter_computed_names, - iter_computed_sorting, - get_rec_names, - get_ground_truths, - get_recordings, -) - -if hasattr(pytest, "global_test_folder"): - cache_folder = pytest.global_test_folder / "comparison" -else: - cache_folder = Path("cache_folder") / "comparison" - - -study_folder = cache_folder / "test_studytools" - - -def setup_module(): - if study_folder.is_dir(): - shutil.rmtree(study_folder) - - -def test_setup_comparison_study(): - rec0, gt_sorting0 = toy_example(num_channels=4, duration=30, seed=0, num_segments=1) - rec1, gt_sorting1 = toy_example(num_channels=32, duration=30, seed=0, num_segments=1) - - gt_dict = { - "toy_tetrode": (rec0, gt_sorting0), - "toy_probe32": (rec1, gt_sorting1), - } - setup_comparison_study(study_folder, gt_dict) - - -def test_get_ground_truths(): - names = get_rec_names(study_folder) - d = get_ground_truths(study_folder) - d = get_recordings(study_folder) - - -def test_loops(): - names = list(iter_computed_names(study_folder)) - for rec_name, sorter_name, sorting in iter_computed_sorting(study_folder): - print(rec_name, sorter_name) - 
print(sorting) - - -if __name__ == "__main__": - setup_module() - test_setup_comparison_study() - test_get_ground_truths() - test_loops() From b5376a9b30d84a201a6c8ad7db15c644abe993a9 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 14:26:22 +0200 Subject: [PATCH 098/322] Modify doc for gt study --- doc/modules/comparison.rst | 101 +++++++++++------- .../comparison/groundtruthstudy.py | 6 -- 2 files changed, 62 insertions(+), 45 deletions(-) diff --git a/doc/modules/comparison.rst b/doc/modules/comparison.rst index b452307e3c..9b2e701dac 100644 --- a/doc/modules/comparison.rst +++ b/doc/modules/comparison.rst @@ -248,21 +248,19 @@ An **over-merged** unit has a relatively high agreement (>= 0.2 by default) for We also have a high level class to compare many sorters against ground truth: :py:func:`~spiekinterface.comparison.GroundTruthStudy()` -A study is a systematic performance comparison of several ground truth recordings with several sorters. +A study is a systematic performance comparison of several ground truth recordings with several sorters or several cases +like the different parameter sets. -The study class proposes high-level tool functions to run many ground truth comparisons with many sorters +The study class proposes high-level tool functions to run many ground truth comparisons with many "cases" on many recordings and then collect and aggregate results in an easy way. The all mechanism is based on an intrinsic organization into a "study_folder" with several subfolder: - * raw_files : contain a copy of recordings in binary format - * sorter_folders : contains outputs of sorters - * ground_truth : contains a copy of sorting ground truth in npz format - * sortings: contains light copy of all sorting in npz format - * tables: some tables in csv format - -In order to run and rerun the computation all gt_sorting and recordings are copied to a fast and universal format: -binary (for recordings) and npz (for sortings). + * datasets: contains ground truth datasets + * sorters : contains outputs of sorters + * sortings: contains light copy of all sorting + * metrics: contains metrics + * ... .. code-block:: python @@ -274,28 +272,52 @@ binary (for recordings) and npz (for sortings). import spikeinterface.widgets as sw from spikeinterface.comparison import GroundTruthStudy - # Setup study folder - rec0, gt_sorting0 = se.toy_example(num_channels=4, duration=10, seed=10, num_segments=1) - rec1, gt_sorting1 = se.toy_example(num_channels=4, duration=10, seed=0, num_segments=1) - gt_dict = { - 'rec0': (rec0, gt_sorting0), - 'rec1': (rec1, gt_sorting1), + + # generate 2 simulated datasets (could be also mearec files) + rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) + rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) + + datasets = { + "toy0": (rec0, gt_sorting0), + "toy1": (rec1, gt_sorting1), } - study_folder = 'a_study_folder' - study = GroundTruthStudy.create(study_folder, gt_dict) - # all sorters for all recordings in one function. 
- sorter_list = ['herdingspikes', 'tridesclous', ] - study.run_sorters(sorter_list, mode_if_folder_exists="keep") + # define some "cases" here we want to tests tridesclous2 on 2 datasets and spykingcircus on one dataset + # so it is a two level study (sorter_name, dataset) + # this could be more complicated like (sorter_name, dataset, params) + cases = { + ("tdc2", "toy0"): { + "label": "tridesclous2 on tetrode0", + "dataset": "toy0", + "run_sorter_params": { + "sorter_name": "tridesclous2", + }, + }, + # + ("tdc2", "toy1"): { + "label": "tridesclous2 on tetrode1", + "dataset": "toy1", + "run_sorter_params": { + "sorter_name": "tridesclous2", + }, + }, + + ("sc", "toy0"): { + "label": "spykingcircus2 on tetrode0", + "dataset": "toy0", + "run_sorter_params": { + "sorter_name": "spykingcircus", + "docker_image": True + }, + }, + } + # this initilize a folder + study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, + levels=["sorter_name", "dataset"]) - # You can re-run **run_study_sorters** as many times as you want. - # By default **mode='keep'** so only uncomputed sorters are re-run. - # For instance, just remove the "sorter_folders/rec1/herdingspikes" to re-run - # only one sorter on one recording. - # - # Then we copy the spike sorting outputs into a separate subfolder. - # This allow us to remove the "large" sorter_folders. - study.copy_sortings() + + # all cases in one function + study.run_sorters() # Collect comparisons #   @@ -306,11 +328,11 @@ binary (for recordings) and npz (for sortings). # Note: use exhaustive_gt=True when you know exactly how many # units in ground truth (for synthetic datasets) + # run all comparisons and loop over the results study.run_comparisons(exhaustive_gt=True) - - for (rec_name, sorter_name), comp in study.comparisons.items(): + for key, comp in study.comparisons.items(): print('*' * 10) - print(rec_name, sorter_name) + print(key) # raw counting of tp/fp/... print(comp.count_score) # summary @@ -323,26 +345,27 @@ binary (for recordings) and npz (for sortings). # Collect synthetic dataframes and display # As shown previously, the performance is returned as a pandas dataframe. - # The :py:func:`~spikeinterface.comparison.aggregate_performances_table()` function, + # The :py:func:`~spikeinterface.comparison.get_performance_by_unit()` function, # gathers all the outputs in the study folder and merges them in a single dataframe. + # Same idea for :py:func:`~spikeinterface.comparison.get_count_units()` - dataframes = study.aggregate_dataframes() + # this is a dataframe + perfs = study.get_performance_by_unit() - # Pandas dataframes can be nicely displayed as tables in the notebook. 
- print(dataframes.keys()) + # this is a dataframe + unit_counts = study.get_count_units() # we can also access run times - print(dataframes['run_times']) + run_times = study.get_run_times() + print(run_times) # Easy plot with seaborn - run_times = dataframes['run_times'] fig1, ax1 = plt.subplots() sns.barplot(data=run_times, x='rec_name', y='run_time', hue='sorter_name', ax=ax1) ax1.set_title('Run times') ############################################################################## - perfs = dataframes['perf_by_unit'] fig2, ax2 = plt.subplots() sns.swarmplot(data=perfs, x='sorter_name', y='recall', hue='rec_name', ax=ax2) ax2.set_title('Recall') diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 6898f381b6..6dc9cb30f0 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -126,9 +126,6 @@ def scan_folder(self): self.info = json.load(f) self.levels = self.info["levels"] - # if isinstance(self.levels, list): - # # because tuple caoont be stored in json - # self.levels = tuple(self.info["levels"]) for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): key = rec_file.stem @@ -169,9 +166,6 @@ def key_to_str(self, key): raise ValueError("Keys for cases must str or tuple") def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True, verbose=False): - """ - - """ if case_keys is None: case_keys = self.cases.keys() From 2f4d50a6651d4fc0ba568463df61a350d62ddd33 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Tue, 19 Sep 2023 08:32:16 -0400 Subject: [PATCH 099/322] typo corrections, link corrections --- doc/development/development.rst | 6 +++--- doc/install_sorters.rst | 2 +- doc/modules/sorters.rst | 6 +++--- doc/modules/sortingcomponents.rst | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/development/development.rst b/doc/development/development.rst index f1371639c3..4704b9b1e6 100644 --- a/doc/development/development.rst +++ b/doc/development/development.rst @@ -14,7 +14,7 @@ There are various ways to contribute to SpikeInterface as a user or developer. S * Writing unit tests to expand code coverage and use case scenarios. * Reporting bugs and issues. -We use a forking workflow _ to manage contributions. Here's a summary of the steps involved, with more details available in the provided link: +We use a forking workflow ``_ to manage contributions. Here's a summary of the steps involved, with more details available in the provided link: * Fork the SpikeInterface repository. * Create a new branch (e.g., :code:`git switch -c my-contribution`). @@ -22,7 +22,7 @@ We use a forking workflow _ . +While we appreciate all the contributions please be mindful of the cost of reviewing pull requests ``_ . How to run tests locally @@ -201,7 +201,7 @@ Implement a new extractor SpikeInterface already supports over 30 file formats, but the acquisition system you use might not be among the supported formats list (***ref***). Most of the extractord rely on the `NEO `_ package to read information from files. -Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new `neo.rawio `_ class. +Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new :code:``neo.rawio ` class. 
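(A side note on the wrapping step described here: in practice it amounts to a small subclass. The sketch below is illustrative only; the format name, the hypothetical ``MyFormatRawIO`` neo class, and the exact base-class attributes and ``__init__`` signature are assumptions that should be checked against one of the existing extractors in ``spikeinterface/extractors/neoextractors/``.)

.. code-block:: python

    from spikeinterface.extractors.neoextractors.neobaseextractors import NeoBaseRecordingExtractor


    class MyFormatRecordingExtractor(NeoBaseRecordingExtractor):
        # Both class attributes follow the pattern used by the existing Neo-based
        # extractors; verify their exact names against that code.
        mode = "file"                    # the raw IO points at a single file
        NeoRawIOClass = "MyFormatRawIO"  # hypothetical neo.rawio class name

        def __init__(self, file_path, stream_id=None, stream_name=None, all_annotations=False):
            # map the SpikeInterface-facing argument to the neo.rawio keyword
            neo_kwargs = {"filename": str(file_path)}
            NeoBaseRecordingExtractor.__init__(
                self,
                stream_id=stream_id,
                stream_name=stream_name,
                all_annotations=all_annotations,
                **neo_kwargs,
            )
            self._kwargs = dict(file_path=str(file_path), stream_id=stream_id)
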
Once that is done, the new class can be easily wrapped into SpikeInterface as an extension of the :py:class:`~spikeinterface.extractors.neoextractors.neobaseextractors.NeoBaseRecordingExtractor` (for :py:class:`~spikeinterface.core.BaseRecording` objects) or diff --git a/doc/install_sorters.rst b/doc/install_sorters.rst index 3fda05848c..10a3185c5c 100644 --- a/doc/install_sorters.rst +++ b/doc/install_sorters.rst @@ -117,7 +117,7 @@ Kilosort2.5 git clone https://github.com/MouseLand/Kilosort # provide installation path by setting the KILOSORT2_5_PATH environment variable - # or using Kilosort2_5Sorter.set_kilosort2_path() + # or using Kilosort2_5Sorter.set_kilosort2_5_path() * See also for Matlab/CUDA: https://www.mathworks.com/help/parallel-computing/gpu-support-by-release.html diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index 34ab3d1151..1b27ed442c 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -239,7 +239,7 @@ There are three options: 1. **released PyPi version**: if you installed :code:`spikeinterface` with :code:`pip install spikeinterface`, the latest released version will be installed in the container. -2. **development :code:`main` version**: if you installed :code:`spikeinterface` from source from the cloned repo +2. **development** :code:`main` **version**: if you installed :code:`spikeinterface` from source from the cloned repo (with :code:`pip install .`) or with :code:`pip install git+https://github.com/SpikeInterface/spikeinterface.git`, the current development version from the :code:`main` branch will be installed in the container. @@ -458,7 +458,7 @@ Here is the list of external sorters accessible using the run_sorter wrapper: * **Kilosort** :code:`run_sorter('kilosort')` * **Kilosort2** :code:`run_sorter('kilosort2')` * **Kilosort2.5** :code:`run_sorter('kilosort2_5')` -* **Kilosort3** :code:`run_sorter('Kilosort3')` +* **Kilosort3** :code:`run_sorter('kilosort3')` * **PyKilosort** :code:`run_sorter('pykilosort')` * **Klusta** :code:`run_sorter('klusta')` * **Mountainsort4** :code:`run_sorter('mountainsort4')` @@ -474,7 +474,7 @@ Here is the list of external sorters accessible using the run_sorter wrapper: Here a list of internal sorter based on `spikeinterface.sortingcomponents`; they are totally experimental for now: -* **Spyking circus2** :code:`run_sorter('spykingcircus2')` +* **Spyking Circus2** :code:`run_sorter('spykingcircus2')` * **Tridesclous2** :code:`run_sorter('tridesclous2')` In 2023, we expect to add many more sorters to this list. diff --git a/doc/modules/sortingcomponents.rst b/doc/modules/sortingcomponents.rst index aa62ea5b33..422eaea890 100644 --- a/doc/modules/sortingcomponents.rst +++ b/doc/modules/sortingcomponents.rst @@ -223,7 +223,7 @@ Here is a short example that depends on the output of "Motion interpolation": **Notes**: * :code:`spatial_interpolation_method` "kriging" or "iwd" do not play a big role. - * :code:`border_mode` is a very important parameter. It controls how to deal with the border because motion causes units on the + * :code:`border_mode` is a very important parameter. It controls dealing with the border because motion causes units on the border to not be present throughout the entire recording. We highly recommend the :code:`border_mode='remove_channels'` because this removes channels on the border that will be impacted by drift. Of course the larger the motion is the more channels are removed. 
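(To make the two notes above concrete, a minimal interpolation call could look like the sketch below. It assumes that ``motion``, ``temporal_bins`` and ``spatial_bins`` come from the motion estimation step, and that the function name and keyword arguments shown match the version of ``spikeinterface.sortingcomponents.motion_interpolation`` being documented; treat this as a sketch rather than a definitive snippet.)

.. code-block:: python

    from spikeinterface.sortingcomponents.motion_interpolation import interpolate_motion

    # rec: the preprocessed recording
    # motion, temporal_bins, spatial_bins: outputs of the motion estimation step
    rec_corrected = interpolate_motion(
        recording=rec,
        motion=motion,
        temporal_bins=temporal_bins,
        spatial_bins=spatial_bins,
        border_mode="remove_channels",           # drop border channels affected by drift
        spatial_interpolation_method="kriging",  # see the note above; this choice matters little
    )
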
@@ -278,7 +278,7 @@ At the moment, there are five methods implemented: * 'naive': a very naive implemenation used as a reference for benchmarks * 'tridesclous': the algorithm for template matching implemented in Tridesclous * 'circus': the algorithm for template matching implemented in SpyKING-Circus - * 'circus-omp': a updated algorithm similar to SpyKING-Circus but with OMP (orthogonal macthing + * 'circus-omp': a updated algorithm similar to SpyKING-Circus but with OMP (orthogonal matching pursuit) * 'wobble' : an algorithm loosely based on YASS that scales template amplitudes and shifts them in time to match detected spikes From 46c4ada52b95a7deeed4babf5bb40a9e775047d4 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 14:45:53 +0200 Subject: [PATCH 100/322] Port plot_agreement_matrix to new widgets API --- .../widgets/_legacy_mpl_widgets/__init__.py | 2 +- .../_legacy_mpl_widgets/agreementmatrix.py | 91 ------------------- .../widgets/tests/test_widgets.py | 10 +- src/spikeinterface/widgets/widget_list.py | 3 + 4 files changed, 13 insertions(+), 93 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index c0dcd7ea6e..045b8acc8e 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -17,7 +17,7 @@ # comparison related from .confusionmatrix import plot_confusion_matrix, ConfusionMatrixWidget -from .agreementmatrix import plot_agreement_matrix, AgreementMatrixWidget + from .multicompgraph import ( plot_multicomp_graph, MultiCompGraphWidget, diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py deleted file mode 100644 index 369746e99b..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/agreementmatrix.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class AgreementMatrixWidget(BaseWidget): - """ - Plots sorting comparison confusion matrix. - - Parameters - ---------- - sorting_comparison: GroundTruthComparison or SymmetricSortingComparison - The sorting comparison object. - Symetric or not. - ordered: bool - Order units with best agreement scores. - This enable to see agreement on a diagonal. - count_text: bool - If True counts are displayed as text - unit_ticks: bool - If True unit tick labels are displayed - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. If not given an axis is created - """ - - def __init__(self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, figure=None, ax=None): - from matplotlib import pyplot as plt - - BaseWidget.__init__(self, figure, ax) - self._sc = sorting_comparison - self._ordered = ordered - self._count_text = count_text - self._unit_ticks = unit_ticks - self.name = "ConfusionMatrix" - - def plot(self): - self._do_plot() - - def _do_plot(self): - # a dataframe - if self._ordered: - scores = self._sc.get_ordered_agreement_scores() - else: - scores = self._sc.agreement_scores - - N1 = scores.shape[0] - N2 = scores.shape[1] - - unit_ids1 = scores.index.values - unit_ids2 = scores.columns.values - - # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
- self.ax.matshow(scores.values, cmap="Greens") - - if self._count_text: - for i, u1 in enumerate(unit_ids1): - u2 = self._sc.best_match_12[u1] - if u2 != -1: - j = np.where(unit_ids2 == u2)[0][0] - - self.ax.text(j, i, "{:0.2f}".format(scores.at[u1, u2]), ha="center", va="center", color="white") - - # Major ticks - self.ax.set_xticks(np.arange(0, N2)) - self.ax.set_yticks(np.arange(0, N1)) - self.ax.xaxis.tick_bottom() - - # Labels for major ticks - if self._unit_ticks: - self.ax.set_yticklabels(scores.index, fontsize=12) - self.ax.set_xticklabels(scores.columns, fontsize=12) - - self.ax.set_xlabel(self._sc.name_list[1], fontsize=20) - self.ax.set_ylabel(self._sc.name_list[0], fontsize=20) - - self.ax.set_xlim(-0.5, N2 - 0.5) - self.ax.set_ylim( - N1 - 0.5, - -0.5, - ) - - -def plot_agreement_matrix(*args, **kwargs): - W = AgreementMatrixWidget(*args, **kwargs) - W.plot() - return W - - -plot_agreement_matrix.__doc__ = AgreementMatrixWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index a5f75ebf50..2f11e5ee3c 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -324,6 +324,13 @@ def test_sorting_summary(self): sw.plot_sorting_summary(self.we, backend=backend, **self.backend_kwargs[backend]) sw.plot_sorting_summary(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) + def test_plot_agreement_matrix(self): + possible_backends = list(sw.AgreementMatrixWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_agreement_matrix(self.gt_comp) + + if __name__ == "__main__": # unittest.main() @@ -344,7 +351,8 @@ def test_sorting_summary(self): # mytest.test_unit_locations() # mytest.test_quality_metrics() # mytest.test_template_metrics() - mytest.test_amplitudes() + # mytest.test_amplitudes() + mytest.test_plot_agreement_matrix() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 9c89b3981e..22b33e38aa 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -2,6 +2,7 @@ from .base import backend_kwargs_desc +from .agreement_matrix import AgreementMatrixWidget from .all_amplitudes_distributions import AllAmplitudesDistributionsWidget from .amplitudes import AmplitudesWidget from .autocorrelograms import AutoCorrelogramsWidget @@ -23,6 +24,7 @@ widget_list = [ + AgreementMatrixWidget, AllAmplitudesDistributionsWidget, AmplitudesWidget, AutoCorrelogramsWidget, @@ -76,6 +78,7 @@ # make function for all widgets +plot_agreement_matrix = AgreementMatrixWidget plot_all_amplitudes_distributions = AllAmplitudesDistributionsWidget plot_amplitudes = AmplitudesWidget plot_autocorrelograms = AutoCorrelogramsWidget From e49071e38394c039d70cbc083c8b5a2cbb785b1b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 14:53:01 +0200 Subject: [PATCH 101/322] Port plot_confusion_matrix to new API. 
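Editorial note on patches 100 and 101: after these two ports, both comparison
widgets are reached through the unified spikeinterface.widgets entry points,
exactly as exercised by the updated tests. A minimal usage sketch, where
gt_comp is assumed to be a GroundTruthComparison built beforehand (for example
with compare_sorter_to_ground_truth(gt_sorting, tested_sorting)):

    import spikeinterface.widgets as sw

    # gt_comp: a GroundTruthComparison (see spikeinterface.comparison)
    sw.plot_agreement_matrix(gt_comp)
    sw.plot_confusion_matrix(gt_comp)
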
--- .../widgets/_legacy_mpl_widgets/__init__.py | 3 - .../_legacy_mpl_widgets/confusionmatrix.py | 91 ------------------- .../widgets/tests/test_widgets.py | 9 +- src/spikeinterface/widgets/widget_list.py | 3 + 4 files changed, 11 insertions(+), 95 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index 045b8acc8e..6013512022 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -15,9 +15,6 @@ # units on probe from .unitprobemap import plot_unit_probe_map, UnitProbeMapWidget -# comparison related -from .confusionmatrix import plot_confusion_matrix, ConfusionMatrixWidget - from .multicompgraph import ( plot_multicomp_graph, MultiCompGraphWidget, diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py deleted file mode 100644 index 942b613fbf..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/confusionmatrix.py +++ /dev/null @@ -1,91 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class ConfusionMatrixWidget(BaseWidget): - """ - Plots sorting comparison confusion matrix. - - Parameters - ---------- - gt_comparison: GroundTruthComparison - The ground truth sorting comparison object - count_text: bool - If True counts are displayed as text - unit_ticks: bool - If True unit tick labels are displayed - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. If not given an axis is created - - Returns - ------- - W: ConfusionMatrixWidget - The output widget - """ - - def __init__(self, gt_comparison, count_text=True, unit_ticks=True, figure=None, ax=None): - from matplotlib import pyplot as plt - - BaseWidget.__init__(self, figure, ax) - self._gtcomp = gt_comparison - self._count_text = count_text - self._unit_ticks = unit_ticks - self.name = "ConfusionMatrix" - - def plot(self): - self._do_plot() - - def _do_plot(self): - # a dataframe - confusion_matrix = self._gtcomp.get_confusion_matrix() - - N1 = confusion_matrix.shape[0] - 1 - N2 = confusion_matrix.shape[1] - 1 - - # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
- self.ax.matshow(confusion_matrix.values, cmap="Greens") - - if self._count_text: - for (i, j), z in np.ndenumerate(confusion_matrix.values): - if z != 0: - if z > np.max(confusion_matrix.values) / 2.0: - self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="white") - else: - self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="black") - - self.ax.axhline(int(N1 - 1) + 0.5, color="black") - self.ax.axvline(int(N2 - 1) + 0.5, color="black") - - # Major ticks - self.ax.set_xticks(np.arange(0, N2 + 1)) - self.ax.set_yticks(np.arange(0, N1 + 1)) - self.ax.xaxis.tick_bottom() - - # Labels for major ticks - if self._unit_ticks: - self.ax.set_yticklabels(confusion_matrix.index, fontsize=12) - self.ax.set_xticklabels(confusion_matrix.columns, fontsize=12) - else: - self.ax.set_xticklabels(np.append([""] * N2, "FN"), fontsize=10) - self.ax.set_yticklabels(np.append([""] * N1, "FP"), fontsize=10) - - self.ax.set_xlabel(self._gtcomp.name_list[1], fontsize=20) - self.ax.set_ylabel(self._gtcomp.name_list[0], fontsize=20) - - self.ax.set_xlim(-0.5, N2 + 0.5) - self.ax.set_ylim( - N1 + 0.5, - -0.5, - ) - - -def plot_confusion_matrix(*args, **kwargs): - W = ConfusionMatrixWidget(*args, **kwargs) - W.plot() - return W - - -plot_confusion_matrix.__doc__ = ConfusionMatrixWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 2f11e5ee3c..0aa309f748 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -330,6 +330,12 @@ def test_plot_agreement_matrix(self): if backend not in self.skip_backends: sw.plot_agreement_matrix(self.gt_comp) + def test_plot_confusion_matrix(self): + possible_backends = list(sw.AgreementMatrixWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_confusion_matrix(self.gt_comp) + if __name__ == "__main__": @@ -352,7 +358,8 @@ def test_plot_agreement_matrix(self): # mytest.test_quality_metrics() # mytest.test_template_metrics() # mytest.test_amplitudes() - mytest.test_plot_agreement_matrix() + # mytest.test_plot_agreement_matrix() + mytest.test_plot_confusion_matrix() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 22b33e38aa..d02aa7de7a 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -6,6 +6,7 @@ from .all_amplitudes_distributions import AllAmplitudesDistributionsWidget from .amplitudes import AmplitudesWidget from .autocorrelograms import AutoCorrelogramsWidget +from .confusion_matrix import ConfusionMatrixWidget from .crosscorrelograms import CrossCorrelogramsWidget from .motion import MotionWidget from .quality_metrics import QualityMetricsWidget @@ -28,6 +29,7 @@ AllAmplitudesDistributionsWidget, AmplitudesWidget, AutoCorrelogramsWidget, + ConfusionMatrixWidget, CrossCorrelogramsWidget, MotionWidget, QualityMetricsWidget, @@ -82,6 +84,7 @@ plot_all_amplitudes_distributions = AllAmplitudesDistributionsWidget plot_amplitudes = AmplitudesWidget plot_autocorrelograms = AutoCorrelogramsWidget +plot_confusion_matrix = ConfusionMatrixWidget plot_crosscorrelograms = CrossCorrelogramsWidget plot_motion = MotionWidget plot_quality_metrics = QualityMetricsWidget From b90e35b9df6bb03bac2a7c3e76e36c79c3f68af3 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Tue, 19 Sep 2023 
08:56:48 -0400 Subject: [PATCH 102/322] Update doc/development/development.rst Co-authored-by: Alessio Buccino --- doc/development/development.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/development/development.rst b/doc/development/development.rst index 4704b9b1e6..7656da11ab 100644 --- a/doc/development/development.rst +++ b/doc/development/development.rst @@ -201,7 +201,7 @@ Implement a new extractor SpikeInterface already supports over 30 file formats, but the acquisition system you use might not be among the supported formats list (***ref***). Most of the extractord rely on the `NEO `_ package to read information from files. -Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new :code:``neo.rawio ` class. +Therefore, to implement a new extractor to handle the unsupported format, we recommend make a new :code:`neo.rawio.BaseRawIO` class (see `example `_). Once that is done, the new class can be easily wrapped into SpikeInterface as an extension of the :py:class:`~spikeinterface.extractors.neoextractors.neobaseextractors.NeoBaseRecordingExtractor` (for :py:class:`~spikeinterface.core.BaseRecording` objects) or From fac98233b84fa440b374d944d1c27b9d200cd0c1 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 19 Sep 2023 15:31:10 +0200 Subject: [PATCH 103/322] add tutorial to load matlab data --- doc/how_to/index.rst | 1 + doc/how_to/load_matalb_data.rst | 66 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 doc/how_to/load_matalb_data.rst diff --git a/doc/how_to/index.rst b/doc/how_to/index.rst index dabad818f9..fa7210d4f0 100644 --- a/doc/how_to/index.rst +++ b/doc/how_to/index.rst @@ -7,3 +7,4 @@ How to guides get_started analyse_neuropixels handle_drift + load_matalb_data diff --git a/doc/how_to/load_matalb_data.rst b/doc/how_to/load_matalb_data.rst new file mode 100644 index 0000000000..39b9a48d65 --- /dev/null +++ b/doc/how_to/load_matalb_data.rst @@ -0,0 +1,66 @@ +Exporting MATLAB Data to Binary & Loading in SpikeInterface +=========================================================== + +In this tutorial, we'll go through the process of exporting your data from MATLAB in a binary format and then loading it using SpikeInterface in Python. Let's break down the steps. + +Exporting Data from MATLAB +-------------------------- + +First, ensure your data is structured correctly. The data matrix should be organized such that the first dimension corresponds to samples/time and the second dimension to channels. + +.. code-block:: matlab + + % Define the size of your data + num_samples = 1000; + num_channels = 384; + + % Generate random data as an example + data = rand(num_samples, num_channels); + + % Write the data to a binary file + fileID = fopen('your_data_as_a_binary.bin', 'wb'); + fwrite(fileID, data, 'double'); + fclose(fileID); + +.. note:: + + In a real-world scenario, replace the random data generation with your actual data. + +Loading Data in SpikeInterface +----------------------------- + +This should produce a binary file called `your_data_as_a_binary.bin` in your current MATLAB directory. +You will need the complete path (i.e. its location on your computer) to load it in Python. + +Once you have your data in a binary format, you can seamlessly load it into SpikeInterface using the following script: + +.. 
code-block:: python + + from spikeinterface.core.binaryrecordingextractor import BinaryRecordingExtractor + from pathlib import Path + + # Define the path to your binary file + file_path = Path("/The/Path/To/Your/Data/your_data_as_a_binary.bin") + + # Ensure the file exists + assert file_path.is_file() + + # Specify the parameters of your recording + sampling_frequency = 30_000.0 # in Hz, adjust as per your matlab dataset + num_channels = 384 # adjust as per your matlab dataset + dtype = "float64" + + # Load the data using SpikeInterface + recording = BinaryRecordingExtractor(file_path, sampling_frequency=sampling_frequency, + num_channels=num_channels, dtype=dtype, gain_to_uV=1, offset_to_uV=0) + + # Verify the shape of your data + assert recording.get_traces().shape == (num_samples, num_channels) + +Common Pitfalls & Tips +---------------------- + +1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. +2. **File Path**: Double-check the file path in Python to ensure you're pointing to the right directory. +3. **Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. +4. **Sampling Frequency**: Ensure you set the correct sampling frequency when loading data into SpikeInterface. From b8023d0733e48b8bc96d50c763753a7da1b3a5d5 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 16:16:40 +0200 Subject: [PATCH 104/322] Add read_binary and read_zarr functions to extractord and docs API --- doc/api.rst | 11 ++++++----- src/spikeinterface/extractors/extractorlist.py | 2 ++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/api.rst b/doc/api.rst index 43f79386e6..122c88d01b 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -19,6 +19,8 @@ spikeinterface.core .. autofunction:: extract_waveforms .. autofunction:: load_waveforms .. autofunction:: compute_sparsity + .. autoclass:: ChannelSparsity + :members: .. autoclass:: BinaryRecordingExtractor .. autoclass:: ZarrRecordingExtractor .. autoclass:: BinaryFolderRecording @@ -48,10 +50,6 @@ spikeinterface.core .. autofunction:: get_template_extremum_channel .. autofunction:: get_template_extremum_channel_peak_shift .. autofunction:: get_template_extremum_amplitude - -.. - .. autofunction:: read_binary - .. autofunction:: read_zarr .. autofunction:: append_recordings .. autofunction:: concatenate_recordings .. autofunction:: split_recording @@ -59,6 +57,8 @@ spikeinterface.core .. autofunction:: append_sortings .. autofunction:: split_sorting .. autofunction:: select_segment_sorting + .. autofunction:: read_binary + .. autofunction:: read_zarr Low-level ~~~~~~~~~ @@ -67,7 +67,6 @@ Low-level :noindex: .. autoclass:: BaseWaveformExtractorExtension - .. autoclass:: ChannelSparsity .. autoclass:: ChunkRecordingExecutor spikeinterface.extractors @@ -83,6 +82,7 @@ NEO-based .. autofunction:: read_alphaomega_event .. autofunction:: read_axona .. autofunction:: read_biocam + .. autofunction:: read_binary .. autofunction:: read_blackrock .. autofunction:: read_ced .. autofunction:: read_intan @@ -104,6 +104,7 @@ NEO-based .. autofunction:: read_spikegadgets .. autofunction:: read_spikeglx .. autofunction:: read_tdt + .. 
autofunction:: read_zarr Non-NEO-based diff --git a/src/spikeinterface/extractors/extractorlist.py b/src/spikeinterface/extractors/extractorlist.py index ebff40fae0..235dd705dc 100644 --- a/src/spikeinterface/extractors/extractorlist.py +++ b/src/spikeinterface/extractors/extractorlist.py @@ -11,6 +11,8 @@ NumpySorting, NpySnippetsExtractor, ZarrRecordingExtractor, + read_binary, + read_zarr, ) # sorting/recording/event from neo From 26cfd5db963796865b4a5ec877bfdd37e8616537 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:01:24 +0200 Subject: [PATCH 105/322] Percentiles need 0-100 and ad duinit_ids to syncrhony metrics --- .../qualitymetrics/misc_metrics.py | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 6a42b12bb5..38add13c02 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -499,7 +499,7 @@ def compute_sliding_rp_violations( ) -def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **kwargs): +def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), unit_ids=None, **kwargs): """Compute synchrony metrics. Synchrony metrics represent the rate of occurrences of "synchrony_size" spikes at the exact same sample index. @@ -509,6 +509,8 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k The waveform extractor object. synchrony_sizes : list or tuple, default: (2, 4, 8) The synchrony sizes to compute. + unit_ids : list or None, default: None + List of unit ids to compute the synchrony metrics. If None, all units are used. Returns ------- @@ -526,6 +528,9 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) + if unit_ids is None: + unit_ids = sorting.unit_ids + # Pre-allocate synchrony counts synchrony_counts = {} for synchrony_size in synchrony_sizes: @@ -538,20 +543,20 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k unique_spike_index, complexity = np.unique(spikes_in_segment["sample_index"], return_counts=True) # add counts for this segment - for unit_index in np.arange(len(sorting.unit_ids)): + for unit_id in unit_ids: + unit_index = sorting.unit_ids.index(unit_id) spikes_per_unit = spikes_in_segment[spikes_in_segment["unit_index"] == unit_index] # some segments/units might have no spikes if len(spikes_per_unit) == 0: continue spike_complexity = complexity[np.in1d(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: - synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) + synchrony_counts[synchrony_size][unit_id] += np.count_nonzero(spike_complexity >= synchrony_size) # add counts for this segment synchrony_metrics_dict = { f"sync_spike_{synchrony_size}": { - unit_id: synchrony_counts[synchrony_size][unit_index] / spike_counts[unit_id] - for unit_index, unit_id in enumerate(sorting.unit_ids) + unit_id: synchrony_counts[synchrony_size][unit_id] / spike_counts[unit_id] for unit_id in unit_ids } for synchrony_size in synchrony_sizes } @@ -565,7 +570,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), **k _default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4)) -def compute_firing_ranges(waveform_extractor, 
bin_size_s=5, percentiles=(0.05, 0.95), unit_ids=None): +def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(5, 95), unit_ids=None, **kwargs): """Calculate firing range, the range between the 5th and 95th percentiles of the firing rates distribution computed in non-overlapping time bins. @@ -575,7 +580,7 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0 The waveform extractor object. bin_size_s : float, default: 5 The size of the bin in seconds. - percentiles : tuple, default: (0.05, 0.95) + percentiles : tuple, default: (5, 95) The percentiles to compute. unit_ids : list or None List of unit ids to compute the firing range. If None, all units are used. @@ -617,13 +622,13 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(0.05, 0 return firing_ranges -_default_params["firing_range"] = dict(bin_size_s=5, percentiles=(0.05, 0.95)) +_default_params["firing_range"] = dict(bin_size_s=5, percentiles=(5, 95)) def compute_amplitude_cv_metrics( waveform_extractor, average_num_spikes_per_bin=50, - percentiles=(0.05, 0.95), + percentiles=(5, 95), min_num_bins=10, amplitude_extension="spike_amplitudes", unit_ids=None, @@ -726,7 +731,7 @@ def compute_amplitude_cv_metrics( _default_params["amplitude_cv"] = dict( - average_num_spikes_per_bin=50, percentiles=(0.05, 0.95), min_num_bins=10, amplitude_extension="spike_amplitudes" + average_num_spikes_per_bin=50, percentiles=(5, 95), min_num_bins=10, amplitude_extension="spike_amplitudes" ) From 0b2ac19982024f61cdcb4dc886e54ea813b962b6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:04:43 +0200 Subject: [PATCH 106/322] Fix Kilosort Phy reader docstrings --- .../extractors/phykilosortextractors.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index c91aed644d..2769e03344 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -17,6 +17,10 @@ class BasePhyKilosortSortingExtractor(BaseSorting): Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). keep_good_only : bool, default: True Whether to only keep good units. + remove_empty_units : bool, default: True + If True, empty units are removed from the sorting extractor. + load_all_cluster_properties : bool, default: True + If True, all cluster properties are loaded from the tsv/csv files. """ extractor_name = "BasePhyKilosortSorting" @@ -197,18 +201,26 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): Path to the output Phy folder (containing the params.py). exclude_cluster_groups: list or str, optional Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). + load_all_cluster_properties : bool, default: True + If True, all cluster properties are loaded from the tsv/csv files. Returns ------- extractor : PhySortingExtractor - The loaded data. + The loaded Sorting object. 
""" extractor_name = "PhySorting" name = "phy" - def __init__(self, folder_path, exclude_cluster_groups=None): - BasePhyKilosortSortingExtractor.__init__(self, folder_path, exclude_cluster_groups, keep_good_only=False) + def __init__(self, folder_path, exclude_cluster_groups=None, load_all_cluster_properties=True): + BasePhyKilosortSortingExtractor.__init__( + self, + folder_path, + exclude_cluster_groups, + keep_good_only=False, + load_all_cluster_properties=load_all_cluster_properties, + ) self._kwargs = { "folder_path": str(Path(folder_path).absolute()), @@ -223,8 +235,6 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): ---------- folder_path: str or Path Path to the output Phy folder (containing the params.py). - exclude_cluster_groups: list or str, optional - Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). keep_good_only : bool, default: True Whether to only keep good units. If True, only Kilosort-labeled 'good' units are returned. @@ -234,7 +244,7 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): Returns ------- extractor : KiloSortSortingExtractor - The loaded data. + The loaded Sorting object. """ extractor_name = "KiloSortSorting" From 3d792951a6036849b5d82ea523bb6cc20e784a07 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 17:09:32 +0200 Subject: [PATCH 107/322] port plot_probe_map() to new widgets API --- .../widgets/_legacy_mpl_widgets/__init__.py | 1 - .../widgets/_legacy_mpl_widgets/probemap.py | 77 ------------------- .../widgets/tests/test_widgets.py | 8 +- src/spikeinterface/widgets/widget_list.py | 3 + 4 files changed, 10 insertions(+), 79 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index 6013512022..af1419fb11 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -1,7 +1,6 @@ # basics # from .timeseries import plot_timeseries, TracesWidget from .rasters import plot_rasters, RasterWidget -from .probemap import plot_probe_map, ProbeMapWidget # isi/ccg/acg from .isidistribution import plot_isi_distribution, ISIDistributionWidget diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py deleted file mode 100644 index 6e6578a4c4..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/probemap.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class ProbeMapWidget(BaseWidget): - """ - Plot the probe of a recording. - - Parameters - ---------- - recording: RecordingExtractor - The recording extractor object - channel_ids: list - The channel ids to display - with_channel_ids: bool False default - Add channel ids text on the probe - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. 
If not given an axis is created - **plot_probe_kwargs: keyword arguments for probeinterface.plotting.plot_probe_group() function - - Returns - ------- - W: ProbeMapWidget - The output widget - """ - - def __init__(self, recording, channel_ids=None, with_channel_ids=False, figure=None, ax=None, **plot_probe_kwargs): - import matplotlib.pylab as plt - from probeinterface.plotting import plot_probe, get_auto_lims - - BaseWidget.__init__(self, figure, ax) - - if channel_ids is not None: - recording = recording.channel_slice(channel_ids) - self._recording = recording - self._probegroup = recording.get_probegroup() - self.with_channel_ids = with_channel_ids - self._plot_probe_kwargs = plot_probe_kwargs - - def plot(self): - self._do_plot() - - def _do_plot(self): - from probeinterface.plotting import get_auto_lims - - xlims, ylims, zlims = get_auto_lims(self._probegroup.probes[0]) - for i, probe in enumerate(self._probegroup.probes): - xlims2, ylims2, _ = get_auto_lims(probe) - xlims = min(xlims[0], xlims2[0]), max(xlims[1], xlims2[1]) - ylims = min(ylims[0], ylims2[0]), max(ylims[1], ylims2[1]) - - self._plot_probe_kwargs["title"] = False - pos = 0 - text_on_contact = None - for i, probe in enumerate(self._probegroup.probes): - n = probe.get_contact_count() - if self.with_channel_ids: - text_on_contact = self._recording.channel_ids[pos : pos + n] - pos += n - from probeinterface.plotting import plot_probe - - plot_probe(probe, ax=self.ax, text_on_contact=text_on_contact, **self._plot_probe_kwargs) - - self.ax.set_xlim(*xlims) - self.ax.set_ylim(*ylims) - - -def plot_probe_map(*args, **kwargs): - W = ProbeMapWidget(*args, **kwargs) - W.plot() - return W - - -plot_probe_map.__doc__ = ProbeMapWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 0aa309f748..bc0ec68041 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -336,6 +336,11 @@ def test_plot_confusion_matrix(self): if backend not in self.skip_backends: sw.plot_confusion_matrix(self.gt_comp) + def test_plot_probe_map(self): + possible_backends = list(sw.ProbeMapWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_probe_map(self.recording, with_channel_ids=True, with_contact_id=True) if __name__ == "__main__": @@ -359,7 +364,8 @@ def test_plot_confusion_matrix(self): # mytest.test_template_metrics() # mytest.test_amplitudes() # mytest.test_plot_agreement_matrix() - mytest.test_plot_confusion_matrix() + # mytest.test_plot_confusion_matrix() + mytest.test_plot_probe_map() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index d02aa7de7a..77db17029f 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -9,6 +9,7 @@ from .confusion_matrix import ConfusionMatrixWidget from .crosscorrelograms import CrossCorrelogramsWidget from .motion import MotionWidget +from .probe_map import ProbeMapWidget from .quality_metrics import QualityMetricsWidget from .sorting_summary import SortingSummaryWidget from .spike_locations import SpikeLocationsWidget @@ -32,6 +33,7 @@ ConfusionMatrixWidget, CrossCorrelogramsWidget, MotionWidget, + ProbeMapWidget, QualityMetricsWidget, SortingSummaryWidget, SpikeLocationsWidget, @@ -87,6 +89,7 @@ plot_confusion_matrix = ConfusionMatrixWidget plot_crosscorrelograms = CrossCorrelogramsWidget 
plot_motion = MotionWidget +plot_probe_map = ProbeMapWidget plot_quality_metrics = QualityMetricsWidget plot_sorting_summary = SortingSummaryWidget plot_spike_locations = SpikeLocationsWidget From 2bd7dd6c1c0fea0e094293f1fb17f9293ce30bb6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:13:06 +0200 Subject: [PATCH 108/322] oups --- src/spikeinterface/qualitymetrics/misc_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 38add13c02..0a37da99c3 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -536,6 +536,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni for synchrony_size in synchrony_sizes: synchrony_counts[synchrony_size] = np.zeros(len(waveform_extractor.unit_ids), dtype=np.int64) + all_unit_ids = list(sorting.unit_ids) for segment_index in range(sorting.get_num_segments()): spikes_in_segment = spikes[segment_index] @@ -544,7 +545,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni # add counts for this segment for unit_id in unit_ids: - unit_index = sorting.unit_ids.index(unit_id) + unit_index = all_unit_ids.index(unit_id) spikes_per_unit = spikes_in_segment[spikes_in_segment["unit_index"] == unit_index] # some segments/units might have no spikes if len(spikes_per_unit) == 0: From 7c958c3789f5591ad9fb8c9a4eaef1b905e5c929 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:16:52 +0200 Subject: [PATCH 109/322] Unify imports and comments for quality metrics docs --- doc/modules/qualitymetrics/amplitude_cutoff.rst | 6 +++--- doc/modules/qualitymetrics/amplitude_cv.rst | 4 ++-- doc/modules/qualitymetrics/amplitude_median.rst | 6 +++--- doc/modules/qualitymetrics/d_prime.rst | 4 ++-- doc/modules/qualitymetrics/drift.rst | 4 ++-- doc/modules/qualitymetrics/firing_range.rst | 6 +++--- doc/modules/qualitymetrics/firing_rate.rst | 6 +++--- doc/modules/qualitymetrics/isi_violations.rst | 4 ++-- doc/modules/qualitymetrics/presence_ratio.rst | 6 +++--- doc/modules/qualitymetrics/sliding_rp_violations.rst | 4 ++-- doc/modules/qualitymetrics/snr.rst | 6 +++--- doc/modules/qualitymetrics/synchrony.rst | 4 ++-- 12 files changed, 30 insertions(+), 30 deletions(-) diff --git a/doc/modules/qualitymetrics/amplitude_cutoff.rst b/doc/modules/qualitymetrics/amplitude_cutoff.rst index 9f747f8d40..a1e4d85d01 100644 --- a/doc/modules/qualitymetrics/amplitude_cutoff.rst +++ b/doc/modules/qualitymetrics/amplitude_cutoff.rst @@ -21,12 +21,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # It is also recommended to run `compute_spike_amplitudes(wvf_extractor)` # in order to use amplitudes from all spikes - fraction_missing = qm.compute_amplitude_cutoffs(wvf_extractor, peak_sign="neg") - # fraction_missing is a dict containing the units' IDs as keys, + fraction_missing = sqm.compute_amplitude_cutoffs(wvf_extractor, peak_sign="neg") + # fraction_missing is a dict containing the unit IDs as keys, # and their estimated fraction of missing spikes as values. 
Reference diff --git a/doc/modules/qualitymetrics/amplitude_cv.rst b/doc/modules/qualitymetrics/amplitude_cv.rst index 981813ef09..3edb1f9833 100644 --- a/doc/modules/qualitymetrics/amplitude_cv.rst +++ b/doc/modules/qualitymetrics/amplitude_cv.rst @@ -32,12 +32,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. # It is required to run `compute_spike_amplitudes(wvf_extractor)` or # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN) - amplitude_cv_median, amplitude_cv_range = qm.compute_amplitude_cv_metrics(wvf_extractor) + amplitude_cv_median, amplitude_cv_range = sqm.compute_amplitude_cv_metrics(wvf_extractor) # amplitude_cv_median and amplitude_cv_range are dicts containing the unit ids as keys, # and their amplitude_cv metrics as values. diff --git a/doc/modules/qualitymetrics/amplitude_median.rst b/doc/modules/qualitymetrics/amplitude_median.rst index ffc45d1cf6..3ac52560e8 100644 --- a/doc/modules/qualitymetrics/amplitude_median.rst +++ b/doc/modules/qualitymetrics/amplitude_median.rst @@ -20,12 +20,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # It is also recommended to run `compute_spike_amplitudes(wvf_extractor)` # in order to use amplitude values from all spikes. - amplitude_medians = qm.compute_amplitude_medians(wvf_extractor) - # amplitude_medians is a dict containing the units' IDs as keys, + amplitude_medians = sqm.compute_amplitude_medians(wvf_extractor) + # amplitude_medians is a dict containing the unit IDs as keys, # and their estimated amplitude medians as values. Reference diff --git a/doc/modules/qualitymetrics/d_prime.rst b/doc/modules/qualitymetrics/d_prime.rst index abb8c1dc74..e3bd61c580 100644 --- a/doc/modules/qualitymetrics/d_prime.rst +++ b/doc/modules/qualitymetrics/d_prime.rst @@ -32,9 +32,9 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm - d_prime = qm.lda_metrics(all_pcs, all_labels, 0) + d_prime = sqm.lda_metrics(all_pcs, all_labels, 0) Reference diff --git a/doc/modules/qualitymetrics/drift.rst b/doc/modules/qualitymetrics/drift.rst index 4e78150ba7..ae52f7f883 100644 --- a/doc/modules/qualitymetrics/drift.rst +++ b/doc/modules/qualitymetrics/drift.rst @@ -40,12 +40,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. # It is required to run `compute_spike_locations(wvf_extractor)` # (if missing, values will be NaN) - drift_ptps, drift_stds, drift_mads = qm.compute_drift_metrics(wvf_extractor, peak_sign="neg") + drift_ptps, drift_stds, drift_mads = sqm.compute_drift_metrics(wvf_extractor, peak_sign="neg") # drift_ptps, drift_stds, and drift_mads are dict containing the units' ID as keys, # and their metrics as values. diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 3fd3d53573..925539e9c6 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -21,11 +21,11 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. 
- firing_range = qm.compute_firing_ranges(wvf_extractor) - # firing_range is a dict containing the units' IDs as keys, + firing_range = sqm.compute_firing_ranges(wvf_extractor) + # firing_range is a dict containing the unit IDs as keys, # and their firing firing_range as values (in Hz). References diff --git a/doc/modules/qualitymetrics/firing_rate.rst b/doc/modules/qualitymetrics/firing_rate.rst index eddef3e48f..c0e15d7c2e 100644 --- a/doc/modules/qualitymetrics/firing_rate.rst +++ b/doc/modules/qualitymetrics/firing_rate.rst @@ -37,11 +37,11 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - firing_rate = qm.compute_firing_rates(wvf_extractor) - # firing_rate is a dict containing the units' IDs as keys, + firing_rate = sqm.compute_firing_rates(wvf_extractor) + # firing_rate is a dict containing the unit IDs as keys, # and their firing rates across segments as values (in Hz). References diff --git a/doc/modules/qualitymetrics/isi_violations.rst b/doc/modules/qualitymetrics/isi_violations.rst index 947e7d4938..725d9b0fd6 100644 --- a/doc/modules/qualitymetrics/isi_violations.rst +++ b/doc/modules/qualitymetrics/isi_violations.rst @@ -77,11 +77,11 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - isi_violations_ratio, isi_violations_count = qm.compute_isi_violations(wvf_extractor, isi_threshold_ms=1.0) + isi_violations_ratio, isi_violations_count = sqm.compute_isi_violations(wvf_extractor, isi_threshold_ms=1.0) References ---------- diff --git a/doc/modules/qualitymetrics/presence_ratio.rst b/doc/modules/qualitymetrics/presence_ratio.rst index e4de2248bd..5a420c8ccf 100644 --- a/doc/modules/qualitymetrics/presence_ratio.rst +++ b/doc/modules/qualitymetrics/presence_ratio.rst @@ -23,12 +23,12 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - presence_ratio = qm.compute_presence_ratios(wvf_extractor) - # presence_ratio is a dict containing the units' IDs as keys + presence_ratio = sqm.compute_presence_ratios(wvf_extractor) + # presence_ratio is a dict containing the unit IDs as keys # and their presence ratio (between 0 and 1) as values. Links to original implementations diff --git a/doc/modules/qualitymetrics/sliding_rp_violations.rst b/doc/modules/qualitymetrics/sliding_rp_violations.rst index 843242c1e8..de68c3a92f 100644 --- a/doc/modules/qualitymetrics/sliding_rp_violations.rst +++ b/doc/modules/qualitymetrics/sliding_rp_violations.rst @@ -27,11 +27,11 @@ With SpikeInterface: .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - contamination = qm.compute_sliding_rp_violations(wvf_extractor, bin_size_ms=0.25) + contamination = sqm.compute_sliding_rp_violations(wvf_extractor, bin_size_ms=0.25) References ---------- diff --git a/doc/modules/qualitymetrics/snr.rst b/doc/modules/qualitymetrics/snr.rst index 288ab60515..b88d3291be 100644 --- a/doc/modules/qualitymetrics/snr.rst +++ b/doc/modules/qualitymetrics/snr.rst @@ -41,12 +41,12 @@ With SpikeInterface: .. 
code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - SNRs = qm.compute_snrs(wvf_extractor) - # SNRs is a dict containing the units' IDs as keys and their SNRs as values. + SNRs = sqm.compute_snrs(wvf_extractor) + # SNRs is a dict containing the unit IDs as keys and their SNRs as values. Links to original implementations --------------------------------- diff --git a/doc/modules/qualitymetrics/synchrony.rst b/doc/modules/qualitymetrics/synchrony.rst index 2f566bf8a7..0750940199 100644 --- a/doc/modules/qualitymetrics/synchrony.rst +++ b/doc/modules/qualitymetrics/synchrony.rst @@ -27,9 +27,9 @@ Example code .. code-block:: python - import spikeinterface.qualitymetrics as qm + import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - synchrony = qm.compute_synchrony_metrics(wvf_extractor, synchrony_sizes=(2, 4, 8)) + synchrony = sqm.compute_synchrony_metrics(wvf_extractor, synchrony_sizes=(2, 4, 8)) # synchrony is a tuple of dicts with the synchrony metrics for each unit From 2d4f7692196388a0d9a27808c3c4f8002090247f Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 17:40:37 +0200 Subject: [PATCH 110/322] For connoisseur only: add a simple "ephyviewer" backend plot_traces(). --- src/spikeinterface/widgets/base.py | 2 ++ .../widgets/tests/test_widgets.py | 4 +-- src/spikeinterface/widgets/traces.py | 26 +++++++++++++++++++ 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py index dea46b8f51..4ed83fcca9 100644 --- a/src/spikeinterface/widgets/base.py +++ b/src/spikeinterface/widgets/base.py @@ -39,12 +39,14 @@ def set_default_plotter_backend(backend): "height_cm": "Height of the figure in cm (default 6)", "display": "If True, widgets are immediately displayed", }, + "ephyviewer": {}, } default_backend_kwargs = { "matplotlib": {"figure": None, "ax": None, "axes": None, "ncols": 5, "figsize": None, "figtitle": None}, "sortingview": {"generate_url": True, "display": True, "figlabel": None, "height": None}, "ipywidgets": {"width_cm": 25, "height_cm": 10, "display": True}, + "ephyviewer": {}, } diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index a5f75ebf50..7386167d0b 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -72,7 +72,7 @@ def setUpClass(cls): else: cls.we_sparse = cls.we.save(folder=cache_folder / "mearec_test_sparse", sparsity=cls.sparsity_radius) - cls.skip_backends = ["ipywidgets"] + cls.skip_backends = ["ipywidgets", "ephyviewer"] if ON_GITHUB and not KACHERY_CLOUD_SET: cls.skip_backends.append("sortingview") @@ -344,7 +344,7 @@ def test_sorting_summary(self): # mytest.test_unit_locations() # mytest.test_quality_metrics() # mytest.test_template_metrics() - mytest.test_amplitudes() + # mytest.test_amplitudes() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index e025f779c1..e046623eb7 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -523,6 +523,32 @@ def plot_sortingview(self, data_plot, **backend_kwargs): backend_kwargs["display"] = False self.url = handle_display_and_url(self, self.view, **backend_kwargs) + + def plot_ephyviewer(self, data_plot, **backend_kwargs): 
+ import ephyviewer + from ..preprocessing import depth_order + + dp = to_attr(data_plot) + + app = ephyviewer.mkQApp() + win = ephyviewer.MainViewer(debug=False, show_auto_scale=True) + + for k, rec in dp.recordings.items(): + + if dp.order_channel_by_depth: + rec = depth_order(rec, flip=True) + + sig_source = ephyviewer.SpikeInterfaceRecordingSource(recording=rec) + view = ephyviewer.TraceViewer(source=sig_source, name=k) + view.params['scale_mode'] = 'by_channel' + if dp.show_channel_ids: + view.params['display_labels'] = True + view.auto_scale() + win.add_view(view) + + win.show() + app.exec() + def _get_trace_list(recordings, channel_ids, time_range, segment_index, order=None, return_scaled=False): From 16cf79e222c51ab54f82f0783a8f23734c270bdb Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 17:56:04 +0200 Subject: [PATCH 111/322] Default synchrony sizes and assertion --- src/spikeinterface/qualitymetrics/misc_metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index 0a37da99c3..b02bfae9ba 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -523,7 +523,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni Based on concepts described in [Gruen]_ This code was adapted from `Elephant - Electrophysiology Analysis Toolkit `_ """ - assert np.all(s > 1 for s in synchrony_sizes), "Synchrony sizes must be greater than 1" + assert np.all([s > 1 for s in synchrony_sizes]), "Synchrony sizes must be greater than 1" spike_counts = waveform_extractor.sorting.count_num_spikes_per_unit() sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) @@ -568,7 +568,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni return synchrony_metrics -_default_params["synchrony"] = dict(synchrony_sizes=(0, 2, 4)) +_default_params["synchrony"] = dict(synchrony_sizes=(2, 4, 8)) def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(5, 95), unit_ids=None, **kwargs): From 5c0bdbb546fd121db38cc9c5123360f7534eb94a Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 19 Sep 2023 17:59:48 +0200 Subject: [PATCH 112/322] Suggestions from Alessio Co-authored-by: Alessio Buccino --- src/spikeinterface/sorters/launcher.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index d04a89fdf1..d6506cade5 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -51,9 +51,9 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal Where *blocking* means that this function is blocking until the results are returned. This is in opposition to *asynchronous*, where the function returns `None` almost immediately (aka non-blocking), - but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be aware + but the results must be retrieved by hand when jobs are finished. No mechanisim is provided here to be know when jobs are finish. - In this *asynchronous* case, the :py:func:read_sorter_folder() helps to retrieve individual results. + In this *asynchronous* case, the :py:func:`~spikeinterface.sorters.read_sorter_folder()` helps to retrieve individual results. 
Parameters @@ -82,7 +82,7 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs = engine_kwargs_ if return_output: - assert engine in ("loop", "joblib", "processpoolexecutor") + assert engine in ("loop", "joblib", "processpoolexecutor"), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." out = [] else: out = None @@ -355,7 +355,7 @@ def run_sorters( """ warnings.warn( - "run_sorters()is deprecated please use run_sorter_jobs() instead. This will be removed in 0.100", + "run_sorters() is deprecated please use run_sorter_jobs() instead. This will be removed in 0.100", DeprecationWarning, stacklevel=2, ) From 0ecf83b46dacf5426b7f55157f0d48497eb52245 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 18:02:51 +0200 Subject: [PATCH 113/322] add read_sorter_folder in api.rst --- doc/api.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/api.rst b/doc/api.rst index b605127426..8b269fc685 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -219,6 +219,7 @@ spikeinterface.sorters .. autofunction:: run_sorter_jobs .. autofunction:: run_sorters .. autofunction:: run_sorter_by_property + .. autofunction:: read_sorter_folder Low level ~~~~~~~~~ From 60e8989d3207f9ad213d96484c767a53b7e535a2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Sep 2023 16:05:28 +0000 Subject: [PATCH 114/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/launcher.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index d6506cade5..f32a468a22 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -82,7 +82,11 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs = engine_kwargs_ if return_output: - assert engine in ("loop", "joblib", "processpoolexecutor"), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." + assert engine in ( + "loop", + "joblib", + "processpoolexecutor", + ), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." out = [] else: out = None From 45012894a558a59903e7b87f235d5f85f7637711 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 18:41:27 +0200 Subject: [PATCH 115/322] Port plot_raster() to new API. 
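The ported widget keeps the legacy arguments (sorting, segment_index, unit_ids,
time_range, color) but now goes through the new BaseWidget backend machinery.
A minimal usage sketch (the sorting object and the 5 s window are placeholders,
not part of this patch):

    import spikeinterface.widgets as sw

    # plot_rasters now resolves to the new RasterWidget registered in widget_list.py
    w = sw.plot_rasters(sorting, time_range=[0.0, 5.0], backend="matplotlib")

As before, segment_index must be passed explicitly when the sorting has more
than one segment.
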
--- .../widgets/_legacy_mpl_widgets/__init__.py | 4 - .../widgets/_legacy_mpl_widgets/rasters.py | 120 --------- .../tests/test_widgets_legacy.py | 48 +--- .../_legacy_mpl_widgets/timeseries_.py | 233 ------------------ .../widgets/tests/test_widgets.py | 10 +- src/spikeinterface/widgets/widget_list.py | 3 + 6 files changed, 13 insertions(+), 405 deletions(-) delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py delete mode 100644 src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py index af1419fb11..9593f14d1c 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/__init__.py @@ -1,7 +1,3 @@ -# basics -# from .timeseries import plot_timeseries, TracesWidget -from .rasters import plot_rasters, RasterWidget - # isi/ccg/acg from .isidistribution import plot_isi_distribution, ISIDistributionWidget diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py deleted file mode 100644 index d05373103e..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/rasters.py +++ /dev/null @@ -1,120 +0,0 @@ -import numpy as np - -from .basewidget import BaseWidget - - -class RasterWidget(BaseWidget): - """ - Plots spike train rasters. - - Parameters - ---------- - sorting: SortingExtractor - The sorting extractor object - segment_index: None or int - The segment index. - unit_ids: list - List of unit ids - time_range: list - List with start time and end time - color: matplotlib color - The color to be used - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. 
If not given an axis is created - - Returns - ------- - W: RasterWidget - The output widget - """ - - def __init__(self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", figure=None, ax=None): - from matplotlib import pyplot as plt - - BaseWidget.__init__(self, figure, ax) - self._sorting = sorting - - if segment_index is None: - nseg = sorting.get_num_segments() - if nseg != 1: - raise ValueError("You must provide segment_index=...") - else: - segment_index = 0 - self.segment_index = segment_index - - self._unit_ids = unit_ids - self._figure = None - self._sampling_frequency = sorting.get_sampling_frequency() - self._color = color - self._max_frame = 0 - for unit_id in self._sorting.get_unit_ids(): - spike_train = self._sorting.get_unit_spike_train(unit_id, segment_index=self.segment_index) - if len(spike_train) > 0: - curr_max_frame = np.max(spike_train) - if curr_max_frame > self._max_frame: - self._max_frame = curr_max_frame - self._visible_trange = time_range - if self._visible_trange is None: - self._visible_trange = [0, self._max_frame] - else: - assert len(time_range) == 2, "'time_range' should be a list with start and end time in seconds" - self._visible_trange = [int(t * self._sampling_frequency) for t in time_range] - - self._visible_trange = self._fix_trange(self._visible_trange) - self.name = "Raster" - - def plot(self): - self._do_plot() - - def _do_plot(self): - units_ids = self._unit_ids - if units_ids is None: - units_ids = self._sorting.get_unit_ids() - import matplotlib.pyplot as plt - - with plt.rc_context({"axes.edgecolor": "gray"}): - for u_i, unit_id in enumerate(units_ids): - spiketrain = self._sorting.get_unit_spike_train( - unit_id, - start_frame=self._visible_trange[0], - end_frame=self._visible_trange[1], - segment_index=self.segment_index, - ) - spiketimes = spiketrain / float(self._sampling_frequency) - self.ax.plot( - spiketimes, - u_i * np.ones_like(spiketimes), - marker="|", - mew=1, - markersize=3, - ls="", - color=self._color, - ) - visible_start_frame = self._visible_trange[0] / self._sampling_frequency - visible_end_frame = self._visible_trange[1] / self._sampling_frequency - self.ax.set_yticks(np.arange(len(units_ids))) - self.ax.set_yticklabels(units_ids) - self.ax.set_xlim(visible_start_frame, visible_end_frame) - self.ax.set_xlabel("time (s)") - - def _fix_trange(self, trange): - if trange[1] > self._max_frame: - # trange[0] += max_t - trange[1] - trange[1] = self._max_frame - if trange[0] < 0: - # trange[1] += -trange[0] - trange[0] = 0 - # trange[0] = np.maximum(0, trange[0]) - # trange[1] = np.minimum(max_t, trange[1]) - return trange - - -def plot_rasters(*args, **kwargs): - W = RasterWidget(*args, **kwargs) - W.plot() - return W - - -plot_rasters.__doc__ = RasterWidget.__doc__ diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py index 5004765251..defe10f0d4 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py @@ -43,43 +43,7 @@ def setUp(self): def tearDown(self): pass - # def test_timeseries(self): - # sw.plot_timeseries(self._rec, mode='auto') - # sw.plot_timeseries(self._rec, mode='line', show_channel_ids=True) - # sw.plot_timeseries(self._rec, mode='map', show_channel_ids=True) - # sw.plot_timeseries(self._rec, mode='map', show_channel_ids=True, order_channel_by_depth=True) - - def 
test_rasters(self): - sw.plot_rasters(self._sorting) - - def test_plot_probe_map(self): - sw.plot_probe_map(self._rec) - sw.plot_probe_map(self._rec, with_channel_ids=True) - - # TODO - # def test_spectrum(self): - # sw.plot_spectrum(self._rec) - - # TODO - # def test_spectrogram(self): - # sw.plot_spectrogram(self._rec, channel=0) - - # def test_unitwaveforms(self): - # w = sw.plot_unit_waveforms(self._we) - # unit_ids = self._sorting.unit_ids[:6] - # sw.plot_unit_waveforms(self._we, max_channels=5, unit_ids=unit_ids) - # sw.plot_unit_waveforms(self._we, radius_um=60, unit_ids=unit_ids) - - # def test_plot_unit_waveform_density_map(self): - # unit_ids = self._sorting.unit_ids[:3] - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, max_channels=4) - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, radius_um=50) - # - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, radius_um=25, same_axis=True) - # sw.plot_unit_waveform_density_map(self._we, unit_ids=unit_ids, max_channels=2, same_axis=True) - - # def test_unittemplates(self): - # sw.plot_unit_templates(self._we) + def test_plot_unit_probe_map(self): sw.plot_unit_probe_map(self._we, with_channel_ids=True) @@ -120,12 +84,6 @@ def test_plot_peak_activity_map(self): sw.plot_peak_activity_map(self._rec, with_channel_ids=True) sw.plot_peak_activity_map(self._rec, bin_duration_s=1.0) - def test_confusion(self): - sw.plot_confusion_matrix(self._gt_comp, count_text=True) - - def test_agreement(self): - sw.plot_agreement_matrix(self._gt_comp, count_text=True) - def test_multicomp_graph(self): msc = sc.compare_multiple_sorters([self._sorting, self._sorting, self._sorting]) sw.plot_multicomp_graph(msc, edge_cmap="viridis", node_cmap="rainbow", draw_labels=False) @@ -150,8 +108,6 @@ def test_sorting_performance(self): mytest.setUp() # ~ mytest.test_timeseries() - # ~ mytest.test_rasters() - mytest.test_plot_probe_map() # ~ mytest.test_unitwaveforms() # ~ mytest.test_plot_unit_waveform_density_map() # mytest.test_unittemplates() @@ -169,8 +125,6 @@ def test_sorting_performance(self): # ~ mytest.test_plot_drift_over_time() # ~ mytest.test_plot_peak_activity_map() - # mytest.test_confusion() - # mytest.test_agreement() # ~ mytest.test_multicomp_graph() #  mytest.test_sorting_performance() diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py deleted file mode 100644 index ab6fa2ace5..0000000000 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/timeseries_.py +++ /dev/null @@ -1,233 +0,0 @@ -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.ticker import MaxNLocator -from .basewidget import BaseWidget - -import scipy.spatial - - -class TracesWidget(BaseWidget): - """ - Plots recording timeseries. - - Parameters - ---------- - recording: RecordingExtractor - The recording extractor object - segment_index: None or int - The segment index. - channel_ids: list - The channel ids to display. - order_channel_by_depth: boolean - Reorder channel by depth. 
- time_range: list - List with start time and end time - mode: 'line' or 'map' or 'auto' - 2 possible mode: - * 'line' : classical for low channel count - * 'map' : for high channel count use color heat map - * 'auto' : auto switch depending the channel count <32ch - cmap: str default 'RdBu' - matplotlib colormap used in mode 'map' - show_channel_ids: bool - Set yticks with channel ids - color_groups: bool - If True groups are plotted with different colors - color: matplotlib color, default: None - The color used to draw the traces. - clim: None or tupple - When mode='map' this control color lims - with_colorbar: bool default True - When mode='map' add colorbar - figure: matplotlib figure - The figure to be used. If not given a figure is created - ax: matplotlib axis - The axis to be used. If not given an axis is created - - Returns - ------- - W: TracesWidget - The output widget - """ - - def __init__( - self, - recording, - segment_index=None, - channel_ids=None, - order_channel_by_depth=False, - time_range=None, - mode="auto", - cmap="RdBu", - show_channel_ids=False, - color_groups=False, - color=None, - clim=None, - with_colorbar=True, - figure=None, - ax=None, - **plot_kwargs, - ): - BaseWidget.__init__(self, figure, ax) - self.recording = recording - self._sampling_frequency = recording.get_sampling_frequency() - self.visible_channel_ids = channel_ids - self._plot_kwargs = plot_kwargs - - if segment_index is None: - nseg = recording.get_num_segments() - if nseg != 1: - raise ValueError("You must provide segment_index=...") - segment_index = 0 - self.segment_index = segment_index - - if self.visible_channel_ids is None: - self.visible_channel_ids = recording.get_channel_ids() - - if order_channel_by_depth: - locations = self.recording.get_channel_locations() - channel_inds = self.recording.ids_to_indices(self.visible_channel_ids) - locations = locations[channel_inds, :] - origin = np.array([np.max(locations[:, 0]), np.min(locations[:, 1])])[None, :] - dist = scipy.spatial.distance.cdist(locations, origin, metric="euclidean") - dist = dist[:, 0] - self.order = np.argsort(dist) - else: - self.order = None - - if channel_ids is None: - channel_ids = recording.get_channel_ids() - - fs = recording.get_sampling_frequency() - if time_range is None: - time_range = (0, 1.0) - time_range = np.array(time_range) - - assert mode in ("auto", "line", "map"), "Mode must be in auto/line/map" - if mode == "auto": - if len(channel_ids) <= 64: - mode = "line" - else: - mode = "map" - self.mode = mode - self.cmap = cmap - - self.show_channel_ids = show_channel_ids - - self._frame_range = (time_range * fs).astype("int64") - a_max = self.recording.get_num_frames(segment_index=self.segment_index) - self._frame_range = np.clip(self._frame_range, 0, a_max) - self._time_range = [e / fs for e in self._frame_range] - - self.clim = clim - self.with_colorbar = with_colorbar - - self._initialize_stats() - - # self._vspacing = self._mean_channel_std * 20 - self._vspacing = self._max_channel_amp * 1.5 - - if recording.get_channel_groups() is None: - color_groups = False - - self._color_groups = color_groups - self._color = color - if color_groups: - self._colors = [] - self._group_color_map = {} - all_groups = recording.get_channel_groups() - groups = np.unique(all_groups) - N = len(groups) - import colorsys - - HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)] - self._colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)) - color_idx = 0 - for group in groups: - self._group_color_map[group] = 
color_idx - color_idx += 1 - self.name = "TimeSeries" - - def plot(self): - self._do_plot() - - def _do_plot(self): - chunk0 = self.recording.get_traces( - segment_index=self.segment_index, - channel_ids=self.visible_channel_ids, - start_frame=self._frame_range[0], - end_frame=self._frame_range[1], - ) - if self.order is not None: - chunk0 = chunk0[:, self.order] - self.visible_channel_ids = np.array(self.visible_channel_ids)[self.order] - - ax = self.ax - - n = len(self.visible_channel_ids) - - if self.mode == "line": - ax.set_xlim( - self._frame_range[0] / self._sampling_frequency, self._frame_range[1] / self._sampling_frequency - ) - ax.set_ylim(-self._vspacing, self._vspacing * n) - ax.get_xaxis().set_major_locator(MaxNLocator(prune="both")) - ax.get_yaxis().set_ticks([]) - ax.set_xlabel("time (s)") - - self._plots = {} - self._plot_offsets = {} - offset0 = self._vspacing * (n - 1) - times = np.arange(self._frame_range[0], self._frame_range[1]) / self._sampling_frequency - for im, m in enumerate(self.visible_channel_ids): - self._plot_offsets[m] = offset0 - if self._color_groups: - group = self.recording.get_channel_groups(channel_ids=[m])[0] - group_color_idx = self._group_color_map[group] - color = self._colors[group_color_idx] - else: - color = self._color - self._plots[m] = ax.plot(times, self._plot_offsets[m] + chunk0[:, im], color=color, **self._plot_kwargs) - offset0 = offset0 - self._vspacing - - if self.show_channel_ids: - ax.set_yticks(np.arange(n) * self._vspacing) - ax.set_yticklabels([str(chan_id) for chan_id in self.visible_channel_ids[::-1]]) - - elif self.mode == "map": - extent = (self._time_range[0], self._time_range[1], 0, self.recording.get_num_channels()) - im = ax.imshow( - chunk0.T, interpolation="nearest", origin="upper", aspect="auto", extent=extent, cmap=self.cmap - ) - - if self.clim is None: - im.set_clim(-self._max_channel_amp, self._max_channel_amp) - else: - im.set_clim(*self.clim) - - if self.with_colorbar: - self.figure.colorbar(im, ax=ax) - - if self.show_channel_ids: - ax.set_yticks(np.arange(n) + 0.5) - ax.set_yticklabels([str(chan_id) for chan_id in self.visible_channel_ids[::-1]]) - - def _initialize_stats(self): - chunk0 = self.recording.get_traces( - segment_index=self.segment_index, - channel_ids=self.visible_channel_ids, - start_frame=self._frame_range[0], - end_frame=self._frame_range[1], - ) - - self._mean_channel_std = np.mean(np.std(chunk0, axis=0)) - self._max_channel_amp = np.max(np.max(np.abs(chunk0), axis=0)) - - -def plot_timeseries(*args, **kwargs): - W = TracesWidget(*args, **kwargs) - W.plot() - return W - - -plot_timeseries.__doc__ = TracesWidget.__doc__ diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index bc0ec68041..509194cb93 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -342,6 +342,13 @@ def test_plot_probe_map(self): if backend not in self.skip_backends: sw.plot_probe_map(self.recording, with_channel_ids=True, with_contact_id=True) + def test_plot_rasters(self): + possible_backends = list(sw.RasterWidget.get_possible_backends()) + for backend in possible_backends: + if backend not in self.skip_backends: + sw.plot_rasters(self.sorting) + + if __name__ == "__main__": # unittest.main() @@ -365,7 +372,8 @@ def test_plot_probe_map(self): # mytest.test_amplitudes() # mytest.test_plot_agreement_matrix() # mytest.test_plot_confusion_matrix() - mytest.test_plot_probe_map() + # 
mytest.test_plot_probe_map() + mytest.test_plot_rasters() # plt.ion() plt.show() diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index 77db17029f..6ea2593432 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -11,6 +11,7 @@ from .motion import MotionWidget from .probe_map import ProbeMapWidget from .quality_metrics import QualityMetricsWidget +from .rasters import RasterWidget from .sorting_summary import SortingSummaryWidget from .spike_locations import SpikeLocationsWidget from .spikes_on_traces import SpikesOnTracesWidget @@ -35,6 +36,7 @@ MotionWidget, ProbeMapWidget, QualityMetricsWidget, + RasterWidget, SortingSummaryWidget, SpikeLocationsWidget, SpikesOnTracesWidget, @@ -91,6 +93,7 @@ plot_motion = MotionWidget plot_probe_map = ProbeMapWidget plot_quality_metrics = QualityMetricsWidget +plot_rasters = RasterWidget plot_sorting_summary = SortingSummaryWidget plot_spike_locations = SpikeLocationsWidget plot_spikes_on_traces = SpikesOnTracesWidget From 625ff5e35219d397215413bebdb4f64dac8f0707 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 18:44:28 +0200 Subject: [PATCH 116/322] Oups. --- .../widgets/agreement_matrix.py | 91 ++++++++++++++++++ .../widgets/confusion_matrix.py | 83 ++++++++++++++++ src/spikeinterface/widgets/probe_map.py | 78 +++++++++++++++ src/spikeinterface/widgets/rasters.py | 95 +++++++++++++++++++ 4 files changed, 347 insertions(+) create mode 100644 src/spikeinterface/widgets/agreement_matrix.py create mode 100644 src/spikeinterface/widgets/confusion_matrix.py create mode 100644 src/spikeinterface/widgets/probe_map.py create mode 100644 src/spikeinterface/widgets/rasters.py diff --git a/src/spikeinterface/widgets/agreement_matrix.py b/src/spikeinterface/widgets/agreement_matrix.py new file mode 100644 index 0000000000..55f38f078b --- /dev/null +++ b/src/spikeinterface/widgets/agreement_matrix.py @@ -0,0 +1,91 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr +from .utils import get_unit_colors + + + +class AgreementMatrixWidget(BaseWidget): + """ + Plot unit depths + + Parameters + ---------- + sorting_comparison: GroundTruthComparison or SymmetricSortingComparison + The sorting comparison object. + Symetric or not. + ordered: bool + Order units with best agreement scores. + This enable to see agreement on a diagonal. + count_text: bool + If True counts are displayed as text + unit_ticks: bool + If True unit tick labels are displayed + + """ + + def __init__( + self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, + backend=None, **backend_kwargs + ): + plot_data = dict( + sorting_comparison=sorting_comparison, + ordered=ordered, + count_text=count_text, + unit_ticks=unit_ticks, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + comp = dp.sorting_comparison + + if dp.ordered: + scores = comp.get_ordered_agreement_scores() + else: + scores = comp.agreement_scores + + N1 = scores.shape[0] + N2 = scores.shape[1] + + unit_ids1 = scores.index.values + unit_ids2 = scores.columns.values + + # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
+ self.ax.matshow(scores.values, cmap="Greens") + + if dp.count_text: + for i, u1 in enumerate(unit_ids1): + u2 = comp.best_match_12[u1] + if u2 != -1: + j = np.where(unit_ids2 == u2)[0][0] + + self.ax.text(j, i, "{:0.2f}".format(scores.at[u1, u2]), ha="center", va="center", color="white") + + # Major ticks + self.ax.set_xticks(np.arange(0, N2)) + self.ax.set_yticks(np.arange(0, N1)) + self.ax.xaxis.tick_bottom() + + # Labels for major ticks + if dp.unit_ticks: + self.ax.set_yticklabels(scores.index, fontsize=12) + self.ax.set_xticklabels(scores.columns, fontsize=12) + + self.ax.set_xlabel(comp.name_list[1], fontsize=20) + self.ax.set_ylabel(comp.name_list[0], fontsize=20) + + self.ax.set_xlim(-0.5, N2 - 0.5) + self.ax.set_ylim( + N1 - 0.5, + -0.5, + ) + + diff --git a/src/spikeinterface/widgets/confusion_matrix.py b/src/spikeinterface/widgets/confusion_matrix.py new file mode 100644 index 0000000000..da021092db --- /dev/null +++ b/src/spikeinterface/widgets/confusion_matrix.py @@ -0,0 +1,83 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr +from .utils import get_unit_colors + + + +class ConfusionMatrixWidget(BaseWidget): + """ + Plot unit depths + + Parameters + ---------- + gt_comparison: GroundTruthComparison + The ground truth sorting comparison object + count_text: bool + If True counts are displayed as text + unit_ticks: bool + If True unit tick labels are displayed + + """ + + def __init__( + self, gt_comparison, count_text=True, unit_ticks=True, + backend=None, **backend_kwargs + ): + plot_data = dict( + gt_comparison=gt_comparison, + count_text=count_text, + unit_ticks=unit_ticks, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + comp = dp.gt_comparison + + confusion_matrix = comp.get_confusion_matrix() + N1 = confusion_matrix.shape[0] - 1 + N2 = confusion_matrix.shape[1] - 1 + + # Using matshow here just because it sets the ticks up nicely. imshow is faster. 
+ self.ax.matshow(confusion_matrix.values, cmap="Greens") + + if dp.count_text: + for (i, j), z in np.ndenumerate(confusion_matrix.values): + if z != 0: + if z > np.max(confusion_matrix.values) / 2.0: + self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="white") + else: + self.ax.text(j, i, "{:d}".format(z), ha="center", va="center", color="black") + + self.ax.axhline(int(N1 - 1) + 0.5, color="black") + self.ax.axvline(int(N2 - 1) + 0.5, color="black") + + # Major ticks + self.ax.set_xticks(np.arange(0, N2 + 1)) + self.ax.set_yticks(np.arange(0, N1 + 1)) + self.ax.xaxis.tick_bottom() + + # Labels for major ticks + if dp.unit_ticks: + self.ax.set_yticklabels(confusion_matrix.index, fontsize=12) + self.ax.set_xticklabels(confusion_matrix.columns, fontsize=12) + else: + self.ax.set_xticklabels(np.append([""] * N2, "FN"), fontsize=10) + self.ax.set_yticklabels(np.append([""] * N1, "FP"), fontsize=10) + + self.ax.set_xlabel(comp.name_list[1], fontsize=20) + self.ax.set_ylabel(comp.name_list[0], fontsize=20) + + self.ax.set_xlim(-0.5, N2 + 0.5) + self.ax.set_ylim( + N1 + 0.5, + -0.5, + ) \ No newline at end of file diff --git a/src/spikeinterface/widgets/probe_map.py b/src/spikeinterface/widgets/probe_map.py new file mode 100644 index 0000000000..193711a34f --- /dev/null +++ b/src/spikeinterface/widgets/probe_map.py @@ -0,0 +1,78 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr, default_backend_kwargs +from .utils import get_unit_colors + + + +class ProbeMapWidget(BaseWidget): + """ + Plot the probe of a recording. + + Parameters + ---------- + recording: RecordingExtractor + The recording extractor object + channel_ids: list + The channel ids to display + with_channel_ids: bool False default + Add channel ids text on the probe + **plot_probe_kwargs: keyword arguments for probeinterface.plotting.plot_probe_group() function + + """ + + def __init__( + self, recording, channel_ids=None, with_channel_ids=False, + backend=None, **backend_or_plot_probe_kwargs + ): + + # split backend_or_plot_probe_kwargs + backend_kwargs = dict() + plot_probe_kwargs = dict() + backend = self.check_backend(backend) + for k, v in backend_or_plot_probe_kwargs.items(): + if k in default_backend_kwargs[backend]: + backend_kwargs[k] = v + else: + plot_probe_kwargs[k] = v + + plot_data = dict( + recording=recording, + channel_ids=channel_ids, + with_channel_ids=with_channel_ids, + plot_probe_kwargs=plot_probe_kwargs, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + from probeinterface.plotting import get_auto_lims, plot_probe + + dp = to_attr(data_plot) + + plot_probe_kwargs = dp.plot_probe_kwargs + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + probegroup = dp.recording.get_probegroup() + + xlims, ylims, zlims = get_auto_lims(probegroup.probes[0]) + for i, probe in enumerate(probegroup.probes): + xlims2, ylims2, _ = get_auto_lims(probe) + xlims = min(xlims[0], xlims2[0]), max(xlims[1], xlims2[1]) + ylims = min(ylims[0], ylims2[0]), max(ylims[1], ylims2[1]) + + plot_probe_kwargs["title"] = False + pos = 0 + text_on_contact = None + for i, probe in enumerate(probegroup.probes): + n = probe.get_contact_count() + if dp.with_channel_ids: + text_on_contact = dp.recording.channel_ids[pos : pos + n] + pos += n + plot_probe(probe, ax=self.ax, text_on_contact=text_on_contact, 
**plot_probe_kwargs) + + self.ax.set_xlim(*xlims) + self.ax.set_ylim(*ylims) diff --git a/src/spikeinterface/widgets/rasters.py b/src/spikeinterface/widgets/rasters.py new file mode 100644 index 0000000000..de855ebe45 --- /dev/null +++ b/src/spikeinterface/widgets/rasters.py @@ -0,0 +1,95 @@ +import numpy as np +from warnings import warn + +from .base import BaseWidget, to_attr, default_backend_kwargs + + + +class RasterWidget(BaseWidget): + """ + Plots spike train rasters. + + Parameters + ---------- + sorting: SortingExtractor + The sorting extractor object + segment_index: None or int + The segment index. + unit_ids: list + List of unit ids + time_range: list + List with start time and end time + color: matplotlib color + The color to be used + """ + + def __init__( + self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", + backend=None, **backend_kwargs + ): + + + if segment_index is None: + if sorting.get_num_segments() != 1: + raise ValueError("You must provide segment_index=...") + segment_index = 0 + + if time_range is None: + frame_range = [0, sorting.to_spike_vector()[-1]["sample_index"]] + time_range = [f / sorting.sampling_frequency for f in frame_range] + else: + assert len(time_range) == 2, "'time_range' should be a list with start and end time in seconds" + frame_range = [int(t * sorting.sampling_frequency) for t in time_range] + + plot_data = dict( + sorting=sorting, + segment_index=segment_index, + unit_ids=unit_ids, + color=color, + frame_range=frame_range, + time_range=time_range, + ) + BaseWidget.__init__(self, plot_data, backend=backend, **backend_kwargs) + + def plot_matplotlib(self, data_plot, **backend_kwargs): + import matplotlib.pyplot as plt + from .utils_matplotlib import make_mpl_figure + + dp = to_attr(data_plot) + sorting = dp.sorting + + self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) + + units_ids = dp.unit_ids + if units_ids is None: + units_ids = sorting.unit_ids + + with plt.rc_context({"axes.edgecolor": "gray"}): + for unit_index, unit_id in enumerate(units_ids): + spiketrain = sorting.get_unit_spike_train( + unit_id, + start_frame=dp.frame_range[0], + end_frame=dp.frame_range[1], + segment_index=dp.segment_index, + ) + spiketimes = spiketrain / float(sorting.sampling_frequency) + self.ax.plot( + spiketimes, + unit_index * np.ones_like(spiketimes), + marker="|", + mew=1, + markersize=3, + ls="", + color=dp.color, + ) + self.ax.set_yticks(np.arange(len(units_ids))) + self.ax.set_yticklabels(units_ids) + self.ax.set_xlim(*dp.time_range) + self.ax.set_xlabel("time (s)") + + + + + + + From d3fe469bb95d4a8b3e6cff1ecde37e1bc5c4e0c6 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 19 Sep 2023 20:11:19 +0200 Subject: [PATCH 117/322] Update src/spikeinterface/qualitymetrics/misc_metrics.py --- src/spikeinterface/qualitymetrics/misc_metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index c742141d5d..f449b3c31b 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -523,7 +523,7 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni Based on concepts described in [Gruen]_ This code was adapted from `Elephant - Electrophysiology Analysis Toolkit `_ """ - assert np.all([s > 1 for s in synchrony_sizes]), "Synchrony sizes must be greater than 1" + assert min(synchrony_sizes) > 1, "Synchrony sizes 
must be greater than 1" spike_counts = waveform_extractor.sorting.count_num_spikes_per_unit() sorting = waveform_extractor.sorting spikes = sorting.to_spike_vector(concatenated=False) From 9c4bba37b89012c4d016394916a04832df3109c7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 21:00:13 +0200 Subject: [PATCH 118/322] wip --- .../sortingcomponents/clustering/merge.py | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index 2e839ef0fc..e2049d70bf 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -157,7 +157,7 @@ def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full" merges = [] - graph = nx.from_numpy_matrix(pair_mask | pair_mask.T) + graph = nx.from_numpy_array(pair_mask | pair_mask.T) # put real nodes names for debugging maps = dict(zip(np.arange(labels_set.size), labels_set)) graph = nx.relabel_nodes(graph, maps) @@ -196,8 +196,8 @@ def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full" nx.draw_networkx(sub_graph) plt.show() - DEBUG = True - # DEBUG = False + # DEBUG = True + DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -457,8 +457,8 @@ def merge( else: final_shift = 0 - DEBUG = True - # DEBUG = False + # DEBUG = True + DEBUG = False if DEBUG and is_merge: # if DEBUG: @@ -487,7 +487,15 @@ def merge( ax.plot(bins[:-1], count0, color="C0") ax.plot(bins[:-1], count1, color="C1") - ax.set_title(f"{dipscore:.4f} {is_merge}") + if criteria == "diptest": + ax.set_title(f"{dipscore:.4f} {is_merge}") + elif criteria == "percentile": + ax.set_title(f"{l0:.4f} {l1:.4f} {is_merge}") + ax.axvline(l0, color="C0") + ax.axvline(l1, color="C1") + + + plt.show() From d7aaa95e295d16fd1c9e6fe10fd82f93029a5cb1 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 19 Sep 2023 21:01:18 +0200 Subject: [PATCH 119/322] gt study widget xlim --- src/spikeinterface/widgets/gtstudy.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index bc2c1246b7..438858beae 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -243,10 +243,14 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): study = dp.study perfs = study.get_performance_by_unit(case_keys=dp.case_keys) + max_metric = 0 for key in dp.case_keys: x = study.get_metrics(key)[dp.metric_name].values y = perfs.xs(key)[dp.performance_name].values label = dp.study.cases[key]["label"] self.ax.scatter(x, y, label=label) + max_metric = max(max_metric, np.max(x)) - self.ax.legend() \ No newline at end of file + self.ax.legend() + self.ax.set_xlim(0, max_metric * 1.05) + self.ax.set_ylim(0, 1.05) \ No newline at end of file From 4b9149c663521c72b3a3a7915a18d920ddf51884 Mon Sep 17 00:00:00 2001 From: munahaf Date: Wed, 20 Sep 2023 06:55:04 +0000 Subject: [PATCH 120/322] Comment: Updated a test expression to remove two logical short circuits. 
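The updated expression still compares string literals to None, so it can never
fail: a non-empty literal is "not None" whether written with != or with is not.
A minimal illustration (variable names as in remove_artifacts.py; the fix that
tests the actual variables lands in a follow-up patch below):

    ms_before = None  # e.g. the caller forgot to pass it

    assert "ms_before" != None        # original code: compares the literal, always passes
    assert "ms_before" is not None    # after this patch: still the literal, still always passes
    # the intended check (applied later) uses the variables and would raise here:
    # assert ms_before is not None and ms_after is not None
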
--- src/spikeinterface/preprocessing/remove_artifacts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 3148539165..0e1940a45f 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -165,7 +165,7 @@ def __init__( for l in np.unique(labels): assert l in artifacts.keys(), f"Artefacts are provided but label {l} has no value!" else: - assert "ms_before" != None and "ms_after" != None, f"ms_before/after should not be None for mode {mode}" + assert "ms_before" is not None and "ms_after" is not None, f"ms_before/after should not be None for mode {mode}" sorting = NumpySorting.from_times_labels(list_triggers, list_labels, recording.get_sampling_frequency()) sorting = sorting.save() waveforms_kwargs.update({"ms_before": ms_before, "ms_after": ms_after}) From c362aac3837027c26e284e6670c03bcab8865fb8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 06:58:41 +0000 Subject: [PATCH 121/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/remove_artifacts.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 0e1940a45f..61f2f2eca1 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -165,7 +165,9 @@ def __init__( for l in np.unique(labels): assert l in artifacts.keys(), f"Artefacts are provided but label {l} has no value!" else: - assert "ms_before" is not None and "ms_after" is not None, f"ms_before/after should not be None for mode {mode}" + assert ( + "ms_before" is not None and "ms_after" is not None + ), f"ms_before/after should not be None for mode {mode}" sorting = NumpySorting.from_times_labels(list_triggers, list_labels, recording.get_sampling_frequency()) sorting = sorting.save() waveforms_kwargs.update({"ms_before": ms_before, "ms_after": ms_after}) From 2d1a33ad752480bef7b3d39bcc0619a8d8d0c127 Mon Sep 17 00:00:00 2001 From: Munawar Date: Wed, 20 Sep 2023 00:46:17 -0700 Subject: [PATCH 122/322] Update remove_artifacts.py to change string literals (probably used mistakenly) to actual variables. --- src/spikeinterface/preprocessing/remove_artifacts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 61f2f2eca1..7e84822c61 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -166,7 +166,7 @@ def __init__( assert l in artifacts.keys(), f"Artefacts are provided but label {l} has no value!" 
else: assert ( - "ms_before" is not None and "ms_after" is not None + ms_before is not None and ms_after is not None ), f"ms_before/after should not be None for mode {mode}" sorting = NumpySorting.from_times_labels(list_triggers, list_labels, recording.get_sampling_frequency()) sorting = sorting.save() From a395c3c7253cd7dadd813b25a4862610221f9cf4 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 10:24:30 +0200 Subject: [PATCH 123/322] suggestions --- doc/how_to/index.rst | 2 +- ...d_matalb_data.rst => load_matlab_data.rst} | 26 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) rename doc/how_to/{load_matalb_data.rst => load_matlab_data.rst} (70%) diff --git a/doc/how_to/index.rst b/doc/how_to/index.rst index fa7210d4f0..da94cf549c 100644 --- a/doc/how_to/index.rst +++ b/doc/how_to/index.rst @@ -7,4 +7,4 @@ How to guides get_started analyse_neuropixels handle_drift - load_matalb_data + load_matlab_data diff --git a/doc/how_to/load_matalb_data.rst b/doc/how_to/load_matlab_data.rst similarity index 70% rename from doc/how_to/load_matalb_data.rst rename to doc/how_to/load_matlab_data.rst index 39b9a48d65..cca579036a 100644 --- a/doc/how_to/load_matalb_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -7,15 +7,16 @@ Exporting Data from MATLAB -------------------------- First, ensure your data is structured correctly. The data matrix should be organized such that the first dimension corresponds to samples/time and the second dimension to channels. +In the following MATLAB code, we generate random data as an example and then write it to a binary file. .. code-block:: matlab % Define the size of your data - num_samples = 1000; - num_channels = 384; + numSamples = 1000; + numChannels = 384; % Generate random data as an example - data = rand(num_samples, num_channels); + data = rand(numSamples, numChannels); % Write the data to a binary file fileID = fopen('your_data_as_a_binary.bin', 'wb'); @@ -36,22 +37,24 @@ Once you have your data in a binary format, you can seamlessly load it into Spik .. code-block:: python - from spikeinterface.core.binaryrecordingextractor import BinaryRecordingExtractor + import spikeinterface as si from pathlib import Path - # Define the path to your binary file + # In linux or mac file_path = Path("/The/Path/To/Your/Data/your_data_as_a_binary.bin") + # or for Windows + # file_path = Path(r"c:\path\to\your\data\your_data_as_a_binary.bin") # Ensure the file exists assert file_path.is_file() # Specify the parameters of your recording - sampling_frequency = 30_000.0 # in Hz, adjust as per your matlab dataset - num_channels = 384 # adjust as per your matlab dataset - dtype = "float64" + sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset + num_channels = 384 # adjust as per your MATLAB dataset + dtype = "float64" # equivalent of MATLAB double # Load the data using SpikeInterface - recording = BinaryRecordingExtractor(file_path, sampling_frequency=sampling_frequency, + recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype, gain_to_uV=1, offset_to_uV=0) # Verify the shape of your data @@ -61,6 +64,7 @@ Common Pitfalls & Tips ---------------------- 1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. -2. **File Path**: Double-check the file path in Python to ensure you're pointing to the right directory. +2. 
**File Path**: Double-check the file path in Python to ensure you are pointing to the right directory. 3. **Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. -4. **Sampling Frequency**: Ensure you set the correct sampling frequency when loading data into SpikeInterface. +4. **Sampling Frequency**: Ensure you set the correct sampling frequency in Hz when loading data into SpikeInterface. +5. **Working on Python**: Matlab to python can feel like a big jump. If you are new to Python, we recommend checking out numpy's [Python for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. From 6130e5bad0c8d825a4c44da881b5473e691a8712 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 10:27:17 +0200 Subject: [PATCH 124/322] add an assertion --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index cca579036a..0a8345b792 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -46,7 +46,7 @@ Once you have your data in a binary format, you can seamlessly load it into Spik # file_path = Path(r"c:\path\to\your\data\your_data_as_a_binary.bin") # Ensure the file exists - assert file_path.is_file() + assert file_path.is_file(), f"Your path {file_path} is not a file, you probably have a typo or got the wrong path." # Specify the parameters of your recording sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset From 9a97e68f848d1126126bfecd819f456e12113813 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 10:52:05 +0200 Subject: [PATCH 125/322] Improve the concept of check_if_json_serializable to more serialation engine like pickle. 
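The single json flag becomes a per-format dict so that pickle (and, later,
other engines) can be declared independently. A sketch of the call pattern the
new flags enable (extractor is any BaseExtractor; file names are placeholders):

    if extractor.check_serializablility("json"):
        extractor.dump_to_json("extractor.json")
    elif extractor.check_serializablility("pickle"):
        extractor.dump_to_pickle("extractor.pkl")
    else:
        # not serializable at all: fall back to a full copy on disk
        extractor = extractor.save(folder="saved_extractor")

check_if_json_serializable() is kept for now as a thin wrapper around the
"json" flag.
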
--- src/spikeinterface/comparison/hybrid.py | 6 ++- .../comparison/multicomparisons.py | 7 ++- src/spikeinterface/core/base.py | 50 +++++++++++++------ src/spikeinterface/core/generate.py | 2 + src/spikeinterface/core/numpyextractors.py | 16 ++++-- src/spikeinterface/core/old_api_utils.py | 8 ++- src/spikeinterface/core/tests/test_base.py | 12 +++-- .../core/tests/test_jsonification.py | 10 +++- .../core/tests/test_waveform_extractor.py | 41 +++++++++++++-- src/spikeinterface/core/waveform_extractor.py | 34 ++++++++++--- src/spikeinterface/preprocessing/motion.py | 3 +- src/spikeinterface/sorters/basesorter.py | 3 +- 12 files changed, 150 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index af410255b9..c48ce70147 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -84,7 +84,8 @@ def __init__( ) # save injected sorting if necessary self.injected_sorting = injected_sorting - if not self.injected_sorting.check_if_json_serializable(): + # if not self.injected_sorting.check_if_json_serializable(): + if not self.injected_sorting.check_serializablility("json"): assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) @@ -180,7 +181,8 @@ def __init__( self.injected_sorting = injected_sorting # save injected sorting if necessary - if not self.injected_sorting.check_if_json_serializable(): + # if not self.injected_sorting.check_if_json_serializable(): + if not self.injected_sorting.check_serializablility("json"): assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 9e02fd5b2d..3a7075905e 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -182,7 +182,8 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou def save_to_folder(self, save_folder): for sorting in self.object_list: assert ( - sorting.check_if_json_serializable() + # sorting.check_if_json_serializable() + sorting.check_serializablility("json") ), "MultiSortingComparison.save_to_folder() need json serializable sortings" save_folder = Path(save_folder) @@ -244,7 +245,9 @@ def __init__( BaseSorting.__init__(self, sampling_frequency=sampling_frequency, unit_ids=unit_ids) - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = True if len(unit_ids) > 0: for k in ("agreement_number", "avg_agreement", "unit_ids"): diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 87c0805630..d87bd617c4 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -58,7 +58,8 @@ def __init__(self, main_ids: Sequence) -> None: self._properties = {} self._is_dumpable = True - self._is_json_serializable = True + # self._is_json_serializable = True + self._serializablility = {'json': True, 'pickle': True} # extractor specific list of pip extra requirements self.extra_requirements = [] @@ -490,6 +491,18 @@ def check_if_dumpable(self): return all([v.check_if_dumpable() for k, v in value.items()]) return 
self._is_dumpable + def check_serializablility(self, type="json"): + kwargs = self._kwargs + for value in kwargs.values(): + # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors + if isinstance(value, BaseExtractor): + return value.check_serializablility(type=type) + elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): + return all([v.check_serializablility(type=type) for v in value]) + elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): + return all([v.check_serializablility(type=type) for k, v in value.items()]) + return self._serializablility[type] + def check_if_json_serializable(self): """ Check if the object is json serializable, including nested objects. @@ -499,16 +512,23 @@ def check_if_json_serializable(self): bool True if the object is json serializable, False otherwise. """ - kwargs = self._kwargs - for value in kwargs.values(): - # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors - if isinstance(value, BaseExtractor): - return value.check_if_json_serializable() - elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - return all([v.check_if_json_serializable() for v in value]) - elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - return all([v.check_if_json_serializable() for k, v in value.items()]) - return self._is_json_serializable + # we keep this for backward compatilibity or not ???? + return self.check_serializablility("json") + + # kwargs = self._kwargs + # for value in kwargs.values(): + # # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors + # if isinstance(value, BaseExtractor): + # return value.check_if_json_serializable() + # elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): + # return all([v.check_if_json_serializable() for v in value]) + # elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): + # return all([v.check_if_json_serializable() for k, v in value.items()]) + # return self._is_json_serializable + + def check_if_pickle_serializable(self): + # is this needed + return self.check_serializablility("pickle") @staticmethod def _get_file_path(file_path: Union[str, Path], extensions: Sequence) -> Path: @@ -557,7 +577,7 @@ def dump(self, file_path: Union[str, Path], relative_to=None, folder_metadata=No if str(file_path).endswith(".json"): self.dump_to_json(file_path, relative_to=relative_to, folder_metadata=folder_metadata) elif str(file_path).endswith(".pkl") or str(file_path).endswith(".pickle"): - self.dump_to_pickle(file_path, relative_to=relative_to, folder_metadata=folder_metadata) + self.dump_to_pickle(file_path, folder_metadata=folder_metadata) else: raise ValueError("Dump: file must .json or .pkl") @@ -576,7 +596,8 @@ def dump_to_json(self, file_path: Union[str, Path, None] = None, relative_to=Non folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - assert self.check_if_json_serializable(), "The extractor is not json serializable" + # assert self.check_if_json_serializable(), "The extractor is not json serializable" + assert self.check_serializablility("json"), "The extractor is not json serializable" # Writing paths as relative_to requires recursively expanding the dict if relative_to: @@ -814,7 +835,8 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): # dump provenance provenance_file = folder / f"provenance.json" - if self.check_if_json_serializable(): + # if self.check_if_json_serializable(): + if self.check_serializablility("json"): self.dump(provenance_file) else: provenance_file.write_text(json.dumps({"warning": "the provenace is not dumpable!!!"}), encoding="utf8") diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 07837bcef7..706054c957 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1431,5 +1431,7 @@ def generate_ground_truth_recording( ) recording.annotate(is_filtered=True) recording.set_probe(probe, in_place=True) + recording.set_property("gain_to_uV", np.ones(num_channels)) + recording.set_property("offset_to_uV", np.zeros(num_channels)) return recording, sorting diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index d5663156c7..f55b975ddb 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -64,7 +64,9 @@ def __init__(self, traces_list, sampling_frequency, t_starts=None, channel_ids=N assert len(t_starts) == len(traces_list), "t_starts must be a list of same size than traces_list" t_starts = [float(t_start) for t_start in t_starts] - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False for i, traces in enumerate(traces_list): if t_starts is None: @@ -127,7 +129,9 @@ def __init__(self, spikes, sampling_frequency, unit_ids): BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False if spikes.size == 0: nseg = 1 @@ -358,7 +362,9 @@ def __init__(self, shm_name, shape, sampling_frequency, unit_ids, dtype=minimum_ BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False self.shm = SharedMemory(shm_name, create=False) self.shm_spikes = np.ndarray(shape=shape, dtype=dtype, buffer=self.shm.buf) @@ -517,7 +523,9 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore ) self._is_dumpable = False - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False for snippets, spikesframes in zip(snippets_list, spikesframes_list): snp_segment = NumpySnippetsSegment(snippets, spikesframes) diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index 1ff31127f4..38fbef1547 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -183,7 +183,9 @@ def __init__(self, oldapi_recording_extractor): # set _is_dumpable to False to use dumping 
mechanism of old extractor self._is_dumpable = False - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False self.annotate(is_filtered=oldapi_recording_extractor.is_filtered) @@ -269,7 +271,9 @@ def __init__(self, oldapi_sorting_extractor): self.add_sorting_segment(sorting_segment) self._is_dumpable = False - self._is_json_serializable = False + # self._is_json_serializable = False + self._serializablility["json"] = False + self._serializablility["pickle"] = False # add old properties copy_properties(oldapi_extractor=oldapi_sorting_extractor, new_extractor=self) diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index ea1a9cf0d2..77a5d7d9bf 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -50,18 +50,22 @@ def test_check_if_json_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects - test_extractor._is_json_serializable = True + # test_extractor._is_json_serializable = True + test_extractor._serializablility["json"] = True extractors_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_json_serializable: print(extractor) - assert extractor.check_if_json_serializable() + # assert extractor.check_if_json_serializable() + assert extractor.check_serializablility("json") # make not dumpable - test_extractor._is_json_serializable = False + # test_extractor._is_json_serializable = False + test_extractor._serializablility["json"] = False extractors_not_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_not_json_serializable: print(extractor) - assert not extractor.check_if_json_serializable() + # assert not extractor.check_if_json_serializable() + assert not extractor.check_serializablility("json") if __name__ == "__main__": diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 473648c5ec..8572cda23e 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -142,9 +142,12 @@ def __init__(self, attribute, other_extractor=None, extractor_list=None, extract self.extractor_list = extractor_list self.extractor_dict = extractor_dict + BaseExtractor.__init__(self, main_ids=['1', '2']) # this already the case by default self._is_dumpable = True - self._is_json_serializable = True + # self._is_json_serializable = True + self._serializablility["json"] = True + self._serializablility["pickle"] = True self._kwargs = { "attribute": attribute, @@ -195,3 +198,8 @@ def test_encoding_numpy_scalars_within_nested_extractors_list(nested_extractor_l def test_encoding_numpy_scalars_within_nested_extractors_dict(nested_extractor_dict): json.dumps(nested_extractor_dict, cls=SIJsonEncoder) + + +if __name__ == '__main__': + nested_extractor = nested_extractor() + test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) \ No newline at end of file diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 107ef5f180..f53b9cf18d 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -6,7 +6,7 @@ import zarr -from spikeinterface.core import generate_recording, generate_sorting, 
NumpySorting, ChannelSparsity +from spikeinterface.core import generate_recording, generate_sorting, NumpySorting, ChannelSparsity, generate_ground_truth_recording from spikeinterface import WaveformExtractor, BaseRecording, extract_waveforms, load_waveforms from spikeinterface.core.waveform_extractor import precompute_sparsity @@ -509,11 +509,46 @@ def test_compute_sparsity(): ) print(sparsity) +def test_non_json_object(): + recording, sorting = generate_ground_truth_recording( + durations=[30, 40], + sampling_frequency=30000.0, + num_channels=32, + num_units=5, + ) + + # recording is not save to keep it in memory + sorting = sorting.save() + + wf_folder = cache_folder / "test_waveform_extractor" + if wf_folder.is_dir(): + shutil.rmtree(wf_folder) + + + we = extract_waveforms( + recording, + sorting, + wf_folder, + mode="folder", + sparsity=None, + sparse=False, + ms_before=1.0, + ms_after=1.6, + max_spikes_per_unit=50, + n_jobs=4, + chunk_size=30000, + progress_bar=True, + ) + + # This used to fail because of json + we = load_waveforms(wf_folder) + if __name__ == "__main__": - test_WaveformExtractor() + # test_WaveformExtractor() # test_extract_waveforms() - # test_sparsity() # test_portability() # test_recordingless() # test_compute_sparsity() + test_non_json_object() + diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6881ab3ec5..53852bf319 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -159,11 +159,20 @@ def load_from_folder( else: rec_attributes["probegroup"] = None else: - try: - recording = load_extractor(folder / "recording.json", base_folder=folder) - rec_attributes = None - except: + recording = None + if (folder / "recording.json").exists(): + try: + recording = load_extractor(folder / "recording.json", base_folder=folder) + except: + pass + elif (folder / "recording.pickle").exists(): + try: + recording = load_extractor(folder / "recording.pickle") + except: + pass + if recording is None: raise Exception("The recording could not be loaded. You can use the `with_recording=False` argument") + rec_attributes = None if sorting is None: sorting = load_extractor(folder / "sorting.json", base_folder=folder) @@ -271,9 +280,16 @@ def create( else: relative_to = None - if recording.check_if_json_serializable(): + # if recording.check_if_json_serializable(): + if recording.check_serializablility("json"): recording.dump(folder / "recording.json", relative_to=relative_to) - if sorting.check_if_json_serializable(): + elif recording.check_serializablility("pickle"): + # In this case we loose the relative_to!! + # TODO make sure that we do not dump to pickle a NumpyRecording!!!!! 
+ recording.dump(folder / "recording.pickle") + + # if sorting.check_if_json_serializable(): + if sorting.check_serializablility("json"): sorting.dump(folder / "sorting.json", relative_to=relative_to) else: warn( @@ -879,9 +895,11 @@ def save( (folder / "params.json").write_text(json.dumps(check_json(self._params), indent=4), encoding="utf8") if self.has_recording(): - if self.recording.check_if_json_serializable(): + # if self.recording.check_if_json_serializable(): + if self.recording.check_serializablility("json"): self.recording.dump(folder / "recording.json", relative_to=relative_to) - if self.sorting.check_if_json_serializable(): + # if self.sorting.check_if_json_serializable(): + if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) else: warn( diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index e2ef6e6794..0054fb94d4 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -333,7 +333,8 @@ def correct_motion( ) (folder / "parameters.json").write_text(json.dumps(parameters, indent=4, cls=SIJsonEncoder), encoding="utf8") (folder / "run_times.json").write_text(json.dumps(run_times, indent=4), encoding="utf8") - if recording.check_if_json_serializable(): + # if recording.check_if_json_serializable(): + if recording.check_serializablility("json"): recording.dump_to_json(folder / "recording.json") np.save(folder / "peaks.npy", peaks) diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index c7581ba1e1..da20506965 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -137,7 +137,8 @@ def initialize_folder(cls, recording, output_folder, verbose, remove_existing_fo ) rec_file = output_folder / "spikeinterface_recording.json" - if recording.check_if_json_serializable(): + # if recording.check_if_json_serializable(): + if recording.check_serializablility("json"): recording.dump_to_json(rec_file, relative_to=output_folder) else: d = {"warning": "The recording is not serializable to json"} From 0842509422d8498fab0c506d6ed2839b4f4d0a74 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:29:11 +0200 Subject: [PATCH 126/322] my final version --- doc/how_to/load_matlab_data.rst | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 0a8345b792..3e602012a1 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -55,16 +55,42 @@ Once you have your data in a binary format, you can seamlessly load it into Spik # Load the data using SpikeInterface recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, - num_channels=num_channels, dtype=dtype, gain_to_uV=1, offset_to_uV=0) + num_channels=num_channels, dtype=dtype) # Verify the shape of your data assert recording.get_traces().shape == (num_samples, num_channels) +This should be enough to get you started with loading your MATLAB data into SpikeInterface. You can use all the Spikeinterface machinery to process your data, including filtering, spike sorting, and more. + Common Pitfalls & Tips ---------------------- -1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. +1. 
**Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. If the time happens to be in the second dimension, you can use `time_axis=1` as an argument in `si.read_binary()` to account for this. 2. **File Path**: Double-check the file path in Python to ensure you are pointing to the right directory. 3. **Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. 4. **Sampling Frequency**: Ensure you set the correct sampling frequency in Hz when loading data into SpikeInterface. 5. **Working on Python**: Matlab to python can feel like a big jump. If you are new to Python, we recommend checking out numpy's [Python for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. + + +Using gains and offsets for integer data +---------------------------------------- + +A common technique used in raw formats is to store data as integer values, which provides a memory-efficient representation (i.e. lower ram) and use a gain and offset to convert it to float values that represent meaningful physical units. +In SpikeInterface this is done using the `gain_to_uV` and `offset_to_uV` parameters as the we handle traces in microvolts. Both values can be passed to `read_binary` when loading the data: + +.. code-block:: python + + sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset + num_channels = 384 # adjust as per your MATLAB dataset + dtype_int = 'int16' # adjust as per your MATLAB dataset + gain_to_uV = 0.195 # adjust as per your MATLAB dataset + offset_to_uV = 0 # adjust as per your MATLAB dataset + + recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, + num_channels=num_channels, dtype=dtype_int, + gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) + + recording.get_traces(start) + + +This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to True. From 1ead6a33e658bf5a0365d21506a90dd9bd32e67c Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:45:06 +0200 Subject: [PATCH 127/322] final review --- doc/how_to/load_matlab_data.rst | 72 +++++++++++++++++---------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 3e602012a1..0a80f1fdf9 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -1,13 +1,13 @@ Exporting MATLAB Data to Binary & Loading in SpikeInterface =========================================================== -In this tutorial, we'll go through the process of exporting your data from MATLAB in a binary format and then loading it using SpikeInterface in Python. Let's break down the steps. +In this tutorial, we will walk through the process of exporting data from MATLAB in a binary format and subsequently loading it using SpikeInterface in Python. Exporting Data from MATLAB -------------------------- -First, ensure your data is structured correctly. The data matrix should be organized such that the first dimension corresponds to samples/time and the second dimension to channels. -In the following MATLAB code, we generate random data as an example and then write it to a binary file. +Begin by ensuring your data structure is correct. 
Organize your data matrix so that the first dimension corresponds to samples/time and the second to channels. +Here, we present a MATLAB code that creates a random dataset and writes it to a binary file as an illustration. .. code-block:: matlab @@ -25,72 +25,76 @@ In the following MATLAB code, we generate random data as an example and then wri .. note:: - In a real-world scenario, replace the random data generation with your actual data. + In your own script, replace the random data generation with your actual dataset. Loading Data in SpikeInterface ----------------------------- -This should produce a binary file called `your_data_as_a_binary.bin` in your current MATLAB directory. -You will need the complete path (i.e. its location on your computer) to load it in Python. +After executing the above MATLAB code, a binary file named `your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. -Once you have your data in a binary format, you can seamlessly load it into SpikeInterface using the following script: +Use the following Python script to load the binary data into SpikeInterface: .. code-block:: python import spikeinterface as si from pathlib import Path - # In linux or mac + # Define file path + # For Linux or macOS: file_path = Path("/The/Path/To/Your/Data/your_data_as_a_binary.bin") - # or for Windows + # For Windows: # file_path = Path(r"c:\path\to\your\data\your_data_as_a_binary.bin") - # Ensure the file exists - assert file_path.is_file(), f"Your path {file_path} is not a file, you probably have a typo or got the wrong path." + # Confirm file existence + assert file_path.is_file(), f"Error: {file_path} is not a valid file. Please check the path." - # Specify the parameters of your recording - sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset - num_channels = 384 # adjust as per your MATLAB dataset - dtype = "float64" # equivalent of MATLAB double + # Define recording parameters + sampling_frequency = 30_000.0 # Adjust according to your MATLAB dataset + num_channels = 384 # Adjust according to your MATLAB dataset + dtype = "float64" # MATLAB's double corresponds to Python's float64 - # Load the data using SpikeInterface + # Load data using SpikeInterface recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype) - # Verify the shape of your data - assert recording.get_traces().shape == (num_samples, num_channels) + # Confirm the data shape + assert recording.get_traces().shape == (numSamples, num_channels) -This should be enough to get you started with loading your MATLAB data into SpikeInterface. You can use all the Spikeinterface machinery to process your data, including filtering, spike sorting, and more. +Follow the steps above to seamlessly import your MATLAB data into SpikeInterface. Once loaded, you can harness the full power of SpikeInterface for data processing, including filtering, spike sorting, and more. Common Pitfalls & Tips ---------------------- -1. **Data Shape**: Always ensure that your MATLAB data matrix's first dimension corresponds to samples/time and the second to channels. If the time happens to be in the second dimension, you can use `time_axis=1` as an argument in `si.read_binary()` to account for this. -2. **File Path**: Double-check the file path in Python to ensure you are pointing to the right directory. -3. 
**Data Type**: When moving data between MATLAB and Python, it's crucial to keep the data type consistent. In our example, we used `double` in MATLAB, which corresponds to `float64` in Python. -4. **Sampling Frequency**: Ensure you set the correct sampling frequency in Hz when loading data into SpikeInterface. -5. **Working on Python**: Matlab to python can feel like a big jump. If you are new to Python, we recommend checking out numpy's [Python for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. - +1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use `time_axis=1` in `si.read_binary()`. +2. **File Path**: Always double-check the Python file path. +3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to nUMPY's `float64`. +4. **Sampling Frequency**: Set the appropriate sampling frequency in Hz for SpikeInterface. +5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's [Numpy for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. Using gains and offsets for integer data ---------------------------------------- -A common technique used in raw formats is to store data as integer values, which provides a memory-efficient representation (i.e. lower ram) and use a gain and offset to convert it to float values that represent meaningful physical units. -In SpikeInterface this is done using the `gain_to_uV` and `offset_to_uV` parameters as the we handle traces in microvolts. Both values can be passed to `read_binary` when loading the data: +Raw data formats often store data as integer values for memory efficiency. To give these integers meaningful physical units, you can apply a gain and an offset. +In SpikeInterface, you can use the `gain_to_uV` and `offset_to_uV` parameters, since traces are handled in microvolts (uV). Both parameters can be integrated into the `read_binary` function. +If your data in MATLAB is stored as `int16`, and you know the gain and offset, you can use the following code to load the data: .. code-block:: python - sampling_frequency = 30_000.0 # in Hz, adjust as per your MATLAB dataset - num_channels = 384 # adjust as per your MATLAB dataset - dtype_int = 'int16' # adjust as per your MATLAB dataset - gain_to_uV = 0.195 # adjust as per your MATLAB dataset - offset_to_uV = 0 # adjust as per your MATLAB dataset + sampling_frequency = 30_000.0 # Adjust according to your MATLAB dataset + num_channels = 384 # Adjust according to your MATLAB dataset + dtype_int = 'int16' # Adjust according to your MATLAB dataset + gain_to_uV = 0.195 # Adjust according to your MATLAB dataset + offset_to_uV = 0 # Adjust according to your MATLAB dataset recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype_int, gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) - recording.get_traces(start) + recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) + +This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to `True`. + +.. 
note:: -This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to True. + The gain and offset parameters are usually format depend and you will need to find out the correct values for your data format. You can load your data without gain and offset but then the traces will be in integer values and not in uV. From e31978ce8355dda2d87a713c2495ec915b805f92 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:53:47 +0200 Subject: [PATCH 128/322] typo --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 0a80f1fdf9..ca543ba43a 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -67,7 +67,7 @@ Common Pitfalls & Tips 1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use `time_axis=1` in `si.read_binary()`. 2. **File Path**: Always double-check the Python file path. -3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to nUMPY's `float64`. +3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to Numpy's `float64`. 4. **Sampling Frequency**: Set the appropriate sampling frequency in Hz for SpikeInterface. 5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's [Numpy for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. From 5aba5e0f65532165488303203d7739e188fe6e0c Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 12:57:44 +0200 Subject: [PATCH 129/322] Update doc/how_to/load_matlab_data.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index ca543ba43a..7f90684701 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -97,4 +97,4 @@ This will equip your recording object with capabilities to convert the data to f .. note:: - The gain and offset parameters are usually format depend and you will need to find out the correct values for your data format. You can load your data without gain and offset but then the traces will be in integer values and not in uV. + The gain and offset parameters are usually format dependent and you will need to find out the correct values for your data format. You can load your data without gain and offset but then the traces will be in integer values and not in uV. 
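The gain/offset handling documented above comes down to a linear conversion of the stored integers; `get_traces(return_scaled=True)` applies this conversion (scaled = raw * gain + offset) internally. A minimal sketch of that arithmetic, assuming NumPy and the example gain/offset values used in the guide (these values are illustrative, not tied to any particular probe):

.. code-block:: python

    import numpy as np

    # hypothetical int16 samples as they would be stored in the binary file
    raw_traces = np.array([[-3276, 0, 3276]], dtype="int16")

    gain_to_uV = 0.195   # assumed gain, as in the guide above
    offset_to_uV = 0.0   # assumed offset, as in the guide above

    # microvolt value = raw sample * gain + offset
    scaled_traces = raw_traces.astype("float32") * gain_to_uV + offset_to_uV
    print(scaled_traces)  # approximately [[-638.8, 0.0, 638.8]]
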
From 3f4e182380995f56d458163356a70a813af6b146 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 14:01:50 +0200 Subject: [PATCH 130/322] More check and clean for check_if_serializable() --- src/spikeinterface/comparison/hybrid.py | 4 +- .../comparison/multicomparisons.py | 2 - src/spikeinterface/core/base.py | 46 +++++++++---------- src/spikeinterface/core/generate.py | 2 + src/spikeinterface/core/numpyextractors.py | 8 ++-- src/spikeinterface/core/old_api_utils.py | 2 - src/spikeinterface/core/tests/test_base.py | 7 +-- .../core/tests/test_waveform_extractor.py | 2 + src/spikeinterface/core/waveform_extractor.py | 18 +++++--- src/spikeinterface/preprocessing/motion.py | 1 - 10 files changed, 44 insertions(+), 48 deletions(-) diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index c48ce70147..3b8e9e0a72 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -84,8 +84,8 @@ def __init__( ) # save injected sorting if necessary self.injected_sorting = injected_sorting - # if not self.injected_sorting.check_if_json_serializable(): if not self.injected_sorting.check_serializablility("json"): + # TODO later : also use pickle assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) @@ -181,8 +181,8 @@ def __init__( self.injected_sorting = injected_sorting # save injected sorting if necessary - # if not self.injected_sorting.check_if_json_serializable(): if not self.injected_sorting.check_serializablility("json"): + # TODO later : also use pickle assert injected_sorting_folder is not None, "Provide injected_sorting_folder to injected sorting object" self.injected_sorting = self.injected_sorting.save(folder=injected_sorting_folder) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 3a7075905e..09a8c8aed1 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -182,7 +182,6 @@ def get_agreement_sorting(self, minimum_agreement_count=1, minimum_agreement_cou def save_to_folder(self, save_folder): for sorting in self.object_list: assert ( - # sorting.check_if_json_serializable() sorting.check_serializablility("json") ), "MultiSortingComparison.save_to_folder() need json serializable sortings" @@ -245,7 +244,6 @@ def __init__( BaseSorting.__init__(self, sampling_frequency=sampling_frequency, unit_ids=unit_ids) - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = True diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index d87bd617c4..63cf8e894f 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -484,11 +484,16 @@ def check_if_dumpable(self): for value in kwargs.values(): # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors if isinstance(value, BaseExtractor): - return value.check_if_dumpable() - elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - return all([v.check_if_dumpable() for v in value]) - elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - return all([v.check_if_dumpable() for k, v in value.items()]) + if not value.check_if_dumpable(): + return False + elif isinstance(value, 
list): + for v in value: + if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): + return False + elif isinstance(value, dict): + for v in value.values(): + if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): + return False return self._is_dumpable def check_serializablility(self, type="json"): @@ -496,11 +501,16 @@ def check_serializablility(self, type="json"): for value in kwargs.values(): # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors if isinstance(value, BaseExtractor): - return value.check_serializablility(type=type) - elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - return all([v.check_serializablility(type=type) for v in value]) - elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - return all([v.check_serializablility(type=type) for k, v in value.items()]) + if not value.check_serializablility(type=type): + return False + elif isinstance(value, list): + for v in value: + if isinstance(v, BaseExtractor) and not v.check_serializablility(type=type): + return False + elif isinstance(value, dict): + for v in value.values(): + if isinstance(v, BaseExtractor) and not v.check_serializablility(type=type): + return False return self._serializablility[type] def check_if_json_serializable(self): @@ -513,21 +523,11 @@ def check_if_json_serializable(self): True if the object is json serializable, False otherwise. """ # we keep this for backward compatilibity or not ???? + # is this needed ??? I think no. return self.check_serializablility("json") - # kwargs = self._kwargs - # for value in kwargs.values(): - # # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors - # if isinstance(value, BaseExtractor): - # return value.check_if_json_serializable() - # elif isinstance(value, list) and (len(value) > 0) and isinstance(value[0], BaseExtractor): - # return all([v.check_if_json_serializable() for v in value]) - # elif isinstance(value, dict) and isinstance(value[list(value.keys())[0]], BaseExtractor): - # return all([v.check_if_json_serializable() for k, v in value.items()]) - # return self._is_json_serializable - def check_if_pickle_serializable(self): - # is this needed + # is this needed ??? I think no. return self.check_serializablility("pickle") @staticmethod @@ -596,7 +596,6 @@ def dump_to_json(self, file_path: Union[str, Path, None] = None, relative_to=Non folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - # assert self.check_if_json_serializable(), "The extractor is not json serializable" assert self.check_serializablility("json"), "The extractor is not json serializable" # Writing paths as relative_to requires recursively expanding the dict @@ -835,7 +834,6 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): # dump provenance provenance_file = folder / f"provenance.json" - # if self.check_if_json_serializable(): if self.check_serializablility("json"): self.dump(provenance_file) else: diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 706054c957..362b598b0b 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1056,6 +1056,8 @@ def __init__( dtype = parent_recording.dtype if parent_recording is not None else templates.dtype BaseRecording.__init__(self, sorting.get_sampling_frequency(), channel_ids, dtype) + # Important : self._serializablility is not change here because it will depend on the sorting parents itself. + n_units = len(sorting.unit_ids) assert len(templates) == n_units self.spike_vector = sorting.to_spike_vector() diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index f55b975ddb..5ef955a6eb 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -64,7 +64,6 @@ def __init__(self, traces_list, sampling_frequency, t_starts=None, channel_ids=N assert len(t_starts) == len(traces_list), "t_starts must be a list of same size than traces_list" t_starts = [float(t_start) for t_start in t_starts] - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -129,9 +128,9 @@ def __init__(self, spikes, sampling_frequency, unit_ids): BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - # self._is_json_serializable = False self._serializablility["json"] = False - self._serializablility["pickle"] = False + # theorically this should be False but for simplicity make generators simples we still need this. 
+ self._serializablility["pickle"] = True if spikes.size == 0: nseg = 1 @@ -362,7 +361,7 @@ def __init__(self, shm_name, shape, sampling_frequency, unit_ids, dtype=minimum_ BaseSorting.__init__(self, sampling_frequency, unit_ids) self._is_dumpable = True - # self._is_json_serializable = False + self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -523,7 +522,6 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore ) self._is_dumpable = False - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index 38fbef1547..a31edb0dd7 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -183,7 +183,6 @@ def __init__(self, oldapi_recording_extractor): # set _is_dumpable to False to use dumping mechanism of old extractor self._is_dumpable = False - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -271,7 +270,6 @@ def __init__(self, oldapi_sorting_extractor): self.add_sorting_segment(sorting_segment) self._is_dumpable = False - # self._is_json_serializable = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index 77a5d7d9bf..b716f6b1dd 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -46,16 +46,14 @@ def test_check_if_dumpable(): assert not extractor.check_if_dumpable() -def test_check_if_json_serializable(): +def test_check_if_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects - # test_extractor._is_json_serializable = True test_extractor._serializablility["json"] = True extractors_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_json_serializable: print(extractor) - # assert extractor.check_if_json_serializable() assert extractor.check_serializablility("json") # make not dumpable @@ -64,10 +62,9 @@ def test_check_if_json_serializable(): extractors_not_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_not_json_serializable: print(extractor) - # assert not extractor.check_if_json_serializable() assert not extractor.check_serializablility("json") if __name__ == "__main__": test_check_if_dumpable() - test_check_if_json_serializable() + test_check_if_serializable() diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index f53b9cf18d..3972c9186c 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -517,6 +517,8 @@ def test_non_json_object(): num_units=5, ) + + print(recording.check_serializablility("pickle")) # recording is not save to keep it in memory sorting = sorting.save() diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 53852bf319..3de1429feb 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -280,17 +280,17 @@ def create( else: relative_to = None - # if recording.check_if_json_serializable(): if recording.check_serializablility("json"): recording.dump(folder / 
"recording.json", relative_to=relative_to) elif recording.check_serializablility("pickle"): # In this case we loose the relative_to!! - # TODO make sure that we do not dump to pickle a NumpyRecording!!!!! recording.dump(folder / "recording.pickle") - # if sorting.check_if_json_serializable(): if sorting.check_serializablility("json"): sorting.dump(folder / "sorting.json", relative_to=relative_to) + elif sorting.check_serializablility("pickle"): + # In this case we loose the relative_to!! + sorting.dump(folder / "sorting.pickle") else: warn( "Sorting object is not dumpable, which might result in downstream errors for " @@ -895,12 +895,16 @@ def save( (folder / "params.json").write_text(json.dumps(check_json(self._params), indent=4), encoding="utf8") if self.has_recording(): - # if self.recording.check_if_json_serializable(): if self.recording.check_serializablility("json"): self.recording.dump(folder / "recording.json", relative_to=relative_to) - # if self.sorting.check_if_json_serializable(): + elif self.recording.check_serializablility("pickle"): + self.recording.dump(folder / "recording.pickle") + + if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) + elif self.sorting.check_serializablility("pickle"): + self.sorting.dump(folder / "sorting.pickle", relative_to=relative_to) else: warn( "Sorting object is not dumpable, which might result in downstream errors for " @@ -949,10 +953,10 @@ def save( # write metadata zarr_root.attrs["params"] = check_json(self._params) if self.has_recording(): - if self.recording.check_if_json_serializable(): + if self.recording.check_serializablility("json"): rec_dict = self.recording.to_dict(relative_to=relative_to, recursive=True) zarr_root.attrs["recording"] = check_json(rec_dict) - if self.sorting.check_if_json_serializable(): + if self.sorting.check_serializablility("json"): sort_dict = self.sorting.to_dict(relative_to=relative_to, recursive=True) zarr_root.attrs["sorting"] = check_json(sort_dict) else: diff --git a/src/spikeinterface/preprocessing/motion.py b/src/spikeinterface/preprocessing/motion.py index 0054fb94d4..6ab1a9afce 100644 --- a/src/spikeinterface/preprocessing/motion.py +++ b/src/spikeinterface/preprocessing/motion.py @@ -333,7 +333,6 @@ def correct_motion( ) (folder / "parameters.json").write_text(json.dumps(parameters, indent=4, cls=SIJsonEncoder), encoding="utf8") (folder / "run_times.json").write_text(json.dumps(run_times, indent=4), encoding="utf8") - # if recording.check_if_json_serializable(): if recording.check_serializablility("json"): recording.dump_to_json(folder / "recording.json") From 615c5d9cd219e4016e7149f1ce170f043d507333 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 14:19:46 +0200 Subject: [PATCH 131/322] Make pickle possible to dump in run sorter when json is not possible. 
--- src/spikeinterface/sorters/basesorter.py | 61 ++++++++++++------- .../sorters/external/herdingspikes.py | 4 +- .../sorters/external/mountainsort4.py | 4 +- .../sorters/external/mountainsort5.py | 4 +- .../sorters/external/pykilosort.py | 4 +- .../sorters/internal/spyking_circus2.py | 5 +- .../sorters/internal/tridesclous2.py | 4 +- src/spikeinterface/sorters/runsorter.py | 15 ++++- 8 files changed, 59 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index da20506965..bbcde31eed 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -137,9 +137,10 @@ def initialize_folder(cls, recording, output_folder, verbose, remove_existing_fo ) rec_file = output_folder / "spikeinterface_recording.json" - # if recording.check_if_json_serializable(): if recording.check_serializablility("json"): - recording.dump_to_json(rec_file, relative_to=output_folder) + recording.dump(rec_file, relative_to=output_folder) + elif recording.check_serializablility("pickle"): + recording.dump(output_folder / "spikeinterface_recording.pickle") else: d = {"warning": "The recording is not serializable to json"} rec_file.write_text(json.dumps(d, indent=4), encoding="utf8") @@ -186,6 +187,28 @@ def set_params_to_folder(cls, recording, output_folder, new_params, verbose): return params + @classmethod + def load_recording_from_folder(cls, output_folder, with_warnings=False): + + json_file = output_folder / "spikeinterface_recording.json" + pickle_file = output_folder / "spikeinterface_recording.pickle" + + + if json_file.exists(): + with (json_file).open("r", encoding="utf8") as f: + recording_dict = json.load(f) + if "warning" in recording_dict.keys() and with_warnings: + warnings.warn( + "The recording that has been sorted is not JSON serializable: it cannot be registered to the sorting object." + ) + recording = None + else: + recording = load_extractor(json_file, base_folder=output_folder) + elif pickle_file.exits(): + recording = load_extractor(pickle_file) + + return recording + @classmethod def _dump_params(cls, recording, output_folder, sorter_params, verbose): with (output_folder / "spikeinterface_params.json").open(mode="w", encoding="utf8") as f: @@ -272,7 +295,7 @@ def run_from_folder(cls, output_folder, raise_error, verbose): return run_time @classmethod - def get_result_from_folder(cls, output_folder): + def get_result_from_folder(cls, output_folder, register_recording=True, sorting_info=True): output_folder = Path(output_folder) sorter_output_folder = output_folder / "sorter_output" # check errors in log file @@ -295,27 +318,21 @@ def get_result_from_folder(cls, output_folder): # back-compatibility sorting = cls._get_result_from_folder(output_folder) - # register recording to Sorting object - # check if not json serializable - with (output_folder / "spikeinterface_recording.json").open("r", encoding="utf8") as f: - recording_dict = json.load(f) - if "warning" in recording_dict.keys(): - warnings.warn( - "The recording that has been sorted is not JSON serializable: it cannot be registered to the sorting object." 
- ) - else: - recording = load_extractor(output_folder / "spikeinterface_recording.json", base_folder=output_folder) + if register_recording: + # register recording to Sorting object + recording = cls.load_recording_from_folder( output_folder, with_warnings=False) if recording is not None: - # can be None when not dumpable sorting.register_recording(recording) - # set sorting info to Sorting object - with open(output_folder / "spikeinterface_recording.json", "r") as f: - rec_dict = json.load(f) - with open(output_folder / "spikeinterface_params.json", "r") as f: - params_dict = json.load(f) - with open(output_folder / "spikeinterface_log.json", "r") as f: - log_dict = json.load(f) - sorting.set_sorting_info(rec_dict, params_dict, log_dict) + + if sorting_info: + # set sorting info to Sorting object + with open(output_folder / "spikeinterface_recording.json", "r") as f: + rec_dict = json.load(f) + with open(output_folder / "spikeinterface_params.json", "r") as f: + params_dict = json.load(f) + with open(output_folder / "spikeinterface_log.json", "r") as f: + log_dict = json.load(f) + sorting.set_sorting_info(rec_dict, params_dict, log_dict) return sorting diff --git a/src/spikeinterface/sorters/external/herdingspikes.py b/src/spikeinterface/sorters/external/herdingspikes.py index a8d702ebe9..5180e6f1cc 100644 --- a/src/spikeinterface/sorters/external/herdingspikes.py +++ b/src/spikeinterface/sorters/external/herdingspikes.py @@ -147,9 +147,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): else: new_api = False - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) p = params diff --git a/src/spikeinterface/sorters/external/mountainsort4.py b/src/spikeinterface/sorters/external/mountainsort4.py index 69f97fd11c..f6f0b3eaeb 100644 --- a/src/spikeinterface/sorters/external/mountainsort4.py +++ b/src/spikeinterface/sorters/external/mountainsort4.py @@ -89,9 +89,7 @@ def _setup_recording(cls, recording, sorter_output_folder, params, verbose): def _run_from_folder(cls, sorter_output_folder, params, verbose): import mountainsort4 - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) # alias to params p = params diff --git a/src/spikeinterface/sorters/external/mountainsort5.py b/src/spikeinterface/sorters/external/mountainsort5.py index df6d276bf5..a88c59d688 100644 --- a/src/spikeinterface/sorters/external/mountainsort5.py +++ b/src/spikeinterface/sorters/external/mountainsort5.py @@ -115,9 +115,7 @@ def _setup_recording(cls, recording, sorter_output_folder, params, verbose): def _run_from_folder(cls, sorter_output_folder, params, verbose): import mountainsort5 as ms5 - recording: BaseRecording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) # alias to params p = params diff --git a/src/spikeinterface/sorters/external/pykilosort.py b/src/spikeinterface/sorters/external/pykilosort.py index 2a41d793d5..1962d56206 100644 --- a/src/spikeinterface/sorters/external/pykilosort.py +++ b/src/spikeinterface/sorters/external/pykilosort.py @@ -148,9 +148,7 @@ def 
_setup_recording(cls, recording, sorter_output_folder, params, verbose): @classmethod def _run_from_folder(cls, sorter_output_folder, params, verbose): - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) if not recording.binary_compatible_with(time_axis=0, file_paths_lenght=1): # saved by setup recording diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 9de2762562..86cce1959b 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -54,9 +54,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): job_kwargs["verbose"] = verbose job_kwargs["progress_bar"] = verbose - recording = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) + sampling_rate = recording.get_sampling_frequency() num_channels = recording.get_num_channels() diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 42f51d3a77..ed327e0f3c 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -49,9 +49,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): import hdbscan - recording_raw = load_extractor( - sorter_output_folder.parent / "spikeinterface_recording.json", base_folder=sorter_output_folder.parent - ) + recording_raw = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False) num_chans = recording_raw.get_num_channels() sampling_frequency = recording_raw.get_sampling_frequency() diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index 6e6ccc0358..e930ec7f79 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -624,10 +624,20 @@ def run_sorter_container( ) -def read_sorter_folder(output_folder, raise_error=True): +def read_sorter_folder(output_folder, register_recording=True, sorting_info=True, raise_error=True): """ Load a sorting object from a spike sorting output folder. The 'output_folder' must contain a valid 'spikeinterface_log.json' file + + + Parameters + ---------- + output_folder: Pth or str + The sorter folder + register_recording: bool, default: True + Attach recording (when json or pickle) to the sorting + sorting_info: bool, default: True + Attach sorting info to the sorting. 
""" output_folder = Path(output_folder) log_file = output_folder / "spikeinterface_log.json" @@ -647,7 +657,8 @@ def read_sorter_folder(output_folder, raise_error=True): sorter_name = log["sorter_name"] SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder) + sorting = SorterClass.get_result_from_folder(output_folder, register_recording=register_recording, + sorting_info=sorting_info) return sorting From b231e2dade552413bdd68e18aad95881a047f4cb Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 14:47:14 +0200 Subject: [PATCH 132/322] correction --- doc/how_to/load_matlab_data.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 7f90684701..0186ecf72b 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -57,8 +57,8 @@ Use the following Python script to load the binary data into SpikeInterface: recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype) - # Confirm the data shape - assert recording.get_traces().shape == (numSamples, num_channels) + # Confirm that the data was loaded correctly by comparing the data shapes and see they match the MATLAB data + print(recording.get_num_frames(), recording.get_num_channels()) Follow the steps above to seamlessly import your MATLAB data into SpikeInterface. Once loaded, you can harness the full power of SpikeInterface for data processing, including filtering, spike sorting, and more. From 468396a8832038c0779feba8f72e0794fdea8ab0 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 16:04:56 +0200 Subject: [PATCH 133/322] Add methods to sparsify and densify waveforms to `ChannelSparsity` (#1985) * add tests for densification and sparsification in ChannelSparsity * passing tests * fix docstrings * fix docstring * added checks * better assertion message * typo * base the implementation in unit_id instead of unit_index * better variable name * alessio suggestions * improve docstring --- src/spikeinterface/core/sparsity.py | 107 ++++++++++++++++-- .../core/tests/test_sparsity.py | 88 ++++++++++++++ 2 files changed, 184 insertions(+), 11 deletions(-) diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index 4c3680b021..455edcfc80 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -33,7 +33,9 @@ class ChannelSparsity: """ - Handle channel sparsity for a set of units. + Handle channel sparsity for a set of units. That is, for every unit, + it indicates which channels are used to represent the waveform and the rest + of the non-represented channels are assumed to be zero. Internally, sparsity is stored as a boolean mask. 
@@ -92,13 +94,17 @@ def __init__(self, mask, unit_ids, channel_ids):
         assert self.mask.shape[0] == self.unit_ids.shape[0]
         assert self.mask.shape[1] == self.channel_ids.shape[0]
 
-        # some precomputed dict
+        # These are computed lazily, on first access
         self._unit_id_to_channel_ids = None
         self._unit_id_to_channel_indices = None
 
+        self.num_channels = self.channel_ids.size
+        self.num_units = self.unit_ids.size
+        self.max_num_active_channels = self.mask.sum(axis=1).max()
+
     def __repr__(self):
-        ratio = np.mean(self.mask)
-        txt = f"ChannelSparsity - units: {self.unit_ids.size} - channels: {self.channel_ids.size} - ratio: {ratio:0.2f}"
+        density = np.mean(self.mask)
+        txt = f"ChannelSparsity - units: {self.num_units} - channels: {self.num_channels} - density, P(x=1): {density:0.2f}"
         return txt
 
     @property
@@ -119,6 +125,85 @@ def unit_id_to_channel_indices(self):
                 self._unit_id_to_channel_indices[unit_id] = channel_inds
         return self._unit_id_to_channel_indices
 
+    def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray:
+        """
+        Sparsify the waveforms according to the channel sparsity of the given unit_id.
+
+
+        Given a unit_id, this method selects only the active channels for
+        that unit and removes the rest.
+
+        Parameters
+        ----------
+        waveforms : np.array
+            Dense waveforms with shape (num_waveforms, num_samples, num_channels) or a
+            single dense waveform (template) with shape (num_samples, num_channels).
+        unit_id : str
+            The unit_id for which to sparsify the waveform.
+
+        Returns
+        -------
+        sparsified_waveforms : np.array
+            Sparse waveforms with shape (num_waveforms, num_samples, num_active_channels)
+            or a single sparsified waveform (template) with shape (num_samples, num_active_channels).
+        """
+
+        assert_msg = (
+            "Waveforms must be dense to sparsify them. "
+            f"Their last dimension {waveforms.shape[-1]} must be equal to the number of channels {self.num_channels}"
+        )
+        assert self.are_waveforms_dense(waveforms=waveforms), assert_msg
+
+        non_zero_indices = self.unit_id_to_channel_indices[unit_id]
+        sparsified_waveforms = waveforms[..., non_zero_indices]
+
+        return sparsified_waveforms
+
+    def densify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray:
+        """
+        Densify sparse waveforms that were sparsified according to a unit's channel sparsity.
+
+        Given a unit_id and its sparsified waveform, this method places the waveform back
+        into its original form within a dense array.
+
+        Parameters
+        ----------
+        waveforms : np.array
+            The sparsified waveforms array of shape (num_waveforms, num_samples, num_active_channels) or a single
+            sparse waveform (template) with shape (num_samples, num_active_channels).
+        unit_id : str
+            The unit_id that was used to sparsify the waveform.
+
+        Returns
+        -------
+        densified_waveforms : np.array
+            The densified waveforms array of shape (num_waveforms, num_samples, num_channels) or a single dense
+            waveform (template) with shape (num_samples, num_channels).
+
+        """
+
+        non_zero_indices = self.unit_id_to_channel_indices[unit_id]
+
+        assert_msg = (
+            "Waveforms do not seem to be in the sparsity shape of this unit_id. The number of active channels is "
+            f"{len(non_zero_indices)} but the waveform has {waveforms.shape[-1]} active channels."
+ ) + assert self.are_waveforms_sparse(waveforms=waveforms, unit_id=unit_id), assert_msg + + densified_shape = waveforms.shape[:-1] + (self.num_channels,) + densified_waveforms = np.zeros(densified_shape, dtype=waveforms.dtype) + densified_waveforms[..., non_zero_indices] = waveforms + + return densified_waveforms + + def are_waveforms_dense(self, waveforms: np.ndarray) -> bool: + return waveforms.shape[-1] == self.num_channels + + def are_waveforms_sparse(self, waveforms: np.ndarray, unit_id: str) -> bool: + non_zero_indices = self.unit_id_to_channel_indices[unit_id] + num_active_channels = len(non_zero_indices) + return waveforms.shape[-1] == num_active_channels + @classmethod def from_unit_id_to_channel_ids(cls, unit_id_to_channel_ids, unit_ids, channel_ids): """ @@ -144,16 +229,16 @@ def to_dict(self): ) @classmethod - def from_dict(cls, d): + def from_dict(cls, dictionary: dict): unit_id_to_channel_ids_corrected = {} - for unit_id in d["unit_ids"]: - if unit_id in d["unit_id_to_channel_ids"]: - unit_id_to_channel_ids_corrected[unit_id] = d["unit_id_to_channel_ids"][unit_id] + for unit_id in dictionary["unit_ids"]: + if unit_id in dictionary["unit_id_to_channel_ids"]: + unit_id_to_channel_ids_corrected[unit_id] = dictionary["unit_id_to_channel_ids"][unit_id] else: - unit_id_to_channel_ids_corrected[unit_id] = d["unit_id_to_channel_ids"][str(unit_id)] - d["unit_id_to_channel_ids"] = unit_id_to_channel_ids_corrected + unit_id_to_channel_ids_corrected[unit_id] = dictionary["unit_id_to_channel_ids"][str(unit_id)] + dictionary["unit_id_to_channel_ids"] = unit_id_to_channel_ids_corrected - return cls.from_unit_id_to_channel_ids(**d) + return cls.from_unit_id_to_channel_ids(**dictionary) ## Some convinient function to compute sparsity from several strategy @classmethod diff --git a/src/spikeinterface/core/tests/test_sparsity.py b/src/spikeinterface/core/tests/test_sparsity.py index 75182bf532..ac114ac161 100644 --- a/src/spikeinterface/core/tests/test_sparsity.py +++ b/src/spikeinterface/core/tests/test_sparsity.py @@ -55,5 +55,93 @@ def test_ChannelSparsity(): assert np.array_equal(sparsity.mask, sparsity4.mask) +def test_sparsify_waveforms(): + seed = 0 + rng = np.random.default_rng(seed=seed) + + num_units = 3 + num_samples = 5 + num_channels = 4 + + is_mask_valid = False + while not is_mask_valid: + sparsity_mask = rng.integers(0, 1, size=(num_units, num_channels), endpoint=True, dtype="bool") + is_mask_valid = np.all(sparsity_mask.sum(axis=1) > 0) + + unit_ids = np.arange(num_units) + channel_ids = np.arange(num_channels) + sparsity = ChannelSparsity(mask=sparsity_mask, unit_ids=unit_ids, channel_ids=channel_ids) + + for unit_id in unit_ids: + waveforms_dense = rng.random(size=(num_units, num_samples, num_channels)) + + # Test are_waveforms_dense + assert sparsity.are_waveforms_dense(waveforms_dense) + + # Test sparsify + waveforms_sparse = sparsity.sparsify_waveforms(waveforms_dense, unit_id=unit_id) + non_zero_indices = sparsity.unit_id_to_channel_indices[unit_id] + num_active_channels = len(non_zero_indices) + assert waveforms_sparse.shape == (num_units, num_samples, num_active_channels) + + # Test round-trip (note that this is loosy) + unit_id = unit_ids[unit_id] + non_zero_indices = sparsity.unit_id_to_channel_indices[unit_id] + waveforms_dense2 = sparsity.densify_waveforms(waveforms_sparse, unit_id=unit_id) + assert np.array_equal(waveforms_dense[..., non_zero_indices], waveforms_dense2[..., non_zero_indices]) + + # Test sparsify with one waveform (template) + template_dense = 
waveforms_dense.mean(axis=0) + template_sparse = sparsity.sparsify_waveforms(template_dense, unit_id=unit_id) + assert template_sparse.shape == (num_samples, num_active_channels) + + # Test round trip with template + template_dense2 = sparsity.densify_waveforms(template_sparse, unit_id=unit_id) + assert np.array_equal(template_dense[..., non_zero_indices], template_dense2[:, non_zero_indices]) + + +def test_densify_waveforms(): + seed = 0 + rng = np.random.default_rng(seed=seed) + + num_units = 3 + num_samples = 5 + num_channels = 4 + + is_mask_valid = False + while not is_mask_valid: + sparsity_mask = rng.integers(0, 1, size=(num_units, num_channels), endpoint=True, dtype="bool") + is_mask_valid = np.all(sparsity_mask.sum(axis=1) > 0) + + unit_ids = np.arange(num_units) + channel_ids = np.arange(num_channels) + sparsity = ChannelSparsity(mask=sparsity_mask, unit_ids=unit_ids, channel_ids=channel_ids) + + for unit_id in unit_ids: + non_zero_indices = sparsity.unit_id_to_channel_indices[unit_id] + num_active_channels = len(non_zero_indices) + waveforms_sparse = rng.random(size=(num_units, num_samples, num_active_channels)) + + # Test are waveforms sparse + assert sparsity.are_waveforms_sparse(waveforms_sparse, unit_id=unit_id) + + # Test densify + waveforms_dense = sparsity.densify_waveforms(waveforms_sparse, unit_id=unit_id) + assert waveforms_dense.shape == (num_units, num_samples, num_channels) + + # Test round-trip + waveforms_sparse2 = sparsity.sparsify_waveforms(waveforms_dense, unit_id=unit_id) + assert np.array_equal(waveforms_sparse, waveforms_sparse2) + + # Test densify with one waveform (template) + template_sparse = waveforms_sparse.mean(axis=0) + template_dense = sparsity.densify_waveforms(template_sparse, unit_id=unit_id) + assert template_dense.shape == (num_samples, num_channels) + + # Test round trip with template + template_sparse2 = sparsity.sparsify_waveforms(template_dense, unit_id=unit_id) + assert np.array_equal(template_sparse, template_sparse2) + + if __name__ == "__main__": test_ChannelSparsity() From fb7681520e74a01be0fd4e56740936a4f6de4e25 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 20 Sep 2023 16:40:43 +0200 Subject: [PATCH 134/322] Update doc/how_to/load_matlab_data.rst Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 0186ecf72b..3943fbd30f 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -28,7 +28,7 @@ Here, we present a MATLAB code that creates a random dataset and writes it to a In your own script, replace the random data generation with your actual dataset. Loading Data in SpikeInterface ------------------------------ +------------------------------ After executing the above MATLAB code, a binary file named `your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. 
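
To make the intent of PATCH 133 (`ChannelSparsity.sparsify_waveforms` / `densify_waveforms`) concrete, here is a minimal sketch of the round trip those methods are meant to support. The mask values, array shapes, and integer unit ids below are illustrative only (they mirror the new tests), and the import assumes the class stays in `spikeinterface.core.sparsity`:

    import numpy as np
    from spikeinterface.core.sparsity import ChannelSparsity

    # Toy sparsity: 2 units, 4 channels; unit 0 is active on channels 0 and 1 only.
    mask = np.array([[True, True, False, False],
                     [False, True, True, True]])
    sparsity = ChannelSparsity(mask=mask, unit_ids=np.arange(2), channel_ids=np.arange(4))

    # Dense waveforms have shape (num_waveforms, num_samples, num_channels).
    dense = np.random.default_rng(0).random(size=(10, 5, 4))

    # Keep only the active channels of unit 0, then place them back into a dense array.
    sparse = sparsity.sparsify_waveforms(dense, unit_id=0)
    assert sparse.shape == (10, 5, 2)
    dense_again = sparsity.densify_waveforms(sparse, unit_id=0)

    # The round trip is exact on the active channels; inactive channels come back as zeros.
    assert np.array_equal(dense[..., [0, 1]], dense_again[..., [0, 1]])
    assert np.all(dense_again[..., [2, 3]] == 0)

As the new tests note, the round trip is lossy for dense inputs: whatever sits on the inactive channels is discarded by `sparsify_waveforms` and comes back as zeros after `densify_waveforms`.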
From c8579b573236a6e454e27c329e9a03482be606f7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 20 Sep 2023 21:25:42 +0200 Subject: [PATCH 135/322] minor chnages on drift benchmark for figures --- .../benchmark/benchmark_motion_estimation.py | 33 +++++++++++-------- .../benchmark_motion_interpolation.py | 14 +++++--- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index dd35670abd..a47b97fb6d 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -487,7 +487,7 @@ def plot_errors_several_benchmarks(benchmarks, axes=None, show_legend=True, colo mean_error = np.sqrt(np.mean((errors) ** 2, axis=1)) depth_error = np.sqrt(np.mean((errors) ** 2, axis=0)) - axes[0].plot(benchmark.temporal_bins, mean_error, label=benchmark.title, color=c) + axes[0].plot(benchmark.temporal_bins, mean_error, lw=1, label=benchmark.title, color=c) parts = axes[1].violinplot(mean_error, [count], showmeans=True) if c is not None: for pc in parts["bodies"]: @@ -584,23 +584,30 @@ def plot_motions_several_benchmarks(benchmarks): _simpleaxis(ax) -def plot_speed_several_benchmarks(benchmarks, ax=None, colors=None): +def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) for count, benchmark in enumerate(benchmarks): color = colors[count] if colors is not None else None - bottom = 0 - i = 0 - patterns = ["/", "\\", "|", "*"] - for key, value in benchmark.run_times.items(): - if count == 0: - label = key.replace("_", " ") - else: - label = None - ax.bar([count], [value], label=label, bottom=bottom, color=color, edgecolor="black", hatch=patterns[i]) - bottom += value - i += 1 + + if detailed: + bottom = 0 + i = 0 + patterns = ["/", "\\", "|", "*"] + for key, value in benchmark.run_times.items(): + if count == 0: + label = key.replace("_", " ") + else: + label = None + ax.bar([count], [value], label=label, bottom=bottom, color=color, edgecolor="black", hatch=patterns[i]) + bottom += value + i += 1 + else: + total_run_time = np.sum([value for key, value in benchmark.run_times.items()]) + ax.bar([count], [total_run_time], color=color, edgecolor="black") + + # ax.legend() ax.set_ylabel("speed (s)") diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py index 13a64e8168..8e5afb2e8e 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py @@ -9,7 +9,7 @@ from spikeinterface.extractors import read_mearec from spikeinterface.preprocessing import bandpass_filter, zscore, common_reference, scale, highpass_filter, whiten -from spikeinterface.sorters import run_sorter +from spikeinterface.sorters import run_sorter, read_sorter_folder from spikeinterface.widgets import plot_unit_waveforms, plot_gt_performances from spikeinterface.comparison import GroundTruthComparison @@ -184,7 +184,7 @@ def extract_waveforms(self): we.run_extract_waveforms(seed=22051977, **self.job_kwargs) self.waveforms[key] = we - def run_sorters(self): + def run_sorters(self, skip_already_done=True): for case in self.sorter_cases: label = 
case["label"] print("run sorter", label) @@ -192,9 +192,13 @@ def run_sorters(self): sorter_params = case["sorter_params"] recording = self.recordings[case["recording"]] output_folder = self.folder / f"tmp_sortings_{label}" - sorting = run_sorter( - sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder - ) + if output_folder.exists() and skip_already_done: + print('already done') + sorting = read_sorter_folder(output_folder) + else: + sorting = run_sorter( + sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder + ) self.sortings[label] = sorting def compute_distances_to_static(self, force=False): From 84051d1515a444a3174a4642029ed02aa69d755e Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 10:41:27 +0200 Subject: [PATCH 136/322] oups --- src/spikeinterface/widgets/agreement_matrix.py | 2 +- src/spikeinterface/widgets/confusion_matrix.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/widgets/agreement_matrix.py b/src/spikeinterface/widgets/agreement_matrix.py index 55f38f078b..22617f6be0 100644 --- a/src/spikeinterface/widgets/agreement_matrix.py +++ b/src/spikeinterface/widgets/agreement_matrix.py @@ -8,7 +8,7 @@ class AgreementMatrixWidget(BaseWidget): """ - Plot unit depths + Plots sorting comparison agreement matrix. Parameters ---------- diff --git a/src/spikeinterface/widgets/confusion_matrix.py b/src/spikeinterface/widgets/confusion_matrix.py index da021092db..b76283b421 100644 --- a/src/spikeinterface/widgets/confusion_matrix.py +++ b/src/spikeinterface/widgets/confusion_matrix.py @@ -8,7 +8,7 @@ class ConfusionMatrixWidget(BaseWidget): """ - Plot unit depths + Plots sorting comparison confusion matrix. Parameters ---------- From 85c7755f3a3c4a93117ecb7fb842309e00e22915 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 08:43:30 +0000 Subject: [PATCH 137/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../tests/test_widgets_legacy.py | 2 -- src/spikeinterface/widgets/agreement_matrix.py | 8 ++------ src/spikeinterface/widgets/confusion_matrix.py | 10 +++------- src/spikeinterface/widgets/probe_map.py | 5 +---- src/spikeinterface/widgets/rasters.py | 15 ++------------- src/spikeinterface/widgets/tests/test_widgets.py | 3 +-- 6 files changed, 9 insertions(+), 34 deletions(-) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py index defe10f0d4..39eb80e2e5 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py @@ -43,8 +43,6 @@ def setUp(self): def tearDown(self): pass - - def test_plot_unit_probe_map(self): sw.plot_unit_probe_map(self._we, with_channel_ids=True) sw.plot_unit_probe_map(self._we, animated=True) diff --git a/src/spikeinterface/widgets/agreement_matrix.py b/src/spikeinterface/widgets/agreement_matrix.py index 22617f6be0..ec6ea1c87c 100644 --- a/src/spikeinterface/widgets/agreement_matrix.py +++ b/src/spikeinterface/widgets/agreement_matrix.py @@ -5,7 +5,6 @@ from .utils import get_unit_colors - class AgreementMatrixWidget(BaseWidget): """ Plots sorting comparison agreement matrix. 
@@ -22,12 +21,11 @@ class AgreementMatrixWidget(BaseWidget): If True counts are displayed as text unit_ticks: bool If True unit tick labels are displayed - + """ def __init__( - self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, - backend=None, **backend_kwargs + self, sorting_comparison, ordered=True, count_text=True, unit_ticks=True, backend=None, **backend_kwargs ): plot_data = dict( sorting_comparison=sorting_comparison, @@ -87,5 +85,3 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): N1 - 0.5, -0.5, ) - - diff --git a/src/spikeinterface/widgets/confusion_matrix.py b/src/spikeinterface/widgets/confusion_matrix.py index b76283b421..8eb58f30b2 100644 --- a/src/spikeinterface/widgets/confusion_matrix.py +++ b/src/spikeinterface/widgets/confusion_matrix.py @@ -5,7 +5,6 @@ from .utils import get_unit_colors - class ConfusionMatrixWidget(BaseWidget): """ Plots sorting comparison confusion matrix. @@ -18,13 +17,10 @@ class ConfusionMatrixWidget(BaseWidget): If True counts are displayed as text unit_ticks: bool If True unit tick labels are displayed - + """ - def __init__( - self, gt_comparison, count_text=True, unit_ticks=True, - backend=None, **backend_kwargs - ): + def __init__(self, gt_comparison, count_text=True, unit_ticks=True, backend=None, **backend_kwargs): plot_data = dict( gt_comparison=gt_comparison, count_text=count_text, @@ -80,4 +76,4 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): self.ax.set_ylim( N1 + 0.5, -0.5, - ) \ No newline at end of file + ) diff --git a/src/spikeinterface/widgets/probe_map.py b/src/spikeinterface/widgets/probe_map.py index 193711a34f..7fb74abd7c 100644 --- a/src/spikeinterface/widgets/probe_map.py +++ b/src/spikeinterface/widgets/probe_map.py @@ -5,7 +5,6 @@ from .utils import get_unit_colors - class ProbeMapWidget(BaseWidget): """ Plot the probe of a recording. @@ -23,10 +22,8 @@ class ProbeMapWidget(BaseWidget): """ def __init__( - self, recording, channel_ids=None, with_channel_ids=False, - backend=None, **backend_or_plot_probe_kwargs + self, recording, channel_ids=None, with_channel_ids=False, backend=None, **backend_or_plot_probe_kwargs ): - # split backend_or_plot_probe_kwargs backend_kwargs = dict() plot_probe_kwargs = dict() diff --git a/src/spikeinterface/widgets/rasters.py b/src/spikeinterface/widgets/rasters.py index de855ebe45..4a1d76279f 100644 --- a/src/spikeinterface/widgets/rasters.py +++ b/src/spikeinterface/widgets/rasters.py @@ -4,7 +4,6 @@ from .base import BaseWidget, to_attr, default_backend_kwargs - class RasterWidget(BaseWidget): """ Plots spike train rasters. 
@@ -24,16 +23,13 @@ class RasterWidget(BaseWidget): """ def __init__( - self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", - backend=None, **backend_kwargs + self, sorting, segment_index=None, unit_ids=None, time_range=None, color="k", backend=None, **backend_kwargs ): - - if segment_index is None: if sorting.get_num_segments() != 1: raise ValueError("You must provide segment_index=...") segment_index = 0 - + if time_range is None: frame_range = [0, sorting.to_spike_vector()[-1]["sample_index"]] time_range = [f / sorting.sampling_frequency for f in frame_range] @@ -86,10 +82,3 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): self.ax.set_yticklabels(units_ids) self.ax.set_xlim(*dp.time_range) self.ax.set_xlabel("time (s)") - - - - - - - diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 509194cb93..2c583391c3 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -349,7 +349,6 @@ def test_plot_rasters(self): sw.plot_rasters(self.sorting) - if __name__ == "__main__": # unittest.main() @@ -371,7 +370,7 @@ def test_plot_rasters(self): # mytest.test_template_metrics() # mytest.test_amplitudes() # mytest.test_plot_agreement_matrix() - # mytest.test_plot_confusion_matrix() + # mytest.test_plot_confusion_matrix() # mytest.test_plot_probe_map() mytest.test_plot_rasters() From 11a9ce0dcccf0fb367a1d1eb5a9659fc1bb05e48 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 10:53:39 +0200 Subject: [PATCH 138/322] Add doc for ephyviewer --- doc/images/plot_traces_ephyviewer.png | Bin 0 -> 102235 bytes doc/modules/widgets.rst | 40 ++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 doc/images/plot_traces_ephyviewer.png diff --git a/doc/images/plot_traces_ephyviewer.png b/doc/images/plot_traces_ephyviewer.png new file mode 100644 index 0000000000000000000000000000000000000000..9d926725a4f25e61fc6c1672d0b1876ae9d7f6d9 GIT binary patch literal 102235 zcmeFZcT`i^_ct7LY$!U8Ac6v;prSO9-mxG8(xi6R zJU21Y<>40O27y34dUtQ%2Z0Xn1A%_}{qRAcXLCfW5%@at_^wSL2z2}-=Z7mz;`mt* z=vR>5?HlHyj?2>_p}ZE8*fo+{%Fn;tJfvuRL`y=jGz# zGp|bG`{@AJfuAJBe=jmWt8+5c@j%HR_lNKPs(rI|Rm497_b{aH%A4gVEh5V5Is-kB zH73$}^Pk5rW79EQJ|IE+kCA+4@HJ(S7ycOUR= z5@mY$UzgD*3V;6BB}nAv`F~$(iVYt7{Gw{4EG;iZwA)#6uHzC|JNmbM;H~G2lCga?gA&URrmi}=k zs}M@xEGRi8d9!ywOXAmGwILL|1X`tgz}l}^1dnX?ghy4j~8*n1YC~1J6#gPo(b1i5BPX*sCu$tsgV?A=HXF3 z{u5_=;`<$8v+dbxA^8H*Zy$EVP!^n`6gx_s$KHOKXbp=O=`#OiO)b*+s&;s|D4wQIcvKHZ+yz^WpO#V5%1s5^6m>=x(?2cQLHM| zhFjz)`3;rQ;AsM<;VxM!wk=-h2?f8`f@#;o&T#GpN;RcXh{SCaIOpMy>mJN*o1vf8 zs2_eZ9+V%t>#yKDbvNOx((tJyEl0^t5RahzW0mv`)wA**pGWuSn;d?5!oafhDti-s zA=MSJ5XGqH4FHxL2q}-z&kNZM?ejRl>Z4Uea@<{I$Lf?2~iv_1GV;EVY*3 zG1OGftv^*vNImGT(lMdZ5`zr#&e@ns7@I3w-8Js|_3N8^F9Za&qHgB5jgSfo{b#WY z&-I4cERvt#^>0wtDvX^XcCqY@$L`h_8QEI`S?@y!)_ zU0oe*_uCagMenAa6X)JAMMAaNyBkSjKlEl*lQ`d9papy4;&cw!dq)fpalt8~u&^Tr z?$=J)Ty$|sOU+&WL~4s{xkC!9hSJH1Cr=M9P5Kd)C+MrBvPs|eTP$RY5uk(a`6tS` zn@P(tJZF)3xD|M`+-|gy1c81pj79meqlC1g>@m#EA*4f#5L!S$fIJRwjABP-i2gX^ zZ68J&m#A?&2x#5mcSz-UuD+Tn#HsD#pMMp352KoYYphF~aE>-FZfg!+MUrc3dX)X| zJ4qRsxLKwPD*8&sE0|4qr58W5VfPA3i>ZZVu@}x-VwjP0ojsJuMKHh8tSj z)@Dvn)4+wkkrP^bi;HqT6Y>t`>mJw|(&<{O&^ z^&2S#Fh|bv2;0U{iR!j_4~f30I=lMN=cPuGrbp*tMMq^?O^h{I^*3+c9Bp$5kHmOq ziTdFtP?Z*S>11O2V1*J#W?KhCNTrPuhg)W6bWT3HA8GIfR5yxjH~FS{6K zZlzwjm%A98_x;7q=!oA}>}3vELu%=K$(=>z@nV-H&#-m5{l$FQ zf!a1UeH@$v*o5hq=b!TN^1kUwySfx})^{))MX|uxDgdX``pA(Z$#0xsEQ`y=u4FPC z8L(-fo?MO-31;mQIi~k6v}rRs@vWx(|$9O 
zSlM*{3iplJX@Wx1Y9xLz^11C3pQ6R>D|6S`TSb#ukqSS=t zwez-O<&Q>%x|>gICw*kiu@*25cc4hXh%kJa%l>@*5K!TY)K8~qna#`*RP6wH#(KO7 z?)|RXA0@`fSOXPJaMR{ZD~gzVUB3$euMr<{!bWhWoPZox8yoG zcCo|LSFWyU)rQ=CQ7PXDTkjhE*B$*@g84hBeognAx0FAA=m^O)hD&a@9G-i_JKPa+ zYi$XS*&u)Oen4&5KP9eyqlu{8X7~*0-x2Tbm_9Z!F}yCfedhJn=Ex_t+mc*hZUw3~ zZ~&=x7ukQ;a|YrK$D823tpg^ney2M0mv6u1S^Us(_{Oey-X}gzD$b{Rrx%b~)4c^< zs`aqor(0J4b?aVo)Fph$r6Pyt$Jl=XcH;u}!VR+fdpKB$#d}_O?Y4C|@rIs>{|COA zzv82S3sLdZypVh0kx}GUF>*d64Pa;D8&c3rJo(Mk4SC5 z;(skK^2O(yu3AO5%^%uC{6)YfPmB`0t^PxcH1^e*zumZ?^Y#bt8^Gg#|KK^GJ2xL6 z)}MZaC13B*aSflxll7jRT9pE6_oo`=U-m-Q>9LfjD;r-`D5pOYS!@m*NACHP3-E;) za$Rz%%a(H_t*N)sw3eGfk-d*3kk0J=)!LCcQs^t6(F;3~~Gz(I5ph#P*$Fgtt zvNS2R_K#y9)b?SBH%&a)`F1Ajdt^oV-eHl9B_~nMQ6*CZ#_`tbN~>nTXe_O)wFn~F zG%|)1FZMz@0C$ksNmL!9$X@9zCW41)^qxUa4qeXje+X5#MUJ$Q+wyd4*WS({dsJ@OXhF0qHo0Xt|QEL;na0&oW? zK@Mu%$w$1(rFQUF$Jh(zF%HgbhH#Oqfy^bv)W#&aG`WgZU9TEwFqc>&#TG3HAdJjM zALStE9-HQypP&ZM3eKB@5arkdz?k^a`0w%7toVUnVcEp0XwJ85zoKzXt| zSV2prDphDQ1S>yvh{t&#a(F8@7t7#LlOGTOXu zq}gAYcw|UlU$LQ`JgmR<{t+ImYNto17pfkr>7yfI2@Q$VxyfPbYW$%mCi~kqVn?{d zLfL!1FBRD6kf{xJCGzPztgw!E^@hnS(HT1^ka?_o$%)FCqUy>!H z$#-1Ho=r3x$d0!~y2NtR&P6wzfuF^_oCQ3K1TJ0b9|*)NT&lqP8&0lqwu}gJlArU- zAglhiNYZoM3W@E=|8h5q^vE!aboj{<21Wp3bwwzujNQW0O>`XR;Pr5zn=hG23*oy{ zwP`o+S0k-2}<=^UUhyrXD@WhOMB#LFg&WD{$qx78HnYD_N z(EmX<99E4}&meBvRRN(*=NS;`%#1FT;QiOQ8t1S}bfQlT#EunsKcl=DM(CA>QM1-s z*u6kbv{GGXOqF05FpmP%PCmu$%5)gl1aAg;QnM7wctY9Oa2sF7sm@|$w<>nLe0xV2 z`ud1n@|$at2sA%$*w9e;Ce@@V)fjuB3k9-0;7>uo!}%_7O;!F+z-3ML*i@eEwic8w z{jID*D(j_)4b7gTtOdL%rh5TVFTqq3(`3ds(@;^?94Zu7G z35dSBvKm@EdWi{g1qsFM=O-BY70ThI%PAcvi5FrUUQOihZ1Fi=M=Bbp%y3U zqWFUvP8{uO)T)y~1iuBaZP-Lvu0veUfL$p6(KuT@ogT~3cZkYLWUI-~qmm^%lsWni zD0*ajq?xAsb7g~kDuP`+q0Rlrhk|7;dJL3=1t%0=rQf!mfezF$Y_YWs5wy+-N*X(2 z2WmUPA2o49d~>p3bh_9C(lJ|*!PZ9(^R^XqY9oL(q#Bf0tmj5i$>W{E5(tW+U2;*r z)xk!*_W=GvJH%(HCumY^`}9x8V--ebLV`O_MuXy~v$MVBeo$2{eku8MMDl7gJc^-x zGZ3rIO2#DXYOh<}O9h|d|6lFVv)J(!@w^MqA=6mL`7=w>98yl|(+1&5s@2Zb8rs@# z0jk>t!rozegt%?oqrG_?q}VX~`?-p{hlTyT&S(*SK1AGTrhQd|uiF-fXxJ7P(BPbW zBRwwM3+Pp0!N!g@s&EJ6UkL~>T4uA>~vbyIJ*5r(tNppdyUD4~KWoY2JC8r~8rsNAe zy&J<+_F}I`m-64M*JJGg6uj1ST5>vdmX|Q@+9}Ex?zL#=FMbjGM%&rl-!D>h8UR2W zpG?(`rqdTIXlH(`;w}A_mq!&j;JAayL=dd=tZh7-)<0M6lPiGd;#Yi{sicXd{b*4g zfaIsXa$fO&RVg(1b{gLKM;Yho9uuVQ(~*bdU4sqfbw$QHE;( zaEUBeSf^j|p(W;?qWKqmA$4$1na*RfjD?GfE6j zFFXO=2wE7nVNrmzkqDwMb|Nk_@17SRy>O{n{6owKXs*>% z2WN`!?t_4IYn_>zdF6^k{#?#1)=h@{#u6SGfOD8l5$a>w}e${xe~i}?EPF5 zJ-vJK7l7%9GHC*ib1UCfSkrOXr{&|7?nEV2NffAZ@GEgCd zlUYG@kOW*~w$eEo$_pp)y0*LP2l%08VeN+&$ZSNWdj$EEgP=4TR1A1UbYmqeNQGg) zWg6Zu&fm|$S$d=0GsfS?S7J8h_bpf%kH#NNmZ3_fJqaa@W+ZS10eK)G0IZ%Ds>SwS zJ15{+l`!p!Jo-kjbw;5!&G6R(zv%;60=Q+%DnJHoEe;f%fgP}yTfz&r32Q(n@~x(N zDYURCHyTAWHwSUGb(e&OX)qUS8U0ApcHtDvB8^S5`HlF74Eb%~co{zQM806!xATR@ z-g6o!7df4)`-hy2x|cm53~&@APMTN{6CYb2?hV4#GC{4?GZLgczI4Rk?t-OP5B5aN zne(jn=U8$LNGmBdFX9>I`ZUo)3rBalcUV=WmY5hdXxzh(@EqA*I<-jpb@r9ePA@`Z zY$z}ta+AZZX&&8OcK^~=^Nr51y?kDur`eb=utU++$(r|BOKi-{1PTzHFvM_XS2v1p zMYL^;T$zz1xyBk3r_Uu6?HRp}#O?Ym9mMbDRYz^=vLo7xk_gPgi+OQG>)v@QrJ*pl zNYK(hyq3@Q$vGZ9gDSUD7oISXTJ_1d+{Fe7j%I`ikp2wR+J)o1S8b#}fSDj>@tm|6 zL92451Ilc~l5~8swpkbDUIR<;6nV0ed(|(l6(j85;@B+nbO})dx2By##dOY6j~EEM zBx{TMk1&xalVD*Y(=UY89C?2<2m%;dv#YoY+a29kM%?Csi3}2VZ)MTGw_ub#wAR_w z6ujN!7|bnub)0&eU5X>$U2aQq_VN(i56B%S*bWF}4waE zY@1QTn&U&LBVRUx;)S}p=^VYYu9;GBuvO*uQ6Pv0cjM6h;X?5PfUG?90JK}Gd%zoX zn-hc`9Yd-+<;E{(e6C|D=@D|&Ujv@oTm3JwBWNJuzd$6hP;vZGo(fpN5_;uavKL_P zQU4)8%3p;iXJiGHspk?Kuvz<3WOrrk-*hzyP(Ty{+eUmshKhMS5K+E#NA~g>{G~Eb zU~?mzSDrQ6Qw-dCR^#0=GOgGL+N{aVRiXpfsqEQ4ANj*k$~9APRU;uW8d%DZ^7MI@ 
zqjEm}&Y~iwuzN)0s>YxGnN&5MzmH-?gpR8PObaJO9JQHZTmc0^@4t}7brw>=YScM4 zJlzSYrgGc%x24oN0qBw0a);!N^25!QqA|6q&N9wciPbpHVi-uE({(2JQ92Uc{`_s! zK?V0n%48H`+o+SkbqmOswq^-N+b*;kB<)?o__{EA4HR~B@Q!_Q+jf&*#P0UuJaM2e z{+k8&IcS;WM5S~_v%m%3SteyxHoYN&1u&z4=T?Df(5XiFJZei=x0OkTA1~LpqgyWW zoW0~b4C3bdTf7@(KAr3#+?~DaxTV*%O#Qz)bEbTQvz%87uMO&l6?19D8i*Bl2piQq zdGJfBH<;|oe&rZ*+~~fSq$4YauLxDvUuA$=ia#qA& zv~8DKNk`FDFId23tADN7Xu?^5sYR|GG3fSkk^d_&Ltl3Q0492zB72P0k-#GL8b3rX441{VN`FR^cE4Mq-( ziaR_QvQ(CX+I>@;#%;@~!&eTXrWpNCX3wr*3@6yU+i*u@)#-jr*{+^-edncDiZ>nE zh*w)tZfm#V9$7~)oJDbiT(s2iRmg>iEKfOV236S~@Jknm_kqlE0N#@Q26 zXi?c>$V+?>)w@ns>DNv5?KZS>CYu10)CpJec$SsHe9b4wb<}E4s{TJh$ESQX`1&ha z&_x&&5dFLh8nxkM-^9p3_B`nQps$1vQ^zV%uXLSmv=z;_{6H;C{ipuh zIV&mk6b4jcf~%uih@TYegXjS9ppNg|*72UruWfe!H2M}7xmPq1`$nq!yV;MCEB{ah zpV3_hK-)upC+42?`R(;r>;L{6vHC9?jTqc&^}%V*5Zix6?=L*&LnJQpiCqB!UlnPV>RO>V+F7 zr39>_GkULYxoC!(Ug9>gcJut6F`vm5*tZz4BEdgPoUPYaq4#Xx>)RH${)y0MZ6o(F ze@AW7le}SH*^8M_L7onNK3%RXSn|Lb0d zB!BS!n7QURofpB!$JljMjoJEhR_{3E19@04WgSGWwbFcj^|WYw(`4!%FN}#7W%EK# zl2MLGzjhl}JbVMNC2zgu-yVGpE&urWzd|2}(_C;F`e<4zMfM{R72AT!qP#24U8G(_ z`_tD2ae%<=izPeDOD4vtUE`8RR`h=JJq~7XaRcM>YS;{?T02aH4pnmnoG}#E6v7e~W zSuxqOj>44PUSCW}7_Tc7oKABBhIM90YB_ZkUF3~D2g-U3+|+wo zUI~dg^G#)$=G=Cm z0y9F&+F)5nc7}}xv0*nIM`ivaH{p$DMubp2?=ZLz&+0%+_{t-uU`SC6qyQIj!0-l@ zGP2XI%0Hw@Ja^^^yYt+`;b^mf;Swjos(-p{NW-iW5XknX;ebGdMr!*2>*J6$p=hI z=Of_vMo2@-zE4lSz?))T@Q;fXpO1K$-nd|ekhv2;W^n*&e_Q0;yiI~!A_A)vFeTMw zpj<8IUu3U<;u;iod+(C$d)LPHt`vuL6r#B4vNCj8U9pmS9Rh$rN11Pd-yXr43U(>m zqrhXZoN{KWtYQNus#Y4gWaY?nfVXa@9mY?6G1WZl zTx*RG`_|m;EGfe@hTN47dQ<$}{b(UttVsGCWyPds${srvdGx?0wn_;D66t*yT)BW* zXeBdbW5W$Y#=e(8qT@)z&>E9VseIm;3#Ocil7Wn~sDc?t%+!(MspO)9sbuim9q~Sw z;J`L*{Ok*p)1&1rVPl3`HXVA1~q$oHwHf9$7j!mp6Htwl$PP+L2izeDUd1O z<0PO`Rx3fwjIy^xz#LXSkQ(mK=ZiD4<`wW5Qwnv{T?*GEcq4zo_#q3IKa3hKZu5#r z0s-zxbGaQVr@jwD>r$Q2`iD7|Ulc`7)2d^Y!sdxo2i2SR-`xVSw`0cGxqX{}t~#k( z!*MEUIt*19>IAOvyW+hT%C_W0<_A6SeQ_Ia;Lhj*h^bDOnVKZ@K$USK3gBklL=5&X z0_5{=a?oyi4f#*HW{vMzlT-8;it9@iwOGKlP@AJGUs>raApC3=+ljT5=lRYn;d!Wm zR>h-%iN0E{8`_S01?`^-SlxX?3PTW0>oUwq1{$7F;gP0e;~d{aqkH2Mk8Ja5y?2rU z#PPs|%H0Qu8fXa&mNuI&SdQvTg7c?TaNt1!dA-ppBm5=N>>{zOg`0n+Bv&Alyt>NifbmaX+*YXxKjGb>9$qrCcRsu1=BRz znZFH*AAR;MH!hJLH-7rWMz zUo^6GLsvnGdNo3}rd_54volr751Q-?9u7}Uf}GUc2!S+8fY~vTgxAYFnS=)>H_}zH z7+Ex=*VDQ-TOiz+{C?vHV4F&nwXeEFVTX0L0E+4WTqM0eHvI6ag(BC)A(9^DFeX_- z*ab0?XAf)+C9pMoux5)_usKvucZx1ipBu(2a-gh#-uhR~%KVHA@UlLO` z_#nD8`*4rf^SVX+q(SIPss6YwCb%6K5UH2VwDlhxEnkgKO7Ui6ciYLjB)O}?#Y|Pz z-JU`F-jQ_j-jaKM2 zu54E~_FfVD07!SUa1^k|5BWPcI;k~xkY@%2@t+fXy23S>Sy4#SB}Q3uTvWhW{z7~; zS5B;=*ug72iAEquf%Owe9kT-ZgV}|05a!&U4t)0ZNbu1R!x%q3 zk-k7YK3i>6A54h7L>g`A;USVo(4$WzxHK7%f`FWb>$nkA#?6b<#8zKwu~QirzS9f{ z`dDWJGmcI*WTJrkgBvc_Cx&$KhZZAK{KC zW+(+qbr)iUL@=>k3Oy6!I__vKOH?hw{XMpg%6rnnqs}of@ts^9sAeWC7oecS=!{>u zdpy$#9HM zax5wOgb+-;uXv=d30BOLYnf1YlJ`h?HF4W~%~320qi^RX0RcicgQZYAS>4>O(U3BZ zS3mHMK%TkWx5L*(sECP}YH)}W+Vq7LEu&-LImaz4CDhuD3+t1c@^dC%mDOd1omBoX zhs6S5FQ1H z9@vA?Z1e!mmt^fG-!d9@^2yPa;mJ3$Ke+%@5_+YYZ{<$QsUDQSyat-Eze(sfNeH+l z%cnKYop)0-T|6jRy3?aJY|CUJ&-CJ$CKpF%QLgf@28NBtsVh!yXzU3-l&fg|p&Zk{ zS;n+8IzHv`%>n_t?CbI}2!xs#07k0pNl^&oAuC4`Dt326kYhTlqK$CwC7iZksVxMn z5JbPo_dPSMl#?LOe_eAAh|p&yNvml)*UH7de)L)g5+ug|nx-R14#UXBwt z>R@$LtV0LbCe3wZWTA}GnDheH^3!@hzxRNc8ogJtVfB&iZtd^i0190Q(gX^_3NV|r z*+*MpE?J>TO|jbrOnb7Icb{|!w>aN&u-)N)aa(8$vG0SYNVQk>~muLzwg zy_f|f!y@m!at3{YV>DN{C?ICwow|+>XtzF`E+1-5Dmr42K^M8OUuJFr$tBWVr)Zu9 zz~}WF0N?#G8wa$v7ZZ;ZJ)oYq^U9awMhbzGdd5w|VcRW56BZ#y+W?&rE`Z_CSbe0V zf4S{M@jTHYo$N_}l5Jx@)*Rw`{Y3`{+g5UknCjg}8IhGm8_9cFN9vAvhr04ZlQu&j z!M(P6279Rp 
z0j#04{4|}~>s*amJKnDJac&?CiUlLiG_Q6-+my&=Pk2`&l7j~WMJ(&ObhJ@&T=dY)~-yZix z`{8@0KVI4(B*n&I;>H6!1ExGN5~l=jp<;?6<1!{}8R$Xzfq_tp<0G zdAzxKf!$zP^>EJT6tYEovsV-Z8ynO00TUQ&Q59%%Z1gJf zFGmKE>+125%FOT8DFP4fBe@sa^CUY|Q*<#m+kF=xKgEtE+wd z>QtGuJpM6?>Pxc?oR_RwrC`W5l`cgMt-Az*Kbq#yCs|fil`M0f6vQOT{d@5xv7<|4hJaaE^#j$$Lf7~Eu?f%*#FP5ax^ zeF)8Ru9f8%O69i6OaN%z^3ChpGg2cr$0T1h5H9dXNyhT6?>UPn;mWl!8GFB7=aI}) zgA?B}*V5nO?~Q*U=@#5`dBgEDi~Mx#S0H!&P`5q15Fa?ciT{P*56WkHV;Gh1-}KtU zYRTF_>F<%R{|pKNrI8V>Uyqf}u)V5$pOCRN@}vHF?AxSUsV`0j>}ijw1qbH*hdiwB z`8p8f-|!nda2v_t(^$}&;DGGZd8Dy;Ee+xp1jr*i8kJv zdd^PPEw%r+G|`Br`tY{iGwatMB=_Kz1H!-2*gP*~(PdZvRw7o%p)}Degp1A%wTe4Nway;Jjmh?4xlU~a7`l{1& zw6{XPcQ^ri1qbW4zAG-cHS=1s!}%G;&U8?m_;H`~o_)4-yMAEWO1EQABR+1x>rJk$ z-mQq=OTxgG!xNLui2tp-zb@-P5`|rKxz8Xh`PXdjpPpZk6F_f_(TQM+2|Dnfny-hDj(koRAN>P@7O zQ9@S{x>N7!p706wa)iAxK;FrIj|@K?(ADWO8>su^k6xC;C*n-?uWBk4XNsSQeH5aon&gzSb_}&s0NdDSIU_X> z5?_Cbx-@DaS--mZ6kuq~3xZ7nadWDjZ7G?4(xMO+J`#mT1q}G$k**?yGL%`tDo6V% zC?g|Z79q;0lDgTgX2sXwu9T5)0+94mtb9ks2tcS>FGn0@Bkvg>^pKU*%Xhho*m9pf zx%qj9Iraqe*Xcq2=6Q=0TVcg{dDS!tHrqUMHl@F)d=7Ru(;`jZyfK8mc39rdnEyjS zG&ka3wDlyoBoHNUVHQ`MX*u~91e3tYulfBT@2f!R_~Qpkb!NF>FPWR3025e;)j|e| zS;wTNGyCzgwOvE_O+{JK^2vA;oDl-*9k-Ra~3)zdGwV@ zc5!Yz1kzSDmvEOA=H`~0xN?SX6m`XKxFXe<(41B1EgnVo@5!BI$;@;12^t5PlI-I% z7u$-<0b7C(0NsFj)vLOZo`e&-|=w8T0lox7<5PIdGjC~%@6!D`Ll zjC_#-?_YeaL1N%k1BRqmcTR46wf^5kETMcp3r5?MK*{-~{iK5@Wx3 z3jBmO%qh@4Vm=-Oq0UB=UHNm84%aYZ&&6o7O5Cv-iIZEH*v@siMRac3)-31vjEDA> zVMX2SZ;9Fpr;G&U_ifLYjTJsPl1V~Fk7Y?HV`Z0gAS`3HgCACFVknQdEh?a67P}?> zK+Ez?!pSG!!*n98ro{u09OyEq1Iq8dJc$!!ICtQyU%`&DUR8YqJ0K=dQK<7v&;1S? z^1&SZ`9woi#UW-f({?CcGdIAlgD#+>Ky&oBS8WOXeRE5n`c#=sifu ze+Yy#@!tzH4>-ncyBcy|72A1HI;sH4#AylIn48Hi4I0SFFJuW?jz@JG^o=fmUqgB* zD03^ZbmGVEMn#?gc^`n+0V_wHsn0~`3);>*( z@9Pm|H_9%dVFJJTgb^olZ1v@m%cSGvTm#gO0n?&}GY&tKwm=4d=PvXHveohQ*WJjN z%uG*H-u?yiQNggTEVYW!OrE80>-KExLBuchOFyUIoo-I1V&jCW&%Z68XXw$5JFZ4) zhSFm-&Q6PjlFlVghF&wGX+o>$i&Zp7){WlB<0c%GY3i9|Tsw6&nHw{un%qEpUs$Al z%RFxLmkvR1h8{q@)TIqFRn*kAT(jE6jAZ$__QW59k~Ix%69e5(`_|6+^S5|)PFBBs z9+O}(a@i(Ma3=F8E5GdU(=dPbn3w;AI$AUvM8EpE@Lo17hFQbg{e5ifyk~B@2MmdVNmcqsY#;bbfpg}t@v4GbU>hA zbcCGVT96xnhcpRWYykxM?V-p|AKi5}BQCzpb8I>`S#Yz9n!A`oQ>0Pfth;om2}h{-h8;Q)1k}*YE|}zuvOv zq6af46Xx4|UfO_IY~hfLQdUs{1$0UPhctSxMpfyye04saal5l6R z;BpC7mS=IL5%7z;9#Y$%2rF_B`P9*ltncNOfzMgdBvxGtr^%{nks2egvk|}!*AGhD zN!bhSg9YtS`f!MAdW|CUoUVRI8FbpsH$a1G-9qv%HFLtEUyFx+{m-C*`jQy8C*VeZWkw0QQx?iM%9G0R(X zYy$-iUY3W`4l@%nfDep?1HZdpC24E6J8hgUbFEXkm*h6GHkn_~*oPiaNjxt<8DOAu zJKY1eyICTzMHH6B<3nl9%d*-PA0*}=NS~H_v7wgSUVG~z0Af13lu_(l%i@niidA)} zD58AO&l0>OReZKF`ook6o3N-hY+ZI)dx_6bEZZ0`1CNU{(lQ}-WJ`Kq0)6NoY!~w^ z(AVqk2VuKHcX_F_wc2UxfQ9{OZ&W!bVEA<`D2xzI&hj={nw?7|A7Vn{VG>H4p?KG_ zlUWYm(s*V$`Yvl8O%5?O!@D4T5%YVe+y?jH>R^L=_)=~;>wL-x1zO36Cr2?V+WrS+ z?;e*_w*UXrbf)HReQ z`yEmIz(oU+V~<|5gTnf9CcIq?fI%>iE7!`SqL}tLB>V;0oog#;J73Blewg0fQ`gao zi14Dh_ynC+-wCm-_f!|)(eQFc@*~E}Oi?-00CQn4el81L??Nd{J;@BVInY+l>~+A* z^fVPRTWp{MZnlNcf&FGk4fa^IGg`NOEXagwnrFawamWT9Sl@F5Nx2S>Wd<%gQ7X}5 zn^I6@8A@HGO7{U*n6waK3Ju%EZ+tJt)!hh?wrzt-+F}=RJz($Y3fz1$&YosfGvKD> zxXSPH@QfYi&T2eg^A0sz6MwTn!9!0~*YN;){EkLZ_NEy2F%OB*L}p;Q_C``X{ub_> zuu@`KzU*z$reMk^9I`{gV!-{Jvk=bWfv8p{>V1Frmm#M=#vsS-Bx2`v$DPX;mx_|J z=*!1@Dx}|4DpmdgymKU2!O1!syDVE;)1GR(af|E&i-z>!Z6!fQjel|#xn~KP2-

GZNFKH`pVG6EGg4T6KaOD6yo~+x5vAY)i<%Dw zLHaas0flFMIvov&Zq5p#MBAeYU;w#lv!PV~Ou{3(O=~wznIrlPZ!UDmBG( z&Q0Q21V}Q)KX(!x#jZjp$(6noHC|+k@D$M_i#h62NnCLsO1jro;Is>l21#Dr<2KFX z&iL{7&9&tQds`K@0MAx zD#;G%kV=WvQwvLYO|cd*X|0tRg{F%yHdmf$njwET-}l67_&rp=82_!Yx9FNbYT?IW zU$&t_UhC)Eaj$LhU}>%#(VaQ4a4)A{jK@L!dzJC>g_3HOeq19A)p@jm;Uf-62PFFp zN0vpwtmNY=3wc!$%dVO` zjJj8zSXNqTPY<45%B%05dUC3!Di@uHe_&dwo|@{CffZ7ekXpXzo&4}2W{3J6orp{y z9aO|SovaBkD{zmeC>ar+W+oz-o1)@b?LeNqGWcdkJ9<`T3`{$W)oUhRnDCTlTq?M_}ZcRkVg(X9R_0wae<^#F0 zXVH4qp}!@uT#^6I9@O@}GvO`hq}|kG+6u>267!FurRz-5rhi(FpXU^YMcYnRa~QKe znV=ez;psBPV0gFH50cP;Zf-B>DQCI4|8gd89tlfg#WBr%XHh?le}hfG!ONPE_9t$k z<9eXC#6QnzPuNW<7h0^W#u|*dN2le6@in$m1Wmq3#EZ&;42vq_=P330iGpp=qu1go zR}(%F7U%ct47O6UL_vR`pZ&*qBEyPyzNR5}?f0{iKXX|H7tPqn5y@*O>f7L1Dr+Zg zg(_4ccdfFO$JWd%!DW-dr(=NObqjg&(iX(&OJDHE4!?KEB4PhFlfT#SIiC6cj}3-} z*D?*DQ2=vYM8ChT?yx_`pm|@~#@`3Il}RHXH*yKMenGMrB!LB1S@C};fSn63LZnAK z?7BGe%iibLnV|5UA-LO~|Cw~;ZxF>F{LqVuvPF`0^aS1z-BqW75P#9FQCRaN!a(BV zl2sz|_*kW#edV46&Nk{b7sAYu>adJb-@p!+9#fh%#DephHgZCeSl3Pe`$)g^S6Xw8 z^G13<5oH&{`a<#iBS(6k4FICtObw8R$1qPQpXsgZUL$F~r8t5-fh@YONFRJRjD5Ly z+?<-%Whh)C^`PeR@weQ^>;MXrcH&nbP5ayIKHM#WKii-9cDLm6W!mqfsb-e%{HiX|I;v7wSzc4qL5! ze)$vMdHQn8{qg)=B+oe);nA!H{PeMu)(E(nO!gpZ;yI{SOb@{04J% z70Mk+IrZ4@^GAK$>HmOehO)+QfjztJ)A_eVot7s z1X?Zc0jPK4md~mfRGrwOb z_)bqdAv0gJgclw_7(kk@(+GFB@dI3o8%3Wn-;Zruh`$Q)fm^zdfDcP+Sv@%CZ91qb ze(|p#O252o=W-gfW%KfA>6X<-q|t&TMFa8FFCmlJG@v}XXe4vq{Mhk-pyc!J34?DZ z|1AaGZ1Jf>k6k?D`X*QpO~>Bbnn2P*-*PX{IJm6;{=omhy^l$+VW*pY(mSx;_7weoT$V^WotAPb=a7j-n3yY8m}`U;XdTXAflNXbpeA^GKlt zGyw*3cgwx^BTtmHY~-v zGR6~(>-*~spBh{r5%W<+r09`LBWU^q;ZQ%qj(+(JrxX?ixs}dkj5)4vPGAn+szijJ zP?vB`hc`%nm-cV@gnUpbCj`S}I`!uq=cZ1nPm$CSbLfD2MDR;>QZB~&=gXNsD?Z&b z7ZGd*So%7zpoaXo`Gvbs%6nJMNc-j~(g$_zV8wgB2AK;y^FEXUMM!OBm0OLM86PBq zj5$*8kE_?MOp+KFFiV7O;<%z$TCe* zU9hF=%UqVT#=c=r8Pu;j-*Imd2fhCG)6Ms_w&+8z(f#B{_E|Vw9xcyx8cS+y-9{Ox zi<&}mB;_!SF?eIXuF9YPBn(d_bPA_%%g?On@fFW5j4Y#3UVb_33d6j4PLc3qa-&B! 
zrX{31R`jj3)Aa^Ws_hwv)Iu&H!V|1HN`1x;Qk!P#uJK=F4%By^A(%*a*Oyi2`r;_6 z0SRI}+10zn#=p=DMfB^@SS}pH(&KYrE&;V!vEHMPihW^Zz$XZr&Pea+ApEBE_HPjs z1NU<0hR#R|W^0!#!&DMmN5VG{r)v~23EVeLnBT4Xl@yCvALY}EA`k`leMi^G?nloj(PT@-b^2D!L# z;ZRo}F~6R7FP?0xQ0J3^LKr?)0k!VvcalR~GpK(B`kn009gLTb{urm+#Lk-rYSm8X z`C3l!SYtL8mcG2&;Vh>Qnfy|zov6o7VMb+qGyaC4lRpz*e-xS!ovq@QupL zH+fknrEWxKo$9q=o|;E0%y)p&>XIL6tteXoB3`Vtuk|M{*5|ixAIq5|?+6%nkK8hE z-4n9`LnI=~wE!&N?Tcp`K0OB3fFPq*ai25j9BE2?wjBS?#-AJjs}P=4*Rt<31GjZe zozOlIAe``2VR2%yCUDz8QcM;OJ*~K52bCxlJPxXq%7}nxiI{rJ}^aWpnd;#IEUIhoq@X zw^(zm@Yy;{E4QeAzLh~a$xh1`tZL^}TX!~(ctiF_nt`1y&7(*;|kC{&@ zUyR&hi0;v+P#3Di4efW9jsqn%Q|=v7G5Te`3~>2=#mE#pl7*ZHDG=b zS`3tX{4z9?Z-8*}W#xMAX@#HQ<2XTrknshN@t;#aq^Cv{Sizr|>UvD++!WQ*)%x4B zK(JQ_B&Hvl2J6;C2a8-6%1c&yw`!-WF--j4=WkI3vy)>5Ga{&oZO}aykPiZ?wrHsu zb!`xrrm@s$SEvi(z4E-JkrKcDt4+q$VgaxU1-1 ziDP27XPFBT4q(#pkF};xlqFZ_vYl(qI@4NkE_MSK`p5IdNr;XdrV-h{yq#^|F)PNB?-gCi|YKIo}ufYaH)*Wi-tv( zRqn~Jo|g_K3jSUc_;jF2-OzZL@V4!Uq05_>vXR#1>KAjvi`y?A?aqQsYg^{EGue)F zkC9qBvM-mQ|K8dL2PQ^Md{0hz`)Y3h<}ZI56KD4kq%;?Fss=g!Q@Cs5RhBNNdWM^- zN+8G*F)h@&b}e+M&0zCTqYmkXOx$}scCwvFP;gr=LUq!Lgz19I%~l`Hz280?sAR^0_S!sk0HtGas1PMt0*tasV z3*cAtRPsso${))({n{^ zshY;7S2`q2Rpt@rFr%oI`9=Z8V0q>tvCkEc&`%wow+)FW*X|ZyS#R>#6co9xClO~* z35)Vj52SLDn~FGb-T-;*=jxR3w~ zTfh3g+>SNnp=wdHxGBq$Q~|hwC{u{b_&ztsWKLdcdRTnixF=J~EYYq|ml7d~Oi8gZ zL*aukallJLo8OkA>EK_?5*>ck3;$!IWXTQ$F@ddftKAp3-fhApUDdtcvhrBhL0igj zURhEk2(#>lH`#4mano9+DtBr@B7|I)Zsl00zI&VB_y6Rm8vHR-H@h9S4u}W*N0v9_MZ30G1BQ#vP&m7BE58=*xz$q< zW{kenyFVM#X-hB2t_Cua>vy%wY1Y4#04P7QvD>l9P*M2x(*SZjmC;Qf>VYcI{61T!s_mH?yVM-?@zuOD*SFSSPXDaTp z>q_frNSSVqFpJ$XhWhvicX{rwBdtlIBjTIt(dh8yH&tXh!#zAeHP}4Y?BKG}wpaC1 zt>3qlFhgq5nrQz>U4{`v{ckUUulD?VRI+btV^a$W7@sG#?-sJE0s;!>YX*4o+g92e z(`VUV_Y6loBAti_4l94jN!d4NSt@^}Eqd`A7=bsV06H!Q_N7P#9Ps|En#EYDBUabm48HRFDW`Hv#LMs!V3mwa?JzhlLT3| zWvyk8Jzcg#<-hD8h{465F*5)u*<@x5aPmB}7>L|WIHnc+bGUei?kO0h9`J200X%KH6TwsSJxpQu82W^$pV z>*hpiAUQOf6D{mk zI5-~|7QS2ly;Lx2qtcjBUQO-kGN;TsxbhEkrL;OHz7p3Dm~`EDg~A>~V}gb^DT}hm zVo9xf0pOI1>-U#%BC->Mw?b*slk#Nul15rD^Ch{bRO3YRbqK&xxU5A6tff83w~Xw< z=AD&H9+Ob=<}EuKUoV=>PEiU<3AQ)enRV?>uz;W=D>riSxs%KYB!anRHV>o=6)=B@ zvsg^;Xr)S-cMi@2l!mNCELxw6$D4p+J+^{EOI<~h3okGu5hplpgXF)TUic6NO8$`u z*gQMeYG>B70xs4TF}{sf)xUSv16^+=F%F3I+@an$RbZMKAZ&ZRDo*)6aCuYe^4&hn zB&#r(0vwxj-;O8NQxh*WRROmrhu>RTNz9FC9cD7E3M*o|4tP8DpfZwd1uj8uN^fRW zg2^3Ni<^UBX)jmAEZhZ`o{#Q2C^2^TGm_ya1JQAhUaS z<{9#7`RT0qfx{q?xCe$*DcMR~~l;fdp% z!ze>R@Jt-zPCmWjLm*7s*q;|ASLCvrdR++QxzvP{bO42&D~ElkFN^r07)#qTAQy7; zLFGZww{Xcjr63sU-xuCJc$WWf11So#Q=Kufk0`#Ir|$W-(5UV0Dkb~vcOOB-#v#eC z3~4=;$m0Qr2d^pmjwv>73DM=}dho`_X-?`0t#+0x2Hf}>7dih&eqYQ3KfYbKKbI6e zCLM?y^WtmbFR)uQ)Z54vTO3?k>~a<;R$DZxM_ql)?1FNR?UR$fdq#FRrw2+>qFVP@ zimG_w(RylrqO64|KFLHNHU_+tggsK&_?y}$j9Y6eO)3$Vww7@%*#)(tW3VV#))4uD zRO0X66$t$2)YZmc+A+R_dL>TQb>lEZ3t7@xf|V%Yd>rt&8K4#>L&l`{qQg8E5@V5j zr9!wl;*nc4!g8Fw!@1yw;<2tBpT(Z;iyD)zX})OWN^6kab#iy?uADW9UjCNyQC@&P*86dSFY9JuEi`TH zcm79x;xm)^93BcoA4+!w`$Lt``9s1aeEO=VIAv zh3vuIDZsP7+bFdGe5Ub-BgT;HtK{#JZOSic_C+t0_UX=glLlbC;7=^>uJPp`A!&;f zXq$wtK>ku75zRHd<1UcUImV4cbFzK@aRE11R&Bj?@ zJhwW;D1Q_^6x{`!t40@hq0!wE0>S=K_7kL2te{`C(hmLy?vel*v|T;t#RQfAQd(4r zIDsl@FooRx0x6}k7YE@?RdYH&IT%&SwI-~A93`z1)0rJ9&%T6=WYT2M@zE3Fu6SC( zr6~B*E-K5i;q9KwFRURDB!?%x0jvy28bj!@7B(Y~e`3jQv1Hfz`4+F3;lCKXWmU$7 zM+JHGYzMxaEiKUBgMn~B-wQs{m?QT2fg(_&LOz` z6u2dB#a+-mBv+Fh5U5*Mq!Gkp{DYrC?5XxGC{X-ob5Gfn_Z|rBz3pF~JA7u=TR;3X zB$lm4UOnFU=`!iB88D;%Td-*;gT8}6uEU1Q_PLKhsh{<*X`6)WKa$Nf>s30xdMF4P z{eNpgpSQXCZd%>PV*7s)@uD?AHSxa^anSf<%lkg|q0|4~M6iC_zPztG^mE%*PxBeI 
zernjimdqjgn_R!8txdYRl6!(BTHzTce-tY8bIVSAc^0)tTZgl(t~3K*Odmqa$5I?Z@&J!P_g#;9hvRMK&XoZ zV$l|eq_t14eKcYf-26%0d-P{|;e7b3VzvH$?0&4IwxH`c_Q_gQ{1xEL~%5!2EnP(lsh6aXLqt1yqP;&mqv>3(~LXv6H*Eq z#z*g{vuEq4(6Io@_E1ZzDMyvGLTEiTgKC+&3L?m?zo&Tn{<*e3R6m`9G zY*+DBoE%{Q<*@^avAzq%SMRey>7T>?rcYx)=y|$x=kHD3C!$M0D)Q?3o3;f}R+6Yr zpc_YfRyj~zh}zD7UYcCo%veI1ytu{bL{ zq8G+LT_Au*Nq$hL#*UMeJ%?$PVRL8MKX%aHiwSkb;`m}ktYzKsv6~YnG}XHnQ#%=~f{QJtKOseu#zk=!DeA+DE6$C8k)uDiyw$Z(r1FsiLt(>*}3HP>74Od`T z-F+) z(>Vn&@F|+kK6?kNDW9T+dq?uD2xsU5X^gJI>l@W~OVP-#>eIBN z1_RTmEY&O2?T4Q1GwzryX`tI8RTeCDacB4=C?LzbgC{e|q>l-j)sfe)LQc%u;rk_w zgC$7(d?|0 z9?T{f>j0@5LuvU9IJy6?MR*cSqMe^jG^4vX`?X^z+K(^txx)0t40?Z-#XB=eyouiDprGvU`#w_ytdx-X%dxM(e!SwKoD1&T>&&J@@^7h z%c5c%%}UvAO5fi@d~mms?K3*_CSGzAcsgd@;H(wup7hea#*+A^bSFuhuSH=2StOTM z_@NVs2+3ryGXm3!H(^R}7EL_RsTj)n!akv2CF-^CydjN}$40ZOwY@RRcfmp}Y`xkK zKeoL~nu1;ou0J(loUS6B#6c0#p>2G0`B63pjOkL+jRJRxr}~~NRK!ZMqMZ>w2)+EB zXaVLCH{XP*w8lJQznw)&-0FTv2?kw#Jz|R?Ux)6tK)e8H&6x2)IP^oH?dHftk`t&( z31g`^S2GDhq)ebxZ5OCjjRoF;31xOR+o{pqbfUKCVFMYTn|K|B`v@B^(D-W+B*_8_4;sHDt_{Q`b@gZ;wh~$!Oyou zg!!}FF5JB#i=Atv_z=a4T4Y3yVbEhcssahON>K}g`@!@8n}?;`Fl$-nYaTEIByHl5 z?Szpk+v;hfdm$_yn$JnJJw>W{gvFp9iInd{0LDgGn2Dl*s>np!&COGJcjPUU6lCHf zg4rV>Gr|sSRuAEp$%!mRdOsP%ZBd-9nzX-Wtr{Axpj%DBNLQYjsWAoD+J& zJp)jW>i!#x1{jXmI;MWnDUR9u{Qt(mPQdR9G>-#_6=X?WP^)q$VToqEBubgPf=ue} zPL)jp{ws(l&H{9*;vsbzl+8uDsd462{akn)iV}SFtC6q^^1VSY7xH=&(r!)XR%8({ zj3thGTj=%Nuj?LvEouGbs!m3jf3m^Ln3rqZgc}zu+&7qUPnGw5oukexvNV(a3+KUqhPSO82MPPoTFy zcKndMULVGDasU6Y)kWQs(TZS$N|YK~#F8((mCC({OSTCrtX=Si`vM4%6LsY3S?oBW5$@Etyv2d3THig?H4cj7 zEUvHa?3|f|7tBz~l6cDt2j(9eq|WcJ(VHQkHjDFG%$>TeWh-9uPuQ(O7s68W9G%@N zMKs5We|27ltB2c;q|aoli=XaIMD&_b^$K3X%BQBb5QsG}iG8apfD zWSY_1Gescyoy2=5oLu}nQ8UTtH*xSMkgu}0b(x8BRepMhGU^x*Y2Q2G8aGT4B-;Evh-9n1c;-CseVjxqYt{uNPj zB#TO9F$@(GJ;zpF2Oy+P`CKr`A^UEIDsi{FkrhiwYT+9xQD zD`r>!T^W7zG`ouZTRaXS5i^4$OAU@>^7|xB7?554+%R_p1HvqvuRDR-eUT&=`JzUW z%N_|>aH?kKg;XZroO?m&UlyP?%=U#|?mc`O9XGb{(A3_zv6n6v(BtJz3zJ>1lwEFd z*>h$-9TN6^cHeO8NdjILLG-G3g;MLS+L%E3(2hny^XJEBI{5o3){eV5dB79$I6@w5>sptu0`-lv#$miw~^vXzM;2;g&czHY~;}gRl_D zvQ<1wd61l$??8FQE+osu4XQs$UMTz$&SY#)kA7^wo~Tn74M7n`jVVVE!+}@^bJ06|V+vv$gSd61K(5zPe860=wnn9lgF2)OYWU6w=#*K5;-%W3=86e=s zyreQW>)C=3OAvQ6jAk_rr|hjcIz0tyY)3Vmk_CL$0&vfla$NZ=E&)@V1X63CDHL{f z;TCr&sFpk1uuc4Ydt2`<7|JAe-0}1>W)>GPi)qGE|4Ubis zv|#`1By)!^;@kk>QxOTjjQKA*;5!$kenI5W)u)N3O`V7P3ckyu<&tdH9JQFvI*Qcsc3sw)XBn9j45dFhzJ8w(OAZ+2@_~8Sz$QrJj5{~D|_ni>)-qC`}xYbsY<5QzN zWpjV+Qj)XxXkVx)J^no9awj$*$XwDjUmj2Cb)cKjlh5!O6eS$FQiIb*WW`&@rJR{E z^zK>Y$5?Si=mBxum~W;PF=&8C6S*%WlNf5MwwJZhTk6eeJfs9xp%dRHM+!ss_L8~r zh@uMVbW)T6kFe+oHk8PQt+(Xy92lv;xZ1*{SUQ79YW70TGbJ{qWlyQ~0Z5cG@_Z02 zzwBSL@f?w@rLwj@-$459G(~v7Ni`A_-`ehu&W+vIvv@Oi+9sgciWzvOb{;R#^y;RL z8HmckYDk zNAlyT=MNVU?+&C||JXr!QN=e6`75DJVIpBRT3d)fxzDHWvx7$&Du)NcAWe*FEOi_g z_ChRfpMGkF9bI|tOz(sOfoqs@=|^9+X?a5<>wHjGQx-#fm-QH7NrSs*>E+QF>w5O< z#lWMhO4moS>tBZTA=mq=*-7H}Fb`IGjYd5?l4#N-42Eu}89_RIXTwfGMnXNy=CQ*Y z5ZBaJ1!akr`A$VCD_TmU9DH0D_-bh$F*AR)8&tc zrWyT9BOF~bxU*TVyd03deK0K9WbJd+TePG2*6NznOz`(LeQ&TEAj!$Zzclpiacz`5 zI{+ZaXNEKxvDx)Q87K<*n8MB*xrPE1^Z8{V0dZ0BNf~&{)JGpAv>f~;r2Y*zGv%y= zl3UcVpEtU$*zR1`$CEr(BniJfS3NHY7{^U!W3%GF7Q&1w)_)%mJPt|NiClqPH2j2? 
zHoVx<@z^p!1{1WQ+Eq1hHr+}5(+ocnW1HdynY>DK8@bPs4p#0O-1qT-w5%ferXW2< zNc>XrmycoX3hOrhJAeX@Ba=OVJhDLfHh69Gm5fDUkpSYf;OwyEcS`?3qo*};fl4=eaG;#k>*zzA|e0~!K@?88m6dYJbJK(7o zhUg}Pp~)?fK!=uW%Ev%2&^!4T>l?R< zv;y}C7B)VWN-%s<>R*$xzPFP;+t06Cw|RmIe7!GrYl2S`#T)52Z6#4Pl8_qhI>^PZ zR&PK5x9gn-6MTb(z6*bG3P#^G*N{rZn>zm&8evlCwE zU<7x#thqS#0pD!FLfp%1Q_wU84XWIY5Zu$5vF32ta zI*Q+lx!Wh>MuKdYk9;*pes$eY@NvlkDoZ|oU>EN`Y_1oHC`*b zY6-ICMnWb@wEK|j6Vk6BPa|7*-5MfvA|UOnS0b>uPVn>Q*&}f&uYlb(MjQ1b7F<{S=~sf|v0Id(YuE`l?JgqpD*ADeW^- zWiQu*R<(uZl>vNe4_!bEefD|wnH}@rPJ=Y^TyKs^Md3k&u&eKGHvgyX=*k&GdUUu5FW+77=kl8`h`)!4lt`YcF05H|(Z zDHXj*oIsSBK!iJkF1gogYK-pW8~|?GI}@6RbrC6`+0mqJSmw>sYup`scirfR~Hbu+wQos7>L~m8;Rd@hH$9{Q*$#3t56iXa_MuHvZxAy z@B8eU^JVt?P|6PNBx4~~5Qz1*c*VUIP%SxB>#bk16tw|>2m-rosa)rpL_hvEk{E3< zjQNfR1m`oB~?+5$W$prhJlk#KhSPm9@{C z`I7LWr`qcQJ#mL!rUX5|4O7~FmZ=%u^Av3h3mU!}P-GGhvW1oh+cd@_?Zi0 zo)p5_Hygut@OAkQ7ID!oJa(}imop$16K;qnVfL789^TdxC8E6TdWCIDERl%lRVUH) z1#+IERKnCfaPfA{lGsE2rw80P#gj<@7bT4GjeIXr4AG>^TTqDbjPxW#xYSUpNlm9fjsh%D_=zim zqg}URNI0sEXI*lNUxgee0M>q;(;P*r4}Ii=#95L$E3`zzYv?b^lb*HTQ!ie@p-T}- zvUAEO_AyHdo8S^^^W2lNrB`LEGBnLXBw z`ZunUjEDaj7fUDa@l;kC8ZVi_rbO$K+eMv};&=lK%r| z^qZ^pl}!XqrAh>SDlPn@wdy(xB+6~;1WNb%bJU)aSnBKr@mdM6{-cH2Bl5Q z7!56ZZbY#FoMe4o0d)+1offqL(iQrE0$p z8hea^E(<(92G{G;419|9zna>-@`rEw?0d?3SH+g`2vz>dvb}z1$#R!BL2kOIF`)Eq zyWV;Lc2d!Y-}S^ZEgDz`d+iCTM`KCCtN^y%SX>UAnbgDj>+_ios4j@9S^7rX={VP@ z%M44$j5W5(Wqk?rTUvicE?*m&S-=R&MQ&cnD`O3*J(mE9UpCd&vKA$X$J~{0+Q8wD zr~3Hg`M-fk;?}#N@G?4Kjv<-;ARx@M=34)@J7i@dj}3H;Agu#NkNcIHf1j%1E{8s) zEwx*+{<}(%IMZm&DftH>X=G%4If0;)A~E{bvb1CK4Q$iYV2*=htYn{K@=QnTp~p=> zje9O-DO7a!|0y?x84ooY&8=ukAYrQE`PIrzgKh-*c-q0p>@RP<_XY~Aselvo0MRSX_)K;OIadi#@ zqUu$8I=t+^GJRvnZY7aB;+Ue&WeI(&XLuN5Tj>(vKt0e`Uqtt(-0nScwG9-5}i-@Nj^@QTiBCFYysrg-ta@QLzFtBiT@Xpd2J*|7_4 zSG1-dLUj_kwQSi^+B!+P5x7kH$y$OIhN$ifQL2D;c9E;GafvkbAOApTG|vK;ByZvL z3+MuRn?ieOWB}3l#1c{Ja5T*HuuGV!gA2kLOAgfCez66zQ1S!=o4Z7&HNYInFzT(D zqVYK6z64gU9jK-g*do8}FD$8Hh3_=pkE;vTyTtQo>8?t)i=I zMP$*pQ4;Auj!drE(e^~fEX;^vWJK8x#3~ONlD9#JtKtaebxv-wZ*i#V-sGkhw?(`N zoTySBB@ema{7TX`eLmsVllYx(>~Hf5yik$N`Hmw;XMFp+$&{DmSDb<@w|)a|yIdan zK-Zg4df0ZLf4Xc;?M9Duimt9ZjM`Om9%+vBX!4wj_Iekc=zuUZA9lroT@A@q2k1j1 zcuhf1k!*U%i~c|fdZHzmGR9@d^OC$>@&XDB$di%O^2rZ=(jdzK_k`43;{z1;kqd4NW|AR!pu0Ao ze9$+iLz*)dj*AlQt(83Nviltbv~-k2i@ZjkE}m_Mm4SZ2q5RO*j3-qI`9TsJP{fcI zTB$RiEg!MTFYu~lFzb2~1cx&o5NpT0L?U@eO=C2o`dERBjz72M4W6v+y* z!W9EMM41y8==nJYnUex&UVCp{U$WQvcFGc``)$1yA&5~%>EeWrxb+VE(SvPq9Iikt z>Ps-Bz3fdIrtZVs%CIS=x+7YK8y-x*3TIPCy%aU&E<=H;h94YGmBzt)o9f{=ceeRP zMh|zl_a>FrH~6{u_8{MKr=0J8CLVu@xHR7c_IE{>cRIUr&LAJdN3y2H4O@Y>)K}EWZK#k3_ z!YetQQeRTNi(`W5sIH(Z94&vKlZQlFv5+yZgd z^+(AMsXWcjv9O(@-h_Smx`3OZ81ySLC-eNz*hu(rcP$TU-fS7^G@pX7=&GJA4}5uD z`14AoZKM@#|M?xHg9uN&Yo0hmbDm7zNv2;ON-lIqU2u~f&d^jza|$Y)41j#;^WD7?r9I}}?rOT+%WXjOI#eP>l^u&&NB;nvEh;(C5|Z+nh#PtwAUy#MRVNJ{=BgRp^8Yjw^JU4 z+~{>^to>1WFu3VGLlroBo~Vk!40IFRjg0noGKYivR}s~l4BD1`yMP;%?&KC9X%Wre zJ7Q;C?cZEjK5MbvU9l7N6~51|9KQl4i#VgqcLQKZ)-0UC5Xxto_&#C9mt1Hr(E&T! 
z_R86>&SE2*zs-`+rEve-ZbNpE_4|kfcoDq;ADoL=L7^5)R&y^r#SX6QTc{`sWL7M0xxCO~;W8=+Qqj-FC3aX45$aP$P3`cY{Dl zJ7~CBET$JLciN3ZxfAP|mO=XbQ3M~O1^)9?j1X{iZ`S%}XwuJ9=cC^L{|#W`wEP-4&` z#X@{pl-7w{nv961II)EP0w&VBT#uOEaRbgwrgiPqye#PV6iU+@^tf<$E`H-D@f@EKfp$gP&?*FA#10dyWky z_2?df|BVE4hWzkPh@skCigbe!>Ox-VrCXu4&0*u|ZX*FZ=5HS6=HRAljK9*v66Y^T z7gNYpykm6v+MeTq5@!4lq@Vn~_pJNd#{qko`Ok{HB&M2n<#DPMo{pE^rzw@F$22zT z4YpIZdy?V7m_I6d5@}&#{_}b57bXY8f6$BWep`7*{PniMRsT?zAEBhLFZwE+{sIx< z#zvEZzj}N#d7>yF+S^;s-A}!Tf;da;cdXlQ@WrKX zIR#L!6A*~UtD=9VUFmZB{`<%GDj>)A%5i_cgumMR+29;tyt#6v^V-Lk$6Mv!*h4n% zK@=F}2v4zsKczu^J(2wCtJlFFUs}Fg@A$xJ|HE|lhWd3_V+h2sYy-eLv;@a|4h{TI;WW~M5_LxUFi7hu;h`+n!BsS7@>D&1_gxPZ`MQI|$ zOYQQnG$A0bH!{2bg|ZG`^RPhQmWV?xd)BP`cUY2D)b;4Ju%BFd9~p}3#tNiE1*qu1 zWwk6nZ-2}9V%@scSv6WNw2)9`_04zx-m|t z+&xQ+xspX|R4g`0sqQCqhv4J&UC-a?zZ)bX*5$!{D9$O`Nyng(j!oRK@heykwo6;? z@8BXS%-QG9-{2yIvEM+923)i=IoBg%vvKIy(I66PGv)W-@r9!*Bk`4lPCJ#Vv3A*M zF8l5C{xUYR(JAb=`Hk-w2$Fv?VzTR~9Ve#RiOp0V6CdCDvE?>+#hS0X?%y_9n-?Gb zAb#D$h-(t6|KO>~4dTBk-zQC(s>~s{kJs$dZ}(h(h!RJmcV%ydEYSRuDYLUj)eWwY zg=<&d@sD0e7nRUfr^B4gdL=|F;JJt|+nLs;=PELLsO8wf}VZ;Ma18 zy?bJ1QsL8V%I%T5IBvnY5&`rKNxU-E^~6WiSrlVhz3{}4O;*qmB$^MBQ;hvMcfUE= zc2ND*>tA(*_g$QBrHA(FD;s)gl=lapY)ZE_-ycsV-%`@!z&21d9G8-KG+voK`p`Qg zsb$w2p7^%SQonIE#6cYBAElucy>L1;XXEtqI(g!nh|BF};9yfu1hL)GoSk(v{$=M@ zb(1~vmhbLZynl29`|kCxMnQv>Oh`X#eAI2}>QZ*eIdx0AG=%P=d+ykFNOFE{!a4Rt zqyPD_@Tt6U4LbJCeSIT*+ha*9HRgVf|5rZEBfWin!uQ!!%JF-;?B^Hi1Z91J@qU@n zX8W@@-cO&>)YLSJURmh&O}m8c%s(Rf&d?Z=aLfhp|Mk4|Rg*FZ z%x$WrGeKNA^Zu`jD(6q6h8pm1BICci=S(@BeThJ5j2*4`hwbtk27@e*j?2Vf>?j*6t z4=u4I0+kw7^lDGD;QAH=G9n5POG;AfE{e@!x7+jcwo#D3qC%Lf=VT-9P&M(d%T4)G zNwMDA%;hFOEEt1hoynb_cq7P* zlF7{J;zmI~sX1{xBhn!6(Psh=cV z>Hk&5hnEBziDzkQ*c)uX?=KsQSw&U?6WZfLZ20D(y@P{;Mo5b-UM-|&PE%{) zTGi5^wQqBVW>A&bwZM&`4avb@yMt<^ZN>gUK~%`5aN3@n#0|W>A{t8z;($7WT^;uD z@R)`hMJ;yQ*pr>la6FnNn_M*ts7cdTMLU~rGx^nDyy9_l96t>5m!%E7?@1jcTN$KU$fZt zwPl9y+thIAVvb#thz2hDp8`C-&D|NfFUi1D9s0XbYe2`(jN2r#HG*u!w7R1OvcKS0 z7AA-?=~Aw~`gc7fCQ-;?CwT;_h4m913NWmL+O!Ju{GY~F=0@s-(0VAy-o7aljdrDD z)a>Hq9rOKvAn@sl54~e71FM(E{0W4U?Izd0*rHr-7$b^zV_=W|A(aC6(r-ZptHUSr za@#r(Gol_ORTUQQm$w+cDiIgOYHm^9NB;YGaAJUXvXeM)h0pc~s(O&~k_e(%hx6>e zZbN;g6}v{<=Ut8Ridi02H4ntNZ}PId?mwc1i#*m z|57e%8bfuHZP^8{as?%-S7OONk~s%Lr@I5WQKN4jh(ug1uB*qT2YJ0MH@{R8pk>R> z>Ny*qP_M!B$}bPLhqnm{6!RL;b8YQvm<4ybr$m?@HhA2{Rw4dT^9_2QGu!vNf_CYcN#=tqr%k; z!vHbx&Obxx<>lpE26yKUL=mHhJBHhEI~nWDT$0T*02qWEBz0QrcXIp>*Zm?lwW#_! zlAcRU5f0GI!Cya8L^^cKF+y?(xJ~BwN|l)5UJ37tfqZGuux=uS=NFn5m-EXl@#KipaAZx3{l1 zZbm5~mbO*vP^E`sWMt&NbH$H60_KKm6%>dtEq1CY0b6_h3nLi4J7PRt*m>4_#*Woa@n4?9+H6y-p4YI>$M4r?DSzK`$ot@E|E-(6ax|Zacy9 zc;@WNGVoWlN%dlB5&Mzt4YQ{{=l5Fkb+maF!UV*w1r2(c383AUpY$0pTRMB%{|G2p z9IUrV_}p;#(mi_!Yk=WDM^7IM*=u-C>hAFmA8)xm-(ay3Y&)?gkBg9-Uy z+?hT0@2#XX1&g>Ur%6$K-DXx19^*!i>k)H zrz*i}JsG#(nh&9P;GHsOt5*fHv9ie29NV{mnw-QPd~pd*$fY*htefP#5fvWbrVY)H zHl9qcb4)(48{r^}k4?rpSZQ2?jsqnDid$L5th8r_uCj=mssS>f632yUrjX(}v)k^M z`rcAPbW6uo@WfiPfvy}JP|NS`DJY00pA~TcYH&qWi}NE!CTiosg=`jv0K3bn*!cZr z?u5xWe{|oY{_G2q-5?9tv_K zn-W~@3uMpXS|@Vtjm=zuQ&j-Zi(vW-9i^(*>f3E>N z2X+8_#@T)rqX07X!9+^(e&WLdY7Vg)mjz<60Cr1VlhpE;zu5i?cWsE2Bp^3!j;BOU5sc zg_HEH;^N5=h3H_BsYHAnV;@!V>=3)hNX2|E)ZP)k*;|yrj#tssIoNW>IlZD>tbE%DeV4ARe~DFU}O<3@h}nF!S{IN+Qq&dkg(`T{H1$XiK(TLsXV zFd9KRdiBaALj3*U{Cc9zPvG**_|V^BhxPPxyR)9I;N|S|eA%)6wcI55NfbmOaval? 
zgQ}N(`9(|!5uOt0&NK@nc)1E_BJB=C7CxH=?^{sH_nNxc9_|H5oqXWJku?byO)({O z)$34Ef@HIlEXxd=xISRsw<6u|L*!K$>#^#Hv(Qy=u6;&aSyg4zRp?P(OF$+5)@z9g zr5n1x*b`VO;vlMz?Ra_`@xs{q%lq&CA*YGe(j9@!-H0oeeJt^IDS^*Xaz-&~4L5H{ zNa-G~ED&a^EA|#e$2FkLmKe*9iw({dk>{M4_ zdxQS1>_mWk2=}YkDR~$FA?vv*8l(j literal 0 HcmV?d00001 diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 86c541dfd0..4532b74b93 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -14,6 +14,9 @@ Since version 0.95.0, the :py:mod:`spikeinterface.widgets` module supports multi * | :code:`sortingview`: web-based and interactive rendering using the `sortingview `_ | and `FIGURL `_ packages. +Version 0.100.0, also come with this new backend: +* | :code:`ephyviewer`: interactive Qt based using the `ephyviewer `_ package + Installing backends ------------------- @@ -85,6 +88,28 @@ Finally, if you wish to set up another cloud provider, follow the instruction fr `kachery-cloud `_ package ("Using your own storage bucket"). +ephyviewer +^^^^^^^^^^ + +This backend is Qt based with PyQt5, PyQt6 or PySide6 support. Qt is sometimes tedious to install. + + +For pip based install, run: + +.. code-block:: bash + + pip install PySide6 ephyviewer + + +Anaconda user will have a better experience with this: + +.. code-block:: bash + + conda install pyqt=5 + pip install ephyviewer + + + Usage ----- @@ -215,6 +240,21 @@ For example, here is how to combine the timeseries and sorting summary generated print(url) +ephyviewer +^^^^^^^^^^ + + +The :code:`ephyviewer` backend is only available for :py:func:`~spikeinterface.widgets.plot_traces()` functions. + + +.. code-block:: python + + plot_traces(recording, backend="ephyviewer", mode="line", show_channel_ids=True) + + +.. image:: ../images/plot_traces_ephyviewer.png + + Available plotting functions ---------------------------- From 74ae24ea47393db3a90b6fc9bc9765c3b833bb89 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 21 Sep 2023 08:55:34 +0000 Subject: [PATCH 139/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/traces.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index e046623eb7..7bb2126744 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -523,7 +523,7 @@ def plot_sortingview(self, data_plot, **backend_kwargs): backend_kwargs["display"] = False self.url = handle_display_and_url(self, self.view, **backend_kwargs) - + def plot_ephyviewer(self, data_plot, **backend_kwargs): import ephyviewer from ..preprocessing import depth_order @@ -534,15 +534,14 @@ def plot_ephyviewer(self, data_plot, **backend_kwargs): win = ephyviewer.MainViewer(debug=False, show_auto_scale=True) for k, rec in dp.recordings.items(): - if dp.order_channel_by_depth: rec = depth_order(rec, flip=True) sig_source = ephyviewer.SpikeInterfaceRecordingSource(recording=rec) view = ephyviewer.TraceViewer(source=sig_source, name=k) - view.params['scale_mode'] = 'by_channel' + view.params["scale_mode"] = "by_channel" if dp.show_channel_ids: - view.params['display_labels'] = True + view.params["display_labels"] = True view.auto_scale() win.add_view(view) @@ -550,7 +549,6 @@ def plot_ephyviewer(self, data_plot, **backend_kwargs): app.exec() - def _get_trace_list(recordings, channel_ids, time_range, segment_index, order=None, 
return_scaled=False): # function also used in ipywidgets plotter k0 = list(recordings.keys())[0] From 383040c5b063a7427ebe7dc7daf1945d5bf95a07 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 10:59:29 +0200 Subject: [PATCH 140/322] doc --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 4532b74b93..426a1e02e6 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -269,7 +269,7 @@ Available plotting functions * :py:func:`~spikeinterface.widgets.plot_spikes_on_traces` (backends: :code:`matplotlib`, :code:`ipywidgets`) * :py:func:`~spikeinterface.widgets.plot_template_metrics` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_template_similarity` (backends: ::code:`matplotlib`, :code:`sortingview`) -* :py:func:`~spikeinterface.widgets.plot_timeseries` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) +* :py:func:`~spikeinterface.widgets.plot_traces` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`, :code:`ephyviewer`) * :py:func:`~spikeinterface.widgets.plot_unit_depths` (backends: :code:`matplotlib`) * :py:func:`~spikeinterface.widgets.plot_unit_locations` (backends: :code:`matplotlib`, :code:`ipywidgets`, :code:`sortingview`) * :py:func:`~spikeinterface.widgets.plot_unit_summary` (backends: :code:`matplotlib`) From 1c48ce2cdbbd32c8e317f621805132c30e0e5efd Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 11:26:01 +0200 Subject: [PATCH 141/322] Update doc/modules/widgets.rst --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 426a1e02e6..5f71767a7d 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -101,7 +101,7 @@ For pip based install, run: pip install PySide6 ephyviewer -Anaconda user will have a better experience with this: +Anaconda users will have a better experience with this: .. code-block:: bash From b74bbae7a2944e220bde5cb4ff1fa63cb76c9d64 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 11:26:13 +0200 Subject: [PATCH 142/322] Update doc/modules/widgets.rst --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 5f71767a7d..4c8d2f9258 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -244,7 +244,7 @@ ephyviewer ^^^^^^^^^^ -The :code:`ephyviewer` backend is only available for :py:func:`~spikeinterface.widgets.plot_traces()` functions. +The :code:`ephyviewer` backend is currently only available for the :py:func:`~spikeinterface.widgets.plot_traces()` function. .. code-block:: python From 36e197fd784d228ea6ee798ce1a1169e1c0c8a5a Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 11:37:14 +0200 Subject: [PATCH 143/322] Update doc/modules/widgets.rst --- doc/modules/widgets.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 4c8d2f9258..8565e94fce 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -94,7 +94,7 @@ ephyviewer This backend is Qt based with PyQt5, PyQt6 or PySide6 support. Qt is sometimes tedious to install. -For pip based install, run: +For a pip-based installation, run: .. 
code-block:: bash From df0504c2748e4086304447fafb857efd4a2110c2 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 21 Sep 2023 11:38:46 +0200 Subject: [PATCH 144/322] adding some typing (#2031) --- src/spikeinterface/core/sparsity.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index 455edcfc80..8c5c62d568 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import numpy as np from .recording_tools import get_channel_distances, get_noise_levels @@ -125,7 +127,7 @@ def unit_id_to_channel_indices(self): self._unit_id_to_channel_indices[unit_id] = channel_inds return self._unit_id_to_channel_indices - def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: + def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str | int) -> np.ndarray: """ Sparsify the waveforms according to a unit_id corresponding sparsity. @@ -159,7 +161,7 @@ def sparsify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: return sparsified_waveforms - def densify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: + def densify_waveforms(self, waveforms: np.ndarray, unit_id: str | int) -> np.ndarray: """ Densify sparse waveforms that were sparisified according to a unit's channel sparsity. @@ -199,7 +201,7 @@ def densify_waveforms(self, waveforms: np.ndarray, unit_id: str) -> np.ndarray: def are_waveforms_dense(self, waveforms: np.ndarray) -> bool: return waveforms.shape[-1] == self.num_channels - def are_waveforms_sparse(self, waveforms: np.ndarray, unit_id: str) -> bool: + def are_waveforms_sparse(self, waveforms: np.ndarray, unit_id: str | int) -> bool: non_zero_indices = self.unit_id_to_channel_indices[unit_id] num_active_channels = len(non_zero_indices) return waveforms.shape[-1] == num_active_channels From e3cb9bb14ee56e07cbc251556482b9a861d465a2 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 12:00:26 +0200 Subject: [PATCH 145/322] Typing and docstrings --- .../extractors/phykilosortextractors.py | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index 2769e03344..d32846dd79 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -1,3 +1,6 @@ +from __future__ import __annotations__ + +from typing import Optional, List from pathlib import Path import numpy as np @@ -13,7 +16,7 @@ class BasePhyKilosortSortingExtractor(BaseSorting): ---------- folder_path: str or Path Path to the output Phy folder (containing the params.py) - exclude_cluster_groups: list or str, optional + exclude_cluster_groups: list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). keep_good_only : bool, default: True Whether to only keep good units. 
@@ -33,11 +36,11 @@ class BasePhyKilosortSortingExtractor(BaseSorting): def __init__( self, - folder_path, - exclude_cluster_groups=None, - keep_good_only=False, - remove_empty_units=False, - load_all_cluster_properties=True, + folder_path: Path | str, + exclude_cluster_groups: Optional[List[str] | str] = None, + keep_good_only: bool = False, + remove_empty_units: bool = False, + load_all_cluster_properties: bool = True, ): try: import pandas as pd @@ -199,7 +202,7 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): ---------- folder_path: str or Path Path to the output Phy folder (containing the params.py). - exclude_cluster_groups: list or str, optional + exclude_cluster_groups: list or str, default: None Cluster groups to exclude (e.g. "noise" or ["noise", "mua"]). load_all_cluster_properties : bool, default: True If True, all cluster properties are loaded from the tsv/csv files. @@ -213,7 +216,12 @@ class PhySortingExtractor(BasePhyKilosortSortingExtractor): extractor_name = "PhySorting" name = "phy" - def __init__(self, folder_path, exclude_cluster_groups=None, load_all_cluster_properties=True): + def __init__( + self, + folder_path: Path | str, + exclude_cluster_groups: Optional[List[str] | str] = None, + load_all_cluster_properties: bool = True, + ): BasePhyKilosortSortingExtractor.__init__( self, folder_path, @@ -250,7 +258,7 @@ class KiloSortSortingExtractor(BasePhyKilosortSortingExtractor): extractor_name = "KiloSortSorting" name = "kilosort" - def __init__(self, folder_path, keep_good_only=False, remove_empty_units=True): + def __init__(self, folder_path: Path | str, keep_good_only: bool = False, remove_empty_units: bool = True): BasePhyKilosortSortingExtractor.__init__( self, folder_path, From 195f03c2a710dadc9978cc9f9369f571a7e31554 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 12:17:12 +0200 Subject: [PATCH 146/322] oups --- src/spikeinterface/extractors/phykilosortextractors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index d32846dd79..96c0415c65 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -1,4 +1,4 @@ -from __future__ import __annotations__ +from __future__ import annotations from typing import Optional, List from pathlib import Path From 8e3324b77849a00467fc75f146663ee39201204c Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 21 Sep 2023 13:26:14 +0200 Subject: [PATCH 147/322] List -> list --- src/spikeinterface/extractors/phykilosortextractors.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/extractors/phykilosortextractors.py b/src/spikeinterface/extractors/phykilosortextractors.py index 96c0415c65..05aee160f5 100644 --- a/src/spikeinterface/extractors/phykilosortextractors.py +++ b/src/spikeinterface/extractors/phykilosortextractors.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Optional, List +from typing import Optional from pathlib import Path import numpy as np @@ -37,7 +37,7 @@ class BasePhyKilosortSortingExtractor(BaseSorting): def __init__( self, folder_path: Path | str, - exclude_cluster_groups: Optional[List[str] | str] = None, + exclude_cluster_groups: Optional[list[str] | str] = None, keep_good_only: bool = False, remove_empty_units: bool = False, load_all_cluster_properties: bool = True, @@ -219,7 +219,7 @@ class 
PhySortingExtractor(BasePhyKilosortSortingExtractor): def __init__( self, folder_path: Path | str, - exclude_cluster_groups: Optional[List[str] | str] = None, + exclude_cluster_groups: Optional[list[str] | str] = None, load_all_cluster_properties: bool = True, ): BasePhyKilosortSortingExtractor.__init__( From 9ba6fc6cbf0b0fd3d7bfa0b22108c48a05770b67 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 21 Sep 2023 14:01:25 +0200 Subject: [PATCH 148/322] Update doc/how_to/load_matlab_data.rst Co-authored-by: Alessio Buccino --- doc/how_to/load_matlab_data.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index 3943fbd30f..aaca718096 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -93,7 +93,7 @@ If your data in MATLAB is stored as `int16`, and you know the gain and offset, y recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) -This will equip your recording object with capabilities to convert the data to float values in uV using the `get_traces()` method with the `return_scaled` parameter set to `True`. +This will equip your recording object with capabilities to convert the data to float values in uV using the :code:`get_traces()` method with the :code:`return_scaled` parameter set to :code:`True`. .. note:: From e964731b33401db1757ce813d2078c00a36dcf34 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 16:36:31 +0200 Subject: [PATCH 149/322] Start refactor ipywidgets plot_traces --- src/spikeinterface/widgets/traces.py | 29 +- .../widgets/utils_ipywidgets.py | 251 ++++++++++++++++-- 2 files changed, 254 insertions(+), 26 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index 7bb2126744..c6e36387f8 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -276,11 +276,16 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display + import ipywidgets.widgets as W from .utils_ipywidgets import ( check_ipywidget_backend, make_timeseries_controller, make_channel_controller, make_scale_controller, + + TimeSlider, + ScaleWidget, + ) check_ipywidget_backend() @@ -308,6 +313,8 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): t_start = 0.0 t_stop = rec0.get_num_samples(segment_index=0) / rec0.get_sampling_frequency() + + ts_widget, ts_controller = make_timeseries_controller( t_start, t_stop, @@ -319,6 +326,22 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): width_cm, ) + # some widgets + self.time_slider = TimeSlider( + durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())], + sampling_frequency=rec0.sampling_frequency, + ) + self.layer_selector = W.Dropdown(description="layer", options=data_plot["layer_keys"], + layout=W.Layout(width="5cm"),) + self.mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=data_plot["mode"], + layout=W.Layout(width="5cm"),) + self.scaler = ScaleWidget() + left_sidebar = W.VBox( + children=[self.layer_selector, self.mode_selector, self.scaler], + layout=W.Layout(width="5cm"), + ) + + ch_widget, ch_controller = make_channel_controller(rec0, width_cm=ratios[2] * width_cm, height_cm=height_cm) scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) @@ -346,8 +369,10 @@ def plot_ipywidgets(self, 
data_plot, **backend_kwargs): self.widget = widgets.AppLayout( center=self.figure.canvas, - footer=ts_widget, - left_sidebar=scale_widget, + # footer=ts_widget, + footer=self.time_slider, + # left_sidebar=scale_widget, + left_sidebar = left_sidebar, right_sidebar=ch_widget, pane_heights=[0, 6, 1], pane_widths=ratios, diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index a7c571d1f0..674a2d2cc7 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -1,4 +1,6 @@ -import ipywidgets.widgets as widgets +import ipywidgets.widgets as W +import traitlets + import numpy as np @@ -10,20 +12,20 @@ def check_ipywidget_backend(): def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm): - time_slider = widgets.FloatSlider( + time_slider = W.FloatSlider( orientation="horizontal", description="time:", value=time_range[0], min=t_start, max=t_stop, continuous_update=False, - layout=widgets.Layout(width=f"{width_cm}cm"), + layout=W.Layout(width=f"{width_cm}cm"), ) - layer_selector = widgets.Dropdown(description="layer", options=layer_keys) - segment_selector = widgets.Dropdown(description="segment", options=list(range(num_segments))) - window_sizer = widgets.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") - mode_selector = widgets.Dropdown(options=["line", "map"], description="mode", value=mode) - all_layers = widgets.Checkbox(description="plot all layers", value=all_layers) + layer_selector = W.Dropdown(description="layer", options=layer_keys) + segment_selector = W.Dropdown(description="segment", options=list(range(num_segments))) + window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") + mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode) + all_layers = W.Checkbox(description="plot all layers", value=all_layers) controller = { "layer_key": layer_selector, @@ -33,32 +35,32 @@ def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_r "mode": mode_selector, "all_layers": all_layers, } - widget = widgets.VBox( - [time_slider, widgets.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] + widget = W.VBox( + [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] ) return widget, controller def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm): - unit_label = widgets.Label(value="units:") + unit_label = W.Label(value="units:") - unit_selector = widgets.SelectMultiple( + unit_selector = W.SelectMultiple( options=all_unit_ids, value=list(unit_ids), disabled=False, - layout=widgets.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), + layout=W.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), ) controller = {"unit_ids": unit_selector} - widget = widgets.VBox([unit_label, unit_selector]) + widget = W.VBox([unit_label, unit_selector]) return widget, controller def make_channel_controller(recording, width_cm, height_cm): - channel_label = widgets.Label("channel indices:", layout=widgets.Layout(justify_content="center")) - channel_selector = widgets.IntRangeSlider( + channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center")) + channel_selector = W.IntRangeSlider( value=[0, recording.get_num_channels()], min=0, max=recording.get_num_channels(), @@ -68,37 +70,238 @@ def 
make_channel_controller(recording, width_cm, height_cm): orientation="vertical", readout=True, readout_format="d", - layout=widgets.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), + layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), ) controller = {"channel_inds": channel_selector} - widget = widgets.VBox([channel_label, channel_selector]) + widget = W.VBox([channel_label, channel_selector]) return widget, controller def make_scale_controller(width_cm, height_cm): - scale_label = widgets.Label("Scale", layout=widgets.Layout(justify_content="center")) + scale_label = W.Label("Scale", layout=W.Layout(justify_content="center")) - plus_selector = widgets.Button( + plus_selector = W.Button( description="", disabled=False, button_style="", # 'success', 'info', 'warning', 'danger' or '' tooltip="Increase scale", icon="arrow-up", - layout=widgets.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), ) - minus_selector = widgets.Button( + minus_selector = W.Button( description="", disabled=False, button_style="", # 'success', 'info', 'warning', 'danger' or '' tooltip="Decrease scale", icon="arrow-down", - layout=widgets.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), ) controller = {"plus": plus_selector, "minus": minus_selector} - widget = widgets.VBox([scale_label, plus_selector, minus_selector]) + widget = W.VBox([scale_label, plus_selector, minus_selector]) return widget, controller + + + +class TimeSlider(W.HBox): + + position = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) + + def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): + + + self.num_segments = len(durations) + self.frame_limits = [int(sampling_frequency * d) for d in durations] + self.sampling_frequency = sampling_frequency + start_frame = int(time_range[0] * sampling_frequency) + end_frame = int(time_range[1] * sampling_frequency) + + self.frame_range = (start_frame, end_frame) + + self.segment_index = 0 + self.position = (start_frame, end_frame, self.segment_index) + + + layout = W.Layout(align_items="center", width="1.5cm", height="100%") + but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout) + but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout) + + but_left.on_click(self.move_left) + but_right.on_click(self.move_right) + + self.move_size = W.Dropdown(options=['10 ms', '100 ms', '1 s', '10 s', '1 m', '30 m', '1 h',], # '6 h', '24 h' + value='1 s', + description='', + layout = W.Layout(width="2cm") + ) + + # DatetimePicker is only for ipywidget v8 (which is not working in vscode 2023-03) + self.time_label = W.Text(value=f'{time_range[0]}',description='', + disabled=False, layout=W.Layout(width='5.5cm')) + self.time_label.observe(self.time_label_changed, names='value', type="change") + + + self.slider = W.IntSlider( + orientation='horizontal', + # description='time:', + value=start_frame, + min=0, + max=self.frame_limits[self.segment_index], + readout=False, + continuous_update=False, + layout=W.Layout(width=f'70%') + ) + + self.slider.observe(self.slider_moved, names='value', type="change") + + delta_s = np.diff(self.frame_range) / sampling_frequency + + self.window_sizer = W.BoundedFloatText(value=delta_s, step=1, + min=0.01, max=30., + description='win 
(s)', + layout=W.Layout(width='auto') + # layout=W.Layout(width=f'10%') + ) + self.window_sizer.observe(self.win_size_changed, names='value', type="change") + + self.segment_selector = W.Dropdown(description="segment", options=list(range(self.num_segments))) + self.segment_selector.observe(self.segment_changed, names='value', type="change") + + super(W.HBox, self).__init__(children=[self.segment_selector, but_left, self.move_size, but_right, + self.slider, self.time_label, self.window_sizer], + layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs) + + self.observe(self.position_changed, names=['position'], type="change") + + def position_changed(self, change=None): + + self.unobserve(self.position_changed, names=['position'], type="change") + + start, stop, seg_index = self.position + if seg_index < 0 or seg_index >= self.num_segments: + self.position = change['old'] + return + if start < 0 or stop < 0: + self.position = change['old'] + return + if start >= self.frame_limits[seg_index] or start > self.frame_limits[seg_index]: + self.position = change['old'] + return + + self.segment_selector.value = seg_index + self.update_time(new_frame=start, update_slider=True, update_label=True) + delta_s = (stop - start) / self.sampling_frequency + self.window_sizer.value = delta_s + + self.observe(self.position_changed, names=['position'], type="change") + + def update_time(self, new_frame=None, new_time=None, update_slider=False, update_label=False): + if new_frame is None and new_time is None: + start_frame = self.slider.value + elif new_frame is None: + start_frame = int(new_time * self.sampling_frequency) + else: + start_frame = new_frame + delta_s = self.window_sizer.value + end_frame = start_frame + int(delta_s * self.sampling_frequency) + + # clip + start_frame = max(0, start_frame) + end_frame = min(self.frame_limits[self.segment_index], end_frame) + + + start_time = start_frame / self.sampling_frequency + + if update_label: + self.time_label.unobserve(self.time_label_changed, names='value', type="change") + self.time_label.value = f'{start_time}' + self.time_label.observe(self.time_label_changed, names='value', type="change") + + if update_slider: + self.slider.unobserve(self.slider_moved, names='value', type="change") + self.slider.value = start_frame + self.slider.observe(self.slider_moved, names='value', type="change") + + self.frame_range = (start_frame, end_frame) + + def time_label_changed(self, change=None): + try: + new_time = float(self.time_label.value) + except: + new_time = None + if new_time is not None: + self.update_time(new_time=new_time, update_slider=True) + + + def win_size_changed(self, change=None): + self.update_time() + + def slider_moved(self, change=None): + new_frame = self.slider.value + self.update_time(new_frame=new_frame, update_label=True) + + def move(self, sign): + value, units = self.move_size.value.split(' ') + value = int(value) + delta_s = (sign * np.timedelta64(value, units)) / np.timedelta64(1, 's') + delta_sample = int(delta_s * self.sampling_frequency) + + new_frame = self.frame_range[0] + delta_sample + self.slider.value = new_frame + + def move_left(self, change=None): + self.move(-1) + + def move_right(self, change=None): + self.move(+1) + + def segment_changed(self, change=None): + self.segment_index = self.segment_selector.value + + self.slider.unobserve(self.slider_moved, names='value', type="change") + # self.slider.value = 0 + self.slider.max = self.frame_limits[self.segment_index] + 
self.slider.observe(self.slider_moved, names='value', type="change") + + self.update_time(new_frame=0, update_slider=True, update_label=True) + + + +class ScaleWidget(W.VBox): + def __init__(self, **kwargs): + scale_label = W.Label("Scale", + layout=W.Layout(layout=W.Layout(width='95%'), + justify_content="center")) + + self.plus_selector = W.Button( + description="", + disabled=False, + button_style="", # 'success', 'info', 'warning', 'danger' or '' + tooltip="Increase scale", + icon="arrow-up", + # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width='95%'), + ) + + self.minus_selector = W.Button( + description="", + disabled=False, + button_style="", # 'success', 'info', 'warning', 'danger' or '' + tooltip="Decrease scale", + icon="arrow-down", + # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), + layout=W.Layout(width='95%'), + ) + + # controller = {"plus": plus_selector, "minus": minus_selector} + # widget = W.VBox([scale_label, plus_selector, minus_selector]) + + + super(W.VBox, self).__init__(children=[scale_label, self.plus_selector, self.minus_selector], + # layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs) From 389737efe1330f1f75afb73caedb41bb6bf84b4d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 20:58:38 +0200 Subject: [PATCH 150/322] wip refactor plot traces ipywidget --- src/spikeinterface/widgets/traces.py | 126 ++++++++++++++---- .../widgets/utils_ipywidgets.py | 62 ++++++--- 2 files changed, 145 insertions(+), 43 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index c6e36387f8..efd32ffb24 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -279,9 +279,9 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import ipywidgets.widgets as W from .utils_ipywidgets import ( check_ipywidget_backend, - make_timeseries_controller, + # make_timeseries_controller, make_channel_controller, - make_scale_controller, + # make_scale_controller, TimeSlider, ScaleWidget, @@ -315,21 +315,22 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): - ts_widget, ts_controller = make_timeseries_controller( - t_start, - t_stop, - data_plot["layer_keys"], - rec0.get_num_segments(), - data_plot["time_range"], - data_plot["mode"], - False, - width_cm, - ) + # ts_widget, ts_controller = make_timeseries_controller( + # t_start, + # t_stop, + # data_plot["layer_keys"], + # rec0.get_num_segments(), + # data_plot["time_range"], + # data_plot["mode"], + # False, + # width_cm, + # ) # some widgets self.time_slider = TimeSlider( durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())], sampling_frequency=rec0.sampling_frequency, + # layout=W.Layout(height="2cm"), ) self.layer_selector = W.Dropdown(description="layer", options=data_plot["layer_keys"], layout=W.Layout(width="5cm"),) @@ -338,22 +339,22 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.scaler = ScaleWidget() left_sidebar = W.VBox( children=[self.layer_selector, self.mode_selector, self.scaler], - layout=W.Layout(width="5cm"), + layout=W.Layout(width="3.5cm"), ) ch_widget, ch_controller = make_channel_controller(rec0, width_cm=ratios[2] * width_cm, height_cm=height_cm) - scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) + # scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) - 
self.controller = ts_controller - self.controller.update(ch_controller) - self.controller.update(scale_controller) + # self.controller = ts_controller + # self.controller.update(ch_controller) + # self.controller.update(scale_controller) self.recordings = data_plot["recordings"] self.return_scaled = data_plot["return_scaled"] self.list_traces = None - self.actual_segment_index = self.controller["segment_index"].value + # self.actual_segment_index = self.controller["segment_index"].value self.rec0 = self.recordings[self.data_plot["layer_keys"][0]] self.t_stops = [ @@ -361,11 +362,11 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): for seg_index in range(self.rec0.get_num_segments()) ] - for w in self.controller.values(): - if isinstance(w, widgets.Button): - w.on_click(self._update_ipywidget) - else: - w.observe(self._update_ipywidget) + # for w in self.controller.values(): + # if isinstance(w, widgets.Button): + # w.on_click(self._update_ipywidget) + # else: + # w.observe(self._update_ipywidget) self.widget = widgets.AppLayout( center=self.figure.canvas, @@ -379,12 +380,89 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): ) # a first update - self._update_ipywidget(None) + # self._update_ipywidget(None) + + self._retrieve_traces() + self._update_plot() + + # only layer selector and time change generate a new traces retrieve + self.time_slider.observe(self._retrieve_traces, names='value', type="change") + self.layer_selector.observe(self._retrieve_traces, names='value', type="change") + # other widgets only refresh + self.scaler.observe(self._update_plot, names='value', type="change") + self.mode_selector.observe(self._update_plot, names='value', type="change") + if backend_kwargs["display"]: # self.check_backend() display(self.widget) + + + def _retrieve_traces(self, change=None): + # done when: + # * time or window is changes + # * layer is changed + + # TODO connect with channel selector + channel_ids = self.rec0.channel_ids + + # all_channel_ids = self.recordings[list(self.recordings.keys())[0]].channel_ids + # if self.data_plot["order"] is not None: + # all_channel_ids = all_channel_ids[self.data_plot["order"]] + # channel_ids = all_channel_ids[channel_indices] + if self.data_plot["order_channel_by_depth"]: + order, _ = order_channels_by_depth(self.rec0, channel_ids) + else: + order = None + + start_frame, end_frame, segment_index = self.time_slider.value + time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency + + times, list_traces, frame_range, channel_ids = _get_trace_list( + self.recordings, channel_ids, time_range, segment_index, order, self.return_scaled + ) + self.list_traces = list_traces + + self._update_plot() + + def _update_plot(self, change=None): + # done when: + # * time or window is changed (after _retrive_traces) + # * layer is changed (after _retrive_traces) + #  * scale is change + # * mode is change + + data_plot = self.next_data_plot + + # matplotlib next_data_plot dict update at each call + data_plot["mode"] = self.mode_selector.value + # data_plot["frame_range"] = frame_range + # data_plot["time_range"] = time_range + data_plot["with_colorbar"] = False + # data_plot["recordings"] = recordings + # data_plot["layer_keys"] = layer_keys + # data_plot["list_traces"] = list_traces_plot + # data_plot["times"] = times + # data_plot["clims"] = clims + # data_plot["channel_ids"] = channel_ids + + list_traces = [traces * self.scaler.value for traces in self.list_traces] + data_plot["list_traces"] = list_traces + + backend_kwargs = 
{} + backend_kwargs["ax"] = self.ax + + self.ax.clear() + self.plot_matplotlib(data_plot, **backend_kwargs) + + fig = self.ax.figure + fig.canvas.draw() + fig.canvas.flush_events() + + + + def _update_ipywidget(self, change): import ipywidgets.widgets as widgets diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 674a2d2cc7..ad0ead7bc0 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -109,7 +109,7 @@ def make_scale_controller(width_cm, height_cm): class TimeSlider(W.HBox): - position = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) + value = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): @@ -123,10 +123,10 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): self.frame_range = (start_frame, end_frame) self.segment_index = 0 - self.position = (start_frame, end_frame, self.segment_index) + self.value = (start_frame, end_frame, self.segment_index) - layout = W.Layout(align_items="center", width="1.5cm", height="100%") + layout = W.Layout(align_items="center", width="2cm", hight="1.5cm") but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout) but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout) @@ -176,21 +176,21 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): layout=W.Layout(align_items="center", width="100%", height="100%"), **kwargs) - self.observe(self.position_changed, names=['position'], type="change") + self.observe(self.value_changed, names=['value'], type="change") - def position_changed(self, change=None): + def value_changed(self, change=None): - self.unobserve(self.position_changed, names=['position'], type="change") + self.unobserve(self.value_changed, names=['value'], type="change") - start, stop, seg_index = self.position + start, stop, seg_index = self.value if seg_index < 0 or seg_index >= self.num_segments: - self.position = change['old'] + self.value = change['old'] return if start < 0 or stop < 0: - self.position = change['old'] + self.value = change['old'] return if start >= self.frame_limits[seg_index] or start > self.frame_limits[seg_index]: - self.position = change['old'] + self.value = change['old'] return self.segment_selector.value = seg_index @@ -198,7 +198,7 @@ def position_changed(self, change=None): delta_s = (stop - start) / self.sampling_frequency self.window_sizer.value = delta_s - self.observe(self.position_changed, names=['position'], type="change") + self.observe(self.value_changed, names=['value'], type="change") def update_time(self, new_frame=None, new_time=None, update_slider=False, update_label=False): if new_frame is None and new_time is None: @@ -228,6 +228,7 @@ def update_time(self, new_frame=None, new_time=None, update_slider=False, update self.slider.observe(self.slider_moved, names='value', type="change") self.frame_range = (start_frame, end_frame) + self.value = (start_frame, end_frame, self.segment_index) def time_label_changed(self, change=None): try: @@ -273,8 +274,14 @@ def segment_changed(self, change=None): class ScaleWidget(W.VBox): - def __init__(self, **kwargs): - scale_label = W.Label("Scale", + value = traitlets.Float() + + def __init__(self, value=1., factor=1.2, **kwargs): + + assert factor > 1. 
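# A minimal usage sketch of the ScaleWidget being defined in this hunk (illustrative
# only, assuming the defaults shown here, value=1. and factor=1.2; not lines of the patch):
#     scaler = ScaleWidget()
#     scaler.observe(lambda change: print(change["new"]), names="value")
#     scaler.plus_selector.click()   # value -> 1.2 (multiplied by factor)
#     scaler.minus_selector.click()  # value -> 1.0 (divided by factor)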
+ self.factor = factor + + self.scale_label = W.Label("Scale", layout=W.Layout(layout=W.Layout(width='95%'), justify_content="center")) @@ -285,7 +292,7 @@ def __init__(self, **kwargs): tooltip="Increase scale", icon="arrow-up", # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - layout=W.Layout(width='95%'), + layout=W.Layout(width='60%', align_self='center'), ) self.minus_selector = W.Button( @@ -295,13 +302,30 @@ def __init__(self, **kwargs): tooltip="Decrease scale", icon="arrow-down", # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - layout=W.Layout(width='95%'), + layout=W.Layout(width='60%', align_self='center'), ) - # controller = {"plus": plus_selector, "minus": minus_selector} - # widget = W.VBox([scale_label, plus_selector, minus_selector]) + self.plus_selector.on_click(self.plus_clicked) + self.minus_selector.on_click(self.minus_clicked) - - super(W.VBox, self).__init__(children=[scale_label, self.plus_selector, self.minus_selector], + self.value = 1. + super(W.VBox, self).__init__(children=[self.plus_selector, self.scale_label, self.minus_selector], # layout=W.Layout(align_items="center", width="100%", height="100%"), **kwargs) + + self.update_label() + self.observe(self.value_changed, names=['value'], type="change") + + def update_label(self): + self.scale_label.value = f"Scale: {self.value:0.2f}" + + + def plus_clicked(self, change=None): + self.value = self.value * self.factor + + def minus_clicked(self, change=None): + self.value = self.value / self.factor + + + def value_changed(self, change=None): + self.update_label() From e5995f2aa6445fd878e1c0881f11299f8ae22a2d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 21 Sep 2023 22:59:53 +0200 Subject: [PATCH 151/322] ipywidget backend refactor wip --- src/spikeinterface/widgets/traces.py | 298 +++++------------- .../widgets/utils_ipywidgets.py | 175 ++++++---- 2 files changed, 190 insertions(+), 283 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index efd32ffb24..d107c5cb23 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -280,23 +280,23 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): from .utils_ipywidgets import ( check_ipywidget_backend, # make_timeseries_controller, - make_channel_controller, + # make_channel_controller, # make_scale_controller, - TimeSlider, + ChannelSelector, ScaleWidget, - ) check_ipywidget_backend() self.next_data_plot = data_plot.copy() - self.next_data_plot["add_legend"] = False + - recordings = data_plot["recordings"] + self.recordings = data_plot["recordings"] # first layer - rec0 = recordings[data_plot["layer_keys"][0]] + # rec0 = recordings[data_plot["layer_keys"][0]] + rec0 = self.rec0 = self.recordings[self.data_plot["layer_keys"][0]] cm = 1 / 2.54 @@ -310,107 +310,92 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.figure, self.ax = plt.subplots(figsize=(0.9 * ratios[1] * width_cm * cm, height_cm * cm)) plt.show() - t_start = 0.0 - t_stop = rec0.get_num_samples(segment_index=0) / rec0.get_sampling_frequency() - - - - # ts_widget, ts_controller = make_timeseries_controller( - # t_start, - # t_stop, - # data_plot["layer_keys"], - # rec0.get_num_segments(), - # data_plot["time_range"], - # data_plot["mode"], - # False, - # width_cm, - # ) - # some widgets self.time_slider = TimeSlider( durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())], sampling_frequency=rec0.sampling_frequency, # 
layout=W.Layout(height="2cm"), ) - self.layer_selector = W.Dropdown(description="layer", options=data_plot["layer_keys"], - layout=W.Layout(width="5cm"),) - self.mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=data_plot["mode"], - layout=W.Layout(width="5cm"),) + + start_frame = int(data_plot["time_range"][0] * rec0.sampling_frequency) + end_frame = int(data_plot["time_range"][1] * rec0.sampling_frequency) + + self.time_slider.value = start_frame, end_frame, data_plot["segment_index"] + + _layer_keys = data_plot["layer_keys"] + if len(_layer_keys) > 1: + _layer_keys = ['ALL'] + _layer_keys + self.layer_selector = W.Dropdown(options=_layer_keys, + layout=W.Layout(width="95%"), + ) + self.mode_selector = W.Dropdown(options=["line", "map"], value=data_plot["mode"], + # layout=W.Layout(width="5cm"), + layout=W.Layout(width="95%"), + ) self.scaler = ScaleWidget() + self.channel_selector = ChannelSelector(self.rec0.channel_ids) + left_sidebar = W.VBox( - children=[self.layer_selector, self.mode_selector, self.scaler], + children=[ + W.Label(value="layer"), + self.layer_selector, + W.Label(value="mode"), + self.mode_selector, + self.scaler, + # self.channel_selector, + ], layout=W.Layout(width="3.5cm"), + align_items='center', ) - - ch_widget, ch_controller = make_channel_controller(rec0, width_cm=ratios[2] * width_cm, height_cm=height_cm) - - # scale_widget, scale_controller = make_scale_controller(width_cm=ratios[0] * width_cm, height_cm=height_cm) - - # self.controller = ts_controller - # self.controller.update(ch_controller) - # self.controller.update(scale_controller) - - self.recordings = data_plot["recordings"] self.return_scaled = data_plot["return_scaled"] - self.list_traces = None - # self.actual_segment_index = self.controller["segment_index"].value - - self.rec0 = self.recordings[self.data_plot["layer_keys"][0]] - self.t_stops = [ - self.rec0.get_num_samples(segment_index=seg_index) / self.rec0.get_sampling_frequency() - for seg_index in range(self.rec0.get_num_segments()) - ] - - # for w in self.controller.values(): - # if isinstance(w, widgets.Button): - # w.on_click(self._update_ipywidget) - # else: - # w.observe(self._update_ipywidget) self.widget = widgets.AppLayout( center=self.figure.canvas, - # footer=ts_widget, footer=self.time_slider, - # left_sidebar=scale_widget, left_sidebar = left_sidebar, - right_sidebar=ch_widget, + right_sidebar=self.channel_selector, pane_heights=[0, 6, 1], pane_widths=ratios, ) # a first update - # self._update_ipywidget(None) - self._retrieve_traces() self._update_plot() - # only layer selector and time change generate a new traces retrieve + # callbacks: + # some widgets generate a full retrieve + refresh self.time_slider.observe(self._retrieve_traces, names='value', type="change") self.layer_selector.observe(self._retrieve_traces, names='value', type="change") + self.channel_selector.observe(self._retrieve_traces, names='value', type="change") # other widgets only refresh self.scaler.observe(self._update_plot, names='value', type="change") - self.mode_selector.observe(self._update_plot, names='value', type="change") + # map is a special case because needs to check layer also + self.mode_selector.observe(self._mode_changed, names='value', type="change") - if backend_kwargs["display"]: # self.check_backend() display(self.widget) - + def _get_layers(self): + layer = self.layer_selector.value + if layer == 'ALL': + layer_keys = self.data_plot["layer_keys"] + else: + layer_keys = [layer] + if self.mode_selector.value == 
"map": + layer_keys = layer_keys[:1] + return layer_keys + + def _mode_changed(self, change=None): + if self.mode_selector.value == "map" and self.layer_selector.value == "ALL": + self.layer_selector.value = self.data_plot["layer_keys"][0] + else: + self._update_plot() def _retrieve_traces(self, change=None): - # done when: - # * time or window is changes - # * layer is changed + channel_ids = np.array(self.channel_selector.value) - # TODO connect with channel selector - channel_ids = self.rec0.channel_ids - - # all_channel_ids = self.recordings[list(self.recordings.keys())[0]].channel_ids - # if self.data_plot["order"] is not None: - # all_channel_ids = all_channel_ids[self.data_plot["order"]] - # channel_ids = all_channel_ids[channel_indices] if self.data_plot["order_channel_by_depth"]: order, _ = order_channels_by_depth(self.rec0, channel_ids) else: @@ -419,176 +404,61 @@ def _retrieve_traces(self, change=None): start_frame, end_frame, segment_index = self.time_slider.value time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency + self._selected_recordings = {k: self.recordings[k] for k in self._get_layers()} times, list_traces, frame_range, channel_ids = _get_trace_list( - self.recordings, channel_ids, time_range, segment_index, order, self.return_scaled + self._selected_recordings, channel_ids, time_range, segment_index, order, self.return_scaled ) - self.list_traces = list_traces + + self._channel_ids = channel_ids + self._list_traces = list_traces + self._times = times + self._time_range = time_range + self._frame_range = (start_frame, end_frame) + self._segment_index = segment_index self._update_plot() def _update_plot(self, change=None): - # done when: - # * time or window is changed (after _retrive_traces) - # * layer is changed (after _retrive_traces) - #  * scale is change - # * mode is change - data_plot = self.next_data_plot # matplotlib next_data_plot dict update at each call - data_plot["mode"] = self.mode_selector.value - # data_plot["frame_range"] = frame_range - # data_plot["time_range"] = time_range - data_plot["with_colorbar"] = False - # data_plot["recordings"] = recordings - # data_plot["layer_keys"] = layer_keys - # data_plot["list_traces"] = list_traces_plot - # data_plot["times"] = times - # data_plot["clims"] = clims - # data_plot["channel_ids"] = channel_ids - - list_traces = [traces * self.scaler.value for traces in self.list_traces] - data_plot["list_traces"] = list_traces - - backend_kwargs = {} - backend_kwargs["ax"] = self.ax - - self.ax.clear() - self.plot_matplotlib(data_plot, **backend_kwargs) - - fig = self.ax.figure - fig.canvas.draw() - fig.canvas.flush_events() - - - - - def _update_ipywidget(self, change): - import ipywidgets.widgets as widgets - - # if changing the layer_key, no need to retrieve and process traces - retrieve_traces = True - scale_up = False - scale_down = False - if change is not None: - for cname, c in self.controller.items(): - if isinstance(change, dict): - if change["owner"] is c and cname == "layer_key": - retrieve_traces = False - elif isinstance(change, widgets.Button): - if change is c and cname == "plus": - scale_up = True - if change is c and cname == "minus": - scale_down = True - - t_start = self.controller["t_start"].value - window = self.controller["window"].value - layer_key = self.controller["layer_key"].value - segment_index = self.controller["segment_index"].value - mode = self.controller["mode"].value - chan_start, chan_stop = self.controller["channel_inds"].value - - if mode == "line": - 
self.controller["all_layers"].layout.visibility = "visible" - all_layers = self.controller["all_layers"].value - elif mode == "map": - self.controller["all_layers"].layout.visibility = "hidden" - all_layers = False - - if all_layers: - self.controller["layer_key"].layout.visibility = "hidden" - else: - self.controller["layer_key"].layout.visibility = "visible" - - if chan_start == chan_stop: - chan_stop += 1 - channel_indices = np.arange(chan_start, chan_stop) - - t_stop = self.t_stops[segment_index] - if self.actual_segment_index != segment_index: - # change time_slider limits - self.controller["t_start"].max = t_stop - self.actual_segment_index = segment_index - - # protect limits - if t_start >= t_stop - window: - t_start = t_stop - window - - time_range = np.array([t_start, t_start + window]) - data_plot = self.next_data_plot + mode = self.mode_selector.value + layer_keys = self._get_layers() - if retrieve_traces: - all_channel_ids = self.recordings[list(self.recordings.keys())[0]].channel_ids - if self.data_plot["order"] is not None: - all_channel_ids = all_channel_ids[self.data_plot["order"]] - channel_ids = all_channel_ids[channel_indices] - if self.data_plot["order_channel_by_depth"]: - order, _ = order_channels_by_depth(self.rec0, channel_ids) - else: - order = None - times, list_traces, frame_range, channel_ids = _get_trace_list( - self.recordings, channel_ids, time_range, segment_index, order, self.return_scaled - ) - self.list_traces = list_traces - else: - times = data_plot["times"] - list_traces = data_plot["list_traces"] - frame_range = data_plot["frame_range"] - channel_ids = data_plot["channel_ids"] - - if all_layers: - layer_keys = self.data_plot["layer_keys"] - recordings = self.recordings - list_traces_plot = self.list_traces - else: - layer_keys = [layer_key] - recordings = {layer_key: self.recordings[layer_key]} - list_traces_plot = [self.list_traces[list(self.recordings.keys()).index(layer_key)]] - - if scale_up: - if mode == "line": - data_plot["vspacing"] *= 0.8 - elif mode == "map": - data_plot["clims"] = { - layer: (1.2 * val[0], 1.2 * val[1]) for layer, val in self.data_plot["clims"].items() - } - if scale_down: - if mode == "line": - data_plot["vspacing"] *= 1.2 - elif mode == "map": - data_plot["clims"] = { - layer: (0.8 * val[0], 0.8 * val[1]) for layer, val in self.data_plot["clims"].items() - } - - self.next_data_plot["vspacing"] = data_plot["vspacing"] - self.next_data_plot["clims"] = data_plot["clims"] + data_plot["mode"] = mode + data_plot["frame_range"] = self._frame_range + data_plot["time_range"] = self._time_range + data_plot["with_colorbar"] = False + data_plot["recordings"] = self._selected_recordings + data_plot["add_legend"] = False if mode == "line": clims = None elif mode == "map": - clims = {layer_key: self.data_plot["clims"][layer_key]} + clims = {k: self.data_plot["clims"][k] for k in layer_keys} - # matplotlib next_data_plot dict update at each call - data_plot["mode"] = mode - data_plot["frame_range"] = frame_range - data_plot["time_range"] = time_range - data_plot["with_colorbar"] = False - data_plot["recordings"] = recordings - data_plot["layer_keys"] = layer_keys - data_plot["list_traces"] = list_traces_plot - data_plot["times"] = times data_plot["clims"] = clims - data_plot["channel_ids"] = channel_ids + data_plot["channel_ids"] = self._channel_ids + + data_plot["layer_keys"] = layer_keys + data_plot["colors"] = {k:self.data_plot["colors"][k] for k in layer_keys} + + list_traces = [traces * self.scaler.value for traces in 
self._list_traces] + data_plot["list_traces"] = list_traces + data_plot["times"] = self._times backend_kwargs = {} backend_kwargs["ax"] = self.ax + self.ax.clear() self.plot_matplotlib(data_plot, **backend_kwargs) + self.ax.set_title("") fig = self.ax.figure fig.canvas.draw() fig.canvas.flush_events() + def plot_sortingview(self, data_plot, **backend_kwargs): import sortingview.views as vv from .utils_sortingview import handle_display_and_url diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index ad0ead7bc0..ab2b51a7bb 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -11,35 +11,35 @@ def check_ipywidget_backend(): assert "ipympl" in mpl_backend, "To use the 'ipywidgets' backend, you have to set %matplotlib widget" -def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm): - time_slider = W.FloatSlider( - orientation="horizontal", - description="time:", - value=time_range[0], - min=t_start, - max=t_stop, - continuous_update=False, - layout=W.Layout(width=f"{width_cm}cm"), - ) - layer_selector = W.Dropdown(description="layer", options=layer_keys) - segment_selector = W.Dropdown(description="segment", options=list(range(num_segments))) - window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") - mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode) - all_layers = W.Checkbox(description="plot all layers", value=all_layers) - - controller = { - "layer_key": layer_selector, - "segment_index": segment_selector, - "window": window_sizer, - "t_start": time_slider, - "mode": mode_selector, - "all_layers": all_layers, - } - widget = W.VBox( - [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] - ) - - return widget, controller +# def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm): +# time_slider = W.FloatSlider( +# orientation="horizontal", +# description="time:", +# value=time_range[0], +# min=t_start, +# max=t_stop, +# continuous_update=False, +# layout=W.Layout(width=f"{width_cm}cm"), +# ) +# layer_selector = W.Dropdown(description="layer", options=layer_keys) +# segment_selector = W.Dropdown(description="segment", options=list(range(num_segments))) +# window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") +# mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode) +# all_layers = W.Checkbox(description="plot all layers", value=all_layers) + +# controller = { +# "layer_key": layer_selector, +# "segment_index": segment_selector, +# "window": window_sizer, +# "t_start": time_slider, +# "mode": mode_selector, +# "all_layers": all_layers, +# } +# widget = W.VBox( +# [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] +# ) + +# return widget, controller def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm): @@ -58,52 +58,52 @@ def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm): return widget, controller -def make_channel_controller(recording, width_cm, height_cm): - channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center")) - channel_selector = W.IntRangeSlider( - value=[0, recording.get_num_channels()], - min=0, - max=recording.get_num_channels(), 
- step=1, - disabled=False, - continuous_update=False, - orientation="vertical", - readout=True, - readout_format="d", - layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), - ) +# def make_channel_controller(recording, width_cm, height_cm): +# channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center")) +# channel_selector = W.IntRangeSlider( +# value=[0, recording.get_num_channels()], +# min=0, +# max=recording.get_num_channels(), +# step=1, +# disabled=False, +# continuous_update=False, +# orientation="vertical", +# readout=True, +# readout_format="d", +# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), +# ) - controller = {"channel_inds": channel_selector} - widget = W.VBox([channel_label, channel_selector]) +# controller = {"channel_inds": channel_selector} +# widget = W.VBox([channel_label, channel_selector]) - return widget, controller +# return widget, controller -def make_scale_controller(width_cm, height_cm): - scale_label = W.Label("Scale", layout=W.Layout(justify_content="center")) +# def make_scale_controller(width_cm, height_cm): +# scale_label = W.Label("Scale", layout=W.Layout(justify_content="center")) - plus_selector = W.Button( - description="", - disabled=False, - button_style="", # 'success', 'info', 'warning', 'danger' or '' - tooltip="Increase scale", - icon="arrow-up", - layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - ) +# plus_selector = W.Button( +# description="", +# disabled=False, +# button_style="", # 'success', 'info', 'warning', 'danger' or '' +# tooltip="Increase scale", +# icon="arrow-up", +# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), +# ) - minus_selector = W.Button( - description="", - disabled=False, - button_style="", # 'success', 'info', 'warning', 'danger' or '' - tooltip="Decrease scale", - icon="arrow-down", - layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - ) +# minus_selector = W.Button( +# description="", +# disabled=False, +# button_style="", # 'success', 'info', 'warning', 'danger' or '' +# tooltip="Decrease scale", +# icon="arrow-down", +# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), +# ) - controller = {"plus": plus_selector, "minus": minus_selector} - widget = W.VBox([scale_label, plus_selector, minus_selector]) +# controller = {"plus": plus_selector, "minus": minus_selector} +# widget = W.VBox([scale_label, plus_selector, minus_selector]) - return widget, controller +# return widget, controller @@ -126,7 +126,7 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): self.value = (start_frame, end_frame, self.segment_index) - layout = W.Layout(align_items="center", width="2cm", hight="1.5cm") + layout = W.Layout(align_items="center", width="2.5cm", height="1.cm") but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout) but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout) @@ -141,7 +141,7 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): # DatetimePicker is only for ipywidget v8 (which is not working in vscode 2023-03) self.time_label = W.Text(value=f'{time_range[0]}',description='', - disabled=False, layout=W.Layout(width='5.5cm')) + disabled=False, layout=W.Layout(width='2.5cm')) self.time_label.observe(self.time_label_changed, names='value', type="change") @@ -271,6 +271,43 @@ def 
segment_changed(self, change=None): self.update_time(new_frame=0, update_slider=True, update_label=True) +class ChannelSelector(W.VBox): + value = traitlets.List() + + def __init__(self, channel_ids, **kwargs): + self.channel_ids = list(channel_ids) + self.value = self.channel_ids + + channel_label = W.Label("Channels", layout=W.Layout(justify_content="center")) + n = len(channel_ids) + self.slider = W.IntRangeSlider( + value=[0, n], + min=0, + max=n, + step=1, + disabled=False, + continuous_update=False, + orientation="vertical", + readout=True, + readout_format="d", + # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), + layout=W.Layout(height="100%"), + ) + + + + super(W.VBox, self).__init__(children=[channel_label, self.slider], + layout=W.Layout(align_items="center"), + # layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs) + self.slider.observe(self.on_slider_changed, names=['value'], type="change") + # self.update_label() + # self.observe(self.value_changed, names=['value'], type="change") + + def on_slider_changed(self, change=None): + i0, i1 = self.slider.value + self.value = self.channel_ids[i0:i1] + class ScaleWidget(W.VBox): From 7b92c2153d4fad412823100fd77079e3cf286138 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 08:06:37 +0200 Subject: [PATCH 152/322] improve channel selector --- .../widgets/utils_ipywidgets.py | 38 +++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index ab2b51a7bb..705dd09f23 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -294,20 +294,52 @@ def __init__(self, channel_ids, **kwargs): layout=W.Layout(height="100%"), ) + # first channel are bottom: need reverse + self.selector = W.SelectMultiple( + options=self.channel_ids[::-1], + value=self.channel_ids[::-1], + disabled=False, + # layout=W.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), + layout=W.Layout(height="100%", width="2cm"), + ) + hbox = W.HBox(children=[self.slider, self.selector]) - - super(W.VBox, self).__init__(children=[channel_label, self.slider], + super(W.VBox, self).__init__(children=[channel_label, hbox], layout=W.Layout(align_items="center"), # layout=W.Layout(align_items="center", width="100%", height="100%"), **kwargs) self.slider.observe(self.on_slider_changed, names=['value'], type="change") - # self.update_label() + self.selector.observe(self.on_selector_changed, names=['value'], type="change") + + # TODO external value change # self.observe(self.value_changed, names=['value'], type="change") def on_slider_changed(self, change=None): i0, i1 = self.slider.value + + self.selector.unobserve(self.on_selector_changed, names=['value'], type="change") + self.selector.value = self.channel_ids[i0:i1][::-1] + self.selector.observe(self.on_selector_changed, names=['value'], type="change") + self.value = self.channel_ids[i0:i1] + def on_selector_changed(self, change=None): + channel_ids = self.selector.value + channel_ids = channel_ids[::-1] + + if len(channel_ids) > 0: + self.slider.unobserve(self.on_slider_changed, names=['value'], type="change") + i0 = self.channel_ids.index(channel_ids[0]) + i1 = self.channel_ids.index(channel_ids[-1]) + 1 + self.slider.value = (i0, i1) + self.slider.observe(self.on_slider_changed, names=['value'], type="change") + + self.value = channel_ids + + + + + class ScaleWidget(W.VBox): From 
c46a7cba4b1e937d40050d0061017256ab5dade3 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 22 Sep 2023 10:31:05 +0200 Subject: [PATCH 153/322] Allow to restrict sparsity --- .../postprocessing/amplitude_scalings.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 4dab68fdf8..3eac333781 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -68,7 +68,6 @@ def _run(self, **job_kwargs): delta_collision_samples = int(delta_collision_ms / 1000 * we.sampling_frequency) return_scaled = we._params["return_scaled"] - unit_ids = we.unit_ids if ms_before is not None: assert ( @@ -82,9 +81,16 @@ def _run(self, **job_kwargs): cut_out_before = int(ms_before / 1000 * we.sampling_frequency) if ms_before is not None else nbefore cut_out_after = int(ms_after / 1000 * we.sampling_frequency) if ms_after is not None else nafter - if we.is_sparse(): + if we.is_sparse() and self._params["sparsity"] is None: sparsity = we.sparsity - elif self._params["sparsity"] is not None: + elif we.is_sparse() and self._params["sparsity"] is not None: + sparsity = self._params["sparsity"] + # assert provided sparsity is sparser than the one in the waveform extractor + waveform_sparsity = we.sparsity + assert np.all( + np.sum(waveform_sparsity.mask, 1) - np.sum(sparsity.mask, 1) > 0 + ), "The provided sparsity needs to be sparser than the one in the waveform extractor!" + elif not we.is_sparse() and self._params["sparsity"] is not None: sparsity = self._params["sparsity"] else: if self._params["max_dense_channels"] is not None: @@ -362,7 +368,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) template = template[cut_out_before - sample_index :] elif sample_index + cut_out_after > end_frame + right: local_waveform = traces_with_margin[cut_out_start:, sparse_indices] - template = template[: -(sample_index + cut_out_after - end_frame)] + template = template[: -(sample_index + cut_out_after - end_frame - right)] else: local_waveform = traces_with_margin[cut_out_start:cut_out_end, sparse_indices] assert template.shape == local_waveform.shape From 2e305586d5b39bb8bfa89280057579a97726e93a Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 11:09:05 +0200 Subject: [PATCH 154/322] ipywidgets backend start UnitCOntroller --- src/spikeinterface/widgets/amplitudes.py | 69 ++++++++++--------- .../widgets/utils_ipywidgets.py | 39 +++++++++-- 2 files changed, 71 insertions(+), 37 deletions(-) diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py index 7ef6e0ff61..b60de98cb0 100644 --- a/src/spikeinterface/widgets/amplitudes.py +++ b/src/spikeinterface/widgets/amplitudes.py @@ -171,9 +171,10 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt - import ipywidgets.widgets as widgets + # import ipywidgets.widgets as widgets + import ipywidgets.widgets as W from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller, UnitSelector check_ipywidget_backend() @@ -188,60 +189,62 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): ratios = [0.15, 0.85] with plt.ioff(): - output = widgets.Output() 
+ output = W.Output() with output: self.figure = plt.figure(figsize=((ratios[1] * width_cm) * cm, height_cm * cm)) plt.show() - data_plot["unit_ids"] = data_plot["unit_ids"][:1] - unit_widget, unit_controller = make_unit_controller( - data_plot["unit_ids"], we.unit_ids, ratios[0] * width_cm, height_cm - ) + self.unit_selector = UnitSelector(we.unit_ids) + self.unit_selector.value = list(we.unit_ids)[:1] - plot_histograms = widgets.Checkbox( + self.checkbox_histograms = W.Checkbox( value=data_plot["plot_histograms"], - description="plot histograms", - disabled=False, + description="hist", + # disabled=False, ) - footer = plot_histograms - - self.controller = {"plot_histograms": plot_histograms} - self.controller.update(unit_controller) - - for w in self.controller.values(): - w.observe(self._update_ipywidget) + left_sidebar = W.VBox( + children=[ + self.unit_selector, + self.checkbox_histograms, + ], + layout = W.Layout(align_items="center", width="4cm", height="100%"), + ) - self.widget = widgets.AppLayout( + self.widget = W.AppLayout( center=self.figure.canvas, - left_sidebar=unit_widget, + left_sidebar=left_sidebar, pane_widths=ratios + [0], - footer=footer, ) # a first update - self._update_ipywidget(None) + self._full_update_plot() + + self.unit_selector.observe(self._update_plot, names='value', type="change") + self.checkbox_histograms.observe(self._full_update_plot, names='value', type="change") if backend_kwargs["display"]: display(self.widget) - def _update_ipywidget(self, change): + def _full_update_plot(self, change=None): self.figure.clear() + data_plot = self.next_data_plot + data_plot["unit_ids"] = self.unit_selector.value + data_plot["plot_histograms"] = self.checkbox_histograms.value + + backend_kwargs = dict(figure=self.figure, axes=None, ax=None) + self.plot_matplotlib(data_plot, **backend_kwargs) + self._update_plot() - unit_ids = self.controller["unit_ids"].value - plot_histograms = self.controller["plot_histograms"].value + def _update_plot(self, change=None): + for ax in self.axes.flatten(): + ax.clear() - # matplotlib next_data_plot dict update at each call data_plot = self.next_data_plot - data_plot["unit_ids"] = unit_ids - data_plot["plot_histograms"] = plot_histograms - - backend_kwargs = {} - # backend_kwargs["figure"] = self.fig - backend_kwargs["figure"] = self.figure - backend_kwargs["axes"] = None - backend_kwargs["ax"] = None + data_plot["unit_ids"] = self.unit_selector.value + data_plot["plot_histograms"] = self.checkbox_histograms.value + backend_kwargs = dict(figure=None, axes=self.axes, ax=None) self.plot_matplotlib(data_plot, **backend_kwargs) self.figure.canvas.draw() diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 705dd09f23..d2c41f234a 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -338,10 +338,6 @@ def on_selector_changed(self, change=None): - - - - class ScaleWidget(W.VBox): value = traitlets.Float() @@ -398,3 +394,38 @@ def minus_clicked(self, change=None): def value_changed(self, change=None): self.update_label() + + +class UnitSelector(W.VBox): + value = traitlets.List() + + def __init__(self, unit_ids, **kwargs): + self.unit_ids = list(unit_ids) + self.value = self.unit_ids + + label = W.Label("Units", layout=W.Layout(justify_content="center")) + + self.selector = W.SelectMultiple( + options=self.unit_ids, + value=self.unit_ids, + disabled=False, + layout=W.Layout(height="100%", width="2cm"), + ) + + 
super(W.VBox, self).__init__(children=[label, self.selector], + layout=W.Layout(align_items="center"), + **kwargs) + + self.selector.observe(self.on_selector_changed, names=['value'], type="change") + + self.observe(self.value_changed, names=['value'], type="change") + + def on_selector_changed(self, change=None): + unit_ids = self.selector.value + self.value = unit_ids + + def value_changed(self, change=None): + self.selector.unobserve(self.on_selector_changed, names=['value'], type="change") + self.selector.value = change['new'] + self.selector.observe(self.on_selector_changed, names=['value'], type="change") + From 4e31329d9aed376ecc41c4238a2f4836f94054ea Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 22 Sep 2023 11:37:18 +0200 Subject: [PATCH 155/322] Add spikes on border when generating sorting, PCA sparse return fixes --- src/spikeinterface/core/generate.py | 28 +++++++++++++++++ .../core/tests/test_generate.py | 30 +++++++++++++++++-- .../postprocessing/amplitude_scalings.py | 12 ++++---- .../postprocessing/principal_component.py | 15 ++++++++-- .../tests/common_extension_tests.py | 26 ++++++++++++++-- .../tests/test_principal_component.py | 12 ++++---- 6 files changed, 104 insertions(+), 19 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 401c498f03..741dd20000 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -123,6 +123,9 @@ def generate_sorting( firing_rates=3.0, empty_units=None, refractory_period_ms=3.0, # in ms + add_spikes_on_borders=False, + num_spikes_per_border=3, + border_size_samples=20, seed=None, ): """ @@ -142,6 +145,12 @@ def generate_sorting( List of units that will have no spikes. (used for testing mainly). refractory_period_ms : float, default: 3.0 The refractory period in ms + add_spikes_on_borders : bool, default: False + If True, spikes will be added close to the borders of the segments. + num_spikes_per_border : int, default: 3 + The number of spikes to add close to the borders of the segments. + border_size_samples : int, default: 20 + The size of the border in samples to add border spikes. 
seed : int, default: None The random seed @@ -151,11 +160,13 @@ def generate_sorting( The sorting object """ seed = _ensure_seed(seed) + rng = np.random.default_rng(seed) num_segments = len(durations) unit_ids = np.arange(num_units) spikes = [] for segment_index in range(num_segments): + num_samples = int(sampling_frequency * durations[segment_index]) times, labels = synthesize_random_firings( num_units=num_units, sampling_frequency=sampling_frequency, @@ -175,7 +186,23 @@ def generate_sorting( spikes_in_seg["unit_index"] = labels spikes_in_seg["segment_index"] = segment_index spikes.append(spikes_in_seg) + + if add_spikes_on_borders: + spikes_on_borders = np.zeros(2 * num_spikes_per_border, dtype=minimum_spike_dtype) + spikes_on_borders["segment_index"] = segment_index + spikes_on_borders["unit_index"] = rng.choice(num_units, size=2 * num_spikes_per_border, replace=True) + # at start + spikes_on_borders["sample_index"][:num_spikes_per_border] = rng.integers( + 0, border_size_samples, num_spikes_per_border + ) + # at end + spikes_on_borders["sample_index"][num_spikes_per_border:] = rng.integers( + num_samples - border_size_samples, num_samples, num_spikes_per_border + ) + spikes.append(spikes_on_borders) + spikes = np.concatenate(spikes) + spikes = spikes[np.lexsort((spikes["sample_index"], spikes["segment_index"]))] sorting = NumpySorting(spikes, sampling_frequency, unit_ids) @@ -596,6 +623,7 @@ def __init__( dtype = np.dtype(dtype).name # Cast to string for serialization if dtype not in ("float32", "float64"): raise ValueError(f"'dtype' must be 'float32' or 'float64' but is {dtype}") + assert strategy in ("tile_pregenerated", "on_the_fly"), "'strategy' must be 'tile_pregenerated' or 'on_the_fly'" BaseRecording.__init__(self, sampling_frequency=sampling_frequency, channel_ids=channel_ids, dtype=dtype) diff --git a/src/spikeinterface/core/tests/test_generate.py b/src/spikeinterface/core/tests/test_generate.py index 9ba5de42d6..3844e421ac 100644 --- a/src/spikeinterface/core/tests/test_generate.py +++ b/src/spikeinterface/core/tests/test_generate.py @@ -26,15 +26,38 @@ def test_generate_recording(): - # TODO even this is extenssivly tested in all other function + # TODO even this is extensively tested in all other functions pass def test_generate_sorting(): - # TODO even this is extenssivly tested in all other function + # TODO even this is extensively tested in all other functions pass +def test_generate_sorting_with_spikes_on_borders(): + num_spikes_on_borders = 10 + border_size_samples = 10 + segment_duration = 10 + for nseg in [1, 2, 3]: + sorting = generate_sorting( + durations=[segment_duration] * nseg, + sampling_frequency=30000, + num_units=10, + add_spikes_on_borders=True, + num_spikes_per_border=num_spikes_on_borders, + border_size_samples=border_size_samples, + ) + spikes = sorting.to_spike_vector(concatenated=False) + # at least num_border spikes at borders for all segments + for i, spikes_in_segment in enumerate(spikes): + num_samples = int(segment_duration * 30000) + assert np.sum(spikes_in_segment["sample_index"] < border_size_samples) >= num_spikes_on_borders + assert ( + np.sum(spikes_in_segment["sample_index"] >= num_samples - border_size_samples) >= num_spikes_on_borders + ) + + def measure_memory_allocation(measure_in_process: bool = True) -> float: """ A local utility to measure memory allocation at a specific point in time. 
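As a side note to the test above, a minimal sketch of exercising the new border-spike
option directly (values are illustrative and mirror the defaults shown in the
generate.py docstring hunk; this snippet is not part of the patch):

    from spikeinterface.core.generate import generate_sorting

    sorting = generate_sorting(
        durations=[10.0, 5.0],
        sampling_frequency=30000.0,
        num_units=10,
        add_spikes_on_borders=True,
        num_spikes_per_border=3,
        border_size_samples=20,
        seed=42,
    )
    # one spike vector per segment; each segment should contain spikes within
    # border_size_samples of both of its edges
    spikes_per_segment = sorting.to_spike_vector(concatenated=False)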
@@ -399,7 +422,7 @@ def test_generate_ground_truth_recording(): if __name__ == "__main__": strategy = "tile_pregenerated" # strategy = "on_the_fly" - test_noise_generator_memory() + # test_noise_generator_memory() # test_noise_generator_under_giga() # test_noise_generator_correct_shape(strategy) # test_noise_generator_consistency_across_calls(strategy, 0, 5) @@ -410,3 +433,4 @@ def test_generate_ground_truth_recording(): # test_generate_templates() # test_inject_templates() # test_generate_ground_truth_recording() + test_generate_sorting_with_spikes_on_borders() diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 3eac333781..c86337a30d 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -16,6 +16,7 @@ class AmplitudeScalingsCalculator(BaseWaveformExtractorExtension): """ extension_name = "amplitude_scalings" + handle_sparsity = True def __init__(self, waveform_extractor): BaseWaveformExtractorExtension.__init__(self, waveform_extractor) @@ -357,7 +358,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) continue unit_index = spike["unit_index"] sample_index = spike["sample_index"] - sparse_indices = sparsity_mask[unit_index] + (sparse_indices,) = np.nonzero(sparsity_mask[unit_index]) template = all_templates[unit_index][:, sparse_indices] template = template[nbefore - cut_out_before : nbefore + cut_out_after] sample_centered = sample_index - start_frame @@ -368,7 +369,7 @@ def _amplitude_scalings_chunk(segment_index, start_frame, end_frame, worker_ctx) template = template[cut_out_before - sample_index :] elif sample_index + cut_out_after > end_frame + right: local_waveform = traces_with_margin[cut_out_start:, sparse_indices] - template = template[: -(sample_index + cut_out_after - end_frame - right)] + template = template[: -(sample_index + cut_out_after - (end_frame + right))] else: local_waveform = traces_with_margin[cut_out_start:cut_out_end, sparse_indices] assert template.shape == local_waveform.shape @@ -550,10 +551,11 @@ def fit_collision( sample_last_centered = np.max(collision["sample_index"]) - (start_frame - left) # construct sparsity as union between units' sparsity - sparse_indices = np.zeros(sparsity_mask.shape[1], dtype="int") + common_sparse_mask = np.zeros(sparsity_mask.shape[1], dtype="int") for spike in collision: - sparse_indices_i = sparsity_mask[spike["unit_index"]] - sparse_indices = np.logical_or(sparse_indices, sparse_indices_i) + mask_i = sparsity_mask[spike["unit_index"]] + common_sparse_mask = np.logical_or(common_sparse_mask, mask_i) + (sparse_indices,) = np.nonzero(common_sparse_mask) local_waveform_start = max(0, sample_first_centered - cut_out_before) local_waveform_end = min(traces_with_margin.shape[0], sample_last_centered + cut_out_after) diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py index 233625e09e..1214b84ac4 100644 --- a/src/spikeinterface/postprocessing/principal_component.py +++ b/src/spikeinterface/postprocessing/principal_component.py @@ -84,9 +84,16 @@ def get_projections(self, unit_id): Returns ------- proj: np.array - The PCA projections (num_waveforms, num_components, num_channels) + The PCA projections (num_waveforms, num_components, num_channels). + In case sparsity is used, only the projections on sparse channels are returned. 
""" - return self._extension_data[f"pca_{unit_id}"] + projections = self._extension_data[f"pca_{unit_id}"] + mode = self._params["mode"] + if mode in ("by_channel_local", "by_channel_global"): + sparsity = self.get_sparsity() + if sparsity is not None: + projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]] + return projections def get_pca_model(self): """ @@ -211,6 +218,10 @@ def project_new(self, new_waveforms, unit_id=None): wfs_flat = new_waveforms.reshape(new_waveforms.shape[0], -1) projections = pca_model.transform(wfs_flat) + # take care of sparsity (not in case of concatenated) + if mode in ("by_channel_local", "by_channel_global"): + if sparsity is not None: + projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]] return projections def get_sparsity(self): diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py index b9c72f9b99..8657d1dced 100644 --- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py +++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py @@ -5,7 +5,7 @@ from pathlib import Path from spikeinterface import extract_waveforms, load_extractor, compute_sparsity -from spikeinterface.extractors import toy_example +from spikeinterface.core.generate import generate_ground_truth_recording if hasattr(pytest, "global_test_folder"): cache_folder = pytest.global_test_folder / "postprocessing" @@ -26,7 +26,18 @@ def setUp(self): self.cache_folder = cache_folder # 1-segment - recording, sorting = toy_example(num_segments=1, num_units=10, num_channels=12) + recording, sorting = generate_ground_truth_recording( + durations=[10], + sampling_frequency=30000, + num_channels=12, + num_units=10, + dtype="float32", + seed=91, + generate_sorting_kwargs=dict(add_spikes_on_borders=True), + noise_kwargs=dict(noise_level=10.0, strategy="tile_pregenerated"), + ) + + # add gains and offsets and save gain = 0.1 recording.set_channel_gains(gain) recording.set_channel_offsets(0) @@ -53,7 +64,16 @@ def setUp(self): self.sparsity1 = compute_sparsity(we1, method="radius", radius_um=50) # 2-segments - recording, sorting = toy_example(num_segments=2, num_units=10) + recording, sorting = generate_ground_truth_recording( + durations=[10, 5], + sampling_frequency=30000, + num_channels=12, + num_units=10, + dtype="float32", + seed=91, + generate_sorting_kwargs=dict(add_spikes_on_borders=True), + noise_kwargs=dict(noise_level=10.0, strategy="tile_pregenerated"), + ) recording.set_channel_gains(gain) recording.set_channel_offsets(0) if (cache_folder / "toy_rec_2seg").is_dir(): diff --git a/src/spikeinterface/postprocessing/tests/test_principal_component.py b/src/spikeinterface/postprocessing/tests/test_principal_component.py index 5d64525b52..04ce42b70e 100644 --- a/src/spikeinterface/postprocessing/tests/test_principal_component.py +++ b/src/spikeinterface/postprocessing/tests/test_principal_component.py @@ -87,13 +87,13 @@ def test_sparse(self): pc.run() for i, unit_id in enumerate(unit_ids): proj = pc.get_projections(unit_id) - assert proj.shape[1:] == (5, 4) + assert proj.shape[1:] == (5, len(sparsity.unit_id_to_channel_ids[unit_id])) # test project_new unit_id = 3 new_wfs = we.get_waveforms(unit_id) new_proj = pc.project_new(new_wfs, unit_id=unit_id) - assert new_proj.shape == (new_wfs.shape[0], 5, 4) + assert new_proj.shape == (new_wfs.shape[0], 5, len(sparsity.unit_id_to_channel_ids[unit_id])) if DEBUG: import matplotlib.pyplot as 
plt @@ -197,8 +197,8 @@ def test_project_new(self): if __name__ == "__main__": test = PrincipalComponentsExtensionTest() test.setUp() - test.test_extension() - test.test_shapes() - test.test_compute_for_all_spikes() + # test.test_extension() + # test.test_shapes() + # test.test_compute_for_all_spikes() test.test_sparse() - test.test_project_new() + # test.test_project_new() From 73ceaacefecc4426d994ebca4ca006d667dada42 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 22 Sep 2023 12:06:15 +0200 Subject: [PATCH 156/322] Extend PCA to be able to return sparse projections and fix tests --- .../postprocessing/principal_component.py | 16 ++++++++++------ .../tests/test_principal_component.py | 12 ++++++++---- .../tests/test_quality_metric_calculator.py | 7 ++++--- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/postprocessing/principal_component.py b/src/spikeinterface/postprocessing/principal_component.py index 5d62216c20..8383dcbb43 100644 --- a/src/spikeinterface/postprocessing/principal_component.py +++ b/src/spikeinterface/postprocessing/principal_component.py @@ -72,7 +72,7 @@ def _select_extension_data(self, unit_ids): new_extension_data[k] = v return new_extension_data - def get_projections(self, unit_id): + def get_projections(self, unit_id, sparse=False): """ Returns the computed projections for the sampled waveforms of a unit id. @@ -80,16 +80,18 @@ def get_projections(self, unit_id): ---------- unit_id : int or str The unit id to return PCA projections for + sparse: bool, default False + If True, and sparsity is not None, only projections on sparse channels are returned. Returns ------- - proj: np.array + projections: np.array The PCA projections (num_waveforms, num_components, num_channels). In case sparsity is used, only the projections on sparse channels are returned. """ projections = self._extension_data[f"pca_{unit_id}"] mode = self._params["mode"] - if mode in ("by_channel_local", "by_channel_global"): + if mode in ("by_channel_local", "by_channel_global") and sparse: sparsity = self.get_sparsity() if sparsity is not None: projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]] @@ -141,7 +143,7 @@ def get_all_projections(self, channel_ids=None, unit_ids=None, outputs="id"): all_labels = [] #  can be unit_id or unit_index all_projections = [] for unit_index, unit_id in enumerate(unit_ids): - proj = self.get_projections(unit_id) + proj = self.get_projections(unit_id, sparse=False) if channel_ids is not None: chan_inds = self.waveform_extractor.channel_ids_to_indices(channel_ids) proj = proj[:, :, chan_inds] @@ -158,7 +160,7 @@ def get_all_projections(self, channel_ids=None, unit_ids=None, outputs="id"): return all_labels, all_projections - def project_new(self, new_waveforms, unit_id=None): + def project_new(self, new_waveforms, unit_id=None, sparse=False): """ Projects new waveforms or traces snippets on the PC components. @@ -168,6 +170,8 @@ def project_new(self, new_waveforms, unit_id=None): Array with new waveforms to project with shape (num_waveforms, num_samples, num_channels) unit_id: int or str In case PCA is sparse and mode is by_channel_local, the unit_id of 'new_waveforms' + sparse: bool, default: False + If True, and sparsity is not None, only projections on sparse channels are returned. 
Returns ------- @@ -219,7 +223,7 @@ def project_new(self, new_waveforms, unit_id=None): projections = pca_model.transform(wfs_flat) # take care of sparsity (not in case of concatenated) - if mode in ("by_channel_local", "by_channel_global"): + if mode in ("by_channel_local", "by_channel_global") and sparse: if sparsity is not None: projections = projections[:, :, sparsity.unit_id_to_channel_indices[unit_id]] return projections diff --git a/src/spikeinterface/postprocessing/tests/test_principal_component.py b/src/spikeinterface/postprocessing/tests/test_principal_component.py index 04ce42b70e..49591d9b89 100644 --- a/src/spikeinterface/postprocessing/tests/test_principal_component.py +++ b/src/spikeinterface/postprocessing/tests/test_principal_component.py @@ -86,14 +86,18 @@ def test_sparse(self): pc.set_params(n_components=5, mode=mode, sparsity=sparsity) pc.run() for i, unit_id in enumerate(unit_ids): - proj = pc.get_projections(unit_id) - assert proj.shape[1:] == (5, len(sparsity.unit_id_to_channel_ids[unit_id])) + proj_sparse = pc.get_projections(unit_id, sparse=True) + assert proj_sparse.shape[1:] == (5, len(sparsity.unit_id_to_channel_ids[unit_id])) + proj_dense = pc.get_projections(unit_id, sparse=False) + assert proj_dense.shape[1:] == (5, num_channels) # test project_new unit_id = 3 new_wfs = we.get_waveforms(unit_id) - new_proj = pc.project_new(new_wfs, unit_id=unit_id) - assert new_proj.shape == (new_wfs.shape[0], 5, len(sparsity.unit_id_to_channel_ids[unit_id])) + new_proj_sparse = pc.project_new(new_wfs, unit_id=unit_id, sparse=True) + assert new_proj_sparse.shape == (new_wfs.shape[0], 5, len(sparsity.unit_id_to_channel_ids[unit_id])) + new_proj_dense = pc.project_new(new_wfs, unit_id=unit_id, sparse=False) + assert new_proj_dense.shape == (new_wfs.shape[0], 5, num_channels) if DEBUG: import matplotlib.pyplot as plt diff --git a/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py b/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py index 4fa65993d1..977beca210 100644 --- a/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py +++ b/src/spikeinterface/qualitymetrics/tests/test_quality_metric_calculator.py @@ -261,7 +261,8 @@ def test_nn_metrics(self): we_sparse, metric_names=metric_names, sparsity=None, seed=0, n_jobs=2 ) for metric_name in metrics.columns: - assert np.allclose(metrics[metric_name], metrics_par[metric_name]) + # NaNs are skipped + assert np.allclose(metrics[metric_name].dropna(), metrics_par[metric_name].dropna()) def test_recordingless(self): we = self.we_long @@ -305,7 +306,7 @@ def test_empty_units(self): test.setUp() # test.test_drift_metrics() # test.test_extension() - # test.test_nn_metrics() + test.test_nn_metrics() # test.test_peak_sign() # test.test_empty_units() - test.test_recordingless() + # test.test_recordingless() From b9b6c15b42a64d877ea9fad9fca84424e2c97edf Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 22 Sep 2023 12:12:21 +0200 Subject: [PATCH 157/322] Add test to check correct order of spikes with borders --- src/spikeinterface/core/tests/test_generate.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/core/tests/test_generate.py b/src/spikeinterface/core/tests/test_generate.py index 3844e421ac..9a9c61766f 100644 --- a/src/spikeinterface/core/tests/test_generate.py +++ b/src/spikeinterface/core/tests/test_generate.py @@ -48,9 +48,15 @@ def test_generate_sorting_with_spikes_on_borders(): 
num_spikes_per_border=num_spikes_on_borders, border_size_samples=border_size_samples, ) + # check that segments are correctly sorted + all_spikes = sorting.to_spike_vector() + np.testing.assert_array_equal(all_spikes["segment_index"], np.sort(all_spikes["segment_index"])) + spikes = sorting.to_spike_vector(concatenated=False) # at least num_border spikes at borders for all segments - for i, spikes_in_segment in enumerate(spikes): + for spikes_in_segment in spikes: + # check that sample indices are correctly sorted within segments + np.testing.assert_array_equal(spikes_in_segment["sample_index"], np.sort(spikes_in_segment["sample_index"])) num_samples = int(segment_duration * 30000) assert np.sum(spikes_in_segment["sample_index"] < border_size_samples) >= num_spikes_on_borders assert ( From 226ad852e25596c0f6072f48a72e2e3d4a84afab Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 22 Sep 2023 12:32:33 +0200 Subject: [PATCH 158/322] Update tests --- .../postprocessing/tests/test_template_metrics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/postprocessing/tests/test_template_metrics.py b/src/spikeinterface/postprocessing/tests/test_template_metrics.py index 5dcff3ffba..a27ccc77f8 100644 --- a/src/spikeinterface/postprocessing/tests/test_template_metrics.py +++ b/src/spikeinterface/postprocessing/tests/test_template_metrics.py @@ -17,13 +17,13 @@ def test_sparse_metrics(self): tm_sparse = self.extension_class.get_extension_function()(self.we1, sparsity=self.sparsity1) print(tm_sparse) - def test_2d_metrics(self): - tm_2d = self.extension_class.get_extension_function()(self.we1, include_2d_metrics=True) - print(tm_2d) + def test_multi_channel_metrics(self): + tm_multi = self.extension_class.get_extension_function()(self.we1, include_multi_channel_metrics=True) + print(tm_multi) if __name__ == "__main__": test = TemplateMetricsExtensionTest() test.setUp() # test.test_extension() - test.test_2d_metrics() + test.test_multi_channel_metrics() From 4e79b5811d41e6343391a3a6b26fab97f657368b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 13:32:51 +0200 Subject: [PATCH 159/322] propagate UnitSelector to others ipywidgets --- src/spikeinterface/widgets/amplitudes.py | 12 ++- src/spikeinterface/widgets/base.py | 3 +- src/spikeinterface/widgets/metrics.py | 21 ++-- src/spikeinterface/widgets/spike_locations.py | 34 +++---- .../widgets/spikes_on_traces.py | 87 ++++++++++------- src/spikeinterface/widgets/unit_locations.py | 29 +++--- src/spikeinterface/widgets/unit_waveforms.py | 50 +++++----- .../widgets/utils_ipywidgets.py | 96 ------------------- 8 files changed, 121 insertions(+), 211 deletions(-) diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py index b60de98cb0..5aa090b1b4 100644 --- a/src/spikeinterface/widgets/amplitudes.py +++ b/src/spikeinterface/widgets/amplitudes.py @@ -147,13 +147,16 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): else: bins = dp.bins ax_hist = self.axes.flatten()[1] - ax_hist.hist(amps, bins=bins, orientation="horizontal", color=dp.unit_colors[unit_id], alpha=0.8) + # this is super slow, using plot and np.histogram is really much faster (and nicer!) 
+ # ax_hist.hist(amps, bins=bins, orientation="horizontal", color=dp.unit_colors[unit_id], alpha=0.8) + count, bins = np.histogram(amps, bins=bins) + ax_hist.plot(count, bins[:-1], color=dp.unit_colors[unit_id], alpha=0.8) if dp.plot_histograms: ax_hist = self.axes.flatten()[1] ax_hist.set_ylim(scatter_ax.get_ylim()) ax_hist.axis("off") - self.figure.tight_layout() + # self.figure.tight_layout() if dp.plot_legend: if hasattr(self, "legend") and self.legend is not None: @@ -174,7 +177,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # import ipywidgets.widgets as widgets import ipywidgets.widgets as W from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller, UnitSelector + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -200,7 +203,6 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.checkbox_histograms = W.Checkbox( value=data_plot["plot_histograms"], description="hist", - # disabled=False, ) left_sidebar = W.VBox( @@ -231,6 +233,7 @@ def _full_update_plot(self, change=None): data_plot = self.next_data_plot data_plot["unit_ids"] = self.unit_selector.value data_plot["plot_histograms"] = self.checkbox_histograms.value + data_plot["plot_legend"] = False backend_kwargs = dict(figure=self.figure, axes=None, ax=None) self.plot_matplotlib(data_plot, **backend_kwargs) @@ -243,6 +246,7 @@ def _update_plot(self, change=None): data_plot = self.next_data_plot data_plot["unit_ids"] = self.unit_selector.value data_plot["plot_histograms"] = self.checkbox_histograms.value + data_plot["plot_legend"] = False backend_kwargs = dict(figure=None, axes=self.axes, ax=None) self.plot_matplotlib(data_plot, **backend_kwargs) diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py index 4ed83fcca9..1ff691320a 100644 --- a/src/spikeinterface/widgets/base.py +++ b/src/spikeinterface/widgets/base.py @@ -38,6 +38,7 @@ def set_default_plotter_backend(backend): "width_cm": "Width of the figure in cm (default 10)", "height_cm": "Height of the figure in cm (default 6)", "display": "If True, widgets are immediately displayed", + # "controllers": "" }, "ephyviewer": {}, } @@ -45,7 +46,7 @@ def set_default_plotter_backend(backend): default_backend_kwargs = { "matplotlib": {"figure": None, "ax": None, "axes": None, "ncols": 5, "figsize": None, "figtitle": None}, "sortingview": {"generate_url": True, "display": True, "figlabel": None, "height": None}, - "ipywidgets": {"width_cm": 25, "height_cm": 10, "display": True}, + "ipywidgets": {"width_cm": 25, "height_cm": 10, "display": True, "controllers": None}, "ephyviewer": {}, } diff --git a/src/spikeinterface/widgets/metrics.py b/src/spikeinterface/widgets/metrics.py index 9dc51f522e..604da35e65 100644 --- a/src/spikeinterface/widgets/metrics.py +++ b/src/spikeinterface/widgets/metrics.py @@ -128,7 +128,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -147,34 +147,29 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): with output: self.figure = plt.figure(figsize=((ratios[1] * width_cm) * cm, height_cm * cm)) plt.show() - if data_plot["unit_ids"] is None: - data_plot["unit_ids"] = [] - unit_widget, unit_controller = 
make_unit_controller( - data_plot["unit_ids"], list(data_plot["unit_colors"].keys()), ratios[0] * width_cm, height_cm - ) - - self.controller = unit_controller + self.unit_selector = UnitSelector(data_plot["sorting"].unit_ids) + self.unit_selector.value = [ ] - for w in self.controller.values(): - w.observe(self._update_ipywidget) self.widget = widgets.AppLayout( center=self.figure.canvas, - left_sidebar=unit_widget, + left_sidebar=self.unit_selector, pane_widths=ratios + [0], ) # a first update self._update_ipywidget(None) + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + if backend_kwargs["display"]: display(self.widget) def _update_ipywidget(self, change): from matplotlib.lines import Line2D - unit_ids = self.controller["unit_ids"].value + unit_ids = self.unit_selector.value unit_colors = self.data_plot["unit_colors"] # matplotlib next_data_plot dict update at each call @@ -198,6 +193,7 @@ def _update_ipywidget(self, change): self.plot_matplotlib(self.data_plot, **backend_kwargs) if len(unit_ids) > 0: + # TODO later make option to control legend or not for l in self.figure.legends: l.remove() handles = [ @@ -212,6 +208,7 @@ def _update_ipywidget(self, change): self.figure.canvas.draw() self.figure.canvas.flush_events() + def plot_sortingview(self, data_plot, **backend_kwargs): import sortingview.views as vv from .utils_sortingview import generate_unit_table_view, make_serializable, handle_display_and_url diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py index 9771b2c0e9..926051b8f9 100644 --- a/src/spikeinterface/widgets/spike_locations.py +++ b/src/spikeinterface/widgets/spike_locations.py @@ -191,7 +191,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -210,48 +210,36 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): fig, self.ax = plt.subplots(figsize=((ratios[1] * width_cm) * cm, height_cm * cm)) plt.show() - data_plot["unit_ids"] = data_plot["unit_ids"][:1] - - unit_widget, unit_controller = make_unit_controller( - data_plot["unit_ids"], - list(data_plot["unit_colors"].keys()), - ratios[0] * width_cm, - height_cm, - ) - - self.controller = unit_controller - - for w in self.controller.values(): - w.observe(self._update_ipywidget) + self.unit_selector = UnitSelector(data_plot["unit_ids"]) + self.unit_selector.value = list(data_plot["unit_ids"])[:1] self.widget = widgets.AppLayout( center=fig.canvas, - left_sidebar=unit_widget, + left_sidebar=self.unit_selector, pane_widths=ratios + [0], ) # a first update - self._update_ipywidget(None) + self._update_ipywidget() + + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") if backend_kwargs["display"]: display(self.widget) - def _update_ipywidget(self, change): + def _update_ipywidget(self, change=None): self.ax.clear() - unit_ids = self.controller["unit_ids"].value - # matplotlib next_data_plot dict update at each call data_plot = self.next_data_plot - data_plot["unit_ids"] = unit_ids + data_plot["unit_ids"] = self.unit_selector.value data_plot["plot_all_units"] = True + # TODO add an option checkbox for legend data_plot["plot_legend"] = True data_plot["hide_axis"] = True - backend_kwargs = {} - backend_kwargs["ax"] = 
self.ax + backend_kwargs = dict(ax=self.ax) - # self.mpl_plotter.do_plot(data_plot, **backend_kwargs) self.plot_matplotlib(data_plot, **backend_kwargs) fig = self.ax.get_figure() fig.canvas.draw() diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py index ae036d1ba1..2f748cc0fc 100644 --- a/src/spikeinterface/widgets/spikes_on_traces.py +++ b/src/spikeinterface/widgets/spikes_on_traces.py @@ -149,20 +149,20 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): sorting = we.sorting # first plot time series - ts_widget = TracesWidget(recording, **dp.options, backend="matplotlib", **backend_kwargs) - self.ax = ts_widget.ax - self.axes = ts_widget.axes - self.figure = ts_widget.figure + traces_widget = TracesWidget(recording, **dp.options, backend="matplotlib", **backend_kwargs) + self.ax = traces_widget.ax + self.axes = traces_widget.axes + self.figure = traces_widget.figure ax = self.ax - frame_range = ts_widget.data_plot["frame_range"] - segment_index = ts_widget.data_plot["segment_index"] - min_y = np.min(ts_widget.data_plot["channel_locations"][:, 1]) - max_y = np.max(ts_widget.data_plot["channel_locations"][:, 1]) + frame_range = traces_widget.data_plot["frame_range"] + segment_index = traces_widget.data_plot["segment_index"] + min_y = np.min(traces_widget.data_plot["channel_locations"][:, 1]) + max_y = np.max(traces_widget.data_plot["channel_locations"][:, 1]) - n = len(ts_widget.data_plot["channel_ids"]) - order = ts_widget.data_plot["order"] + n = len(traces_widget.data_plot["channel_ids"]) + order = traces_widget.data_plot["order"] if order is None: order = np.arange(n) @@ -210,13 +210,13 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): # construct waveforms label_set = False if len(spike_frames_to_plot) > 0: - vspacing = ts_widget.data_plot["vspacing"] - traces = ts_widget.data_plot["list_traces"][0] + vspacing = traces_widget.data_plot["vspacing"] + traces = traces_widget.data_plot["list_traces"][0] waveform_idxs = spike_frames_to_plot[:, None] + np.arange(-we.nbefore, we.nafter) - frame_range[0] - waveform_idxs = np.clip(waveform_idxs, 0, len(ts_widget.data_plot["times"]) - 1) + waveform_idxs = np.clip(waveform_idxs, 0, len(traces_widget.data_plot["times"]) - 1) - times = ts_widget.data_plot["times"][waveform_idxs] + times = traces_widget.data_plot["times"][waveform_idxs] # discontinuity times[:, -1] = np.nan @@ -224,7 +224,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): waveforms = traces[waveform_idxs] # [:, :, order] waveforms_r = waveforms.reshape((waveforms.shape[0] * waveforms.shape[1], waveforms.shape[2])) - for i, chan_id in enumerate(ts_widget.data_plot["channel_ids"]): + for i, chan_id in enumerate(traces_widget.data_plot["channel_ids"]): offset = vspacing * i if chan_id in chan_ids: l = ax.plot(times_r, offset + waveforms_r[:, i], color=dp.unit_colors[unit]) @@ -232,13 +232,13 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): handles.append(l[0]) labels.append(unit) label_set = True - ax.legend(handles, labels) + # ax.legend(handles, labels) def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -256,37 +256,58 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): width_cm = 
backend_kwargs["width_cm"] # plot timeseries - ts_widget = TracesWidget(we.recording, **dp.options, backend="ipywidgets", **backend_kwargs_ts) - self.ax = ts_widget.ax - self.axes = ts_widget.axes - self.figure = ts_widget.figure + self._traces_widget = TracesWidget(we.recording, **dp.options, backend="ipywidgets", **backend_kwargs_ts) + self.ax = self._traces_widget.ax + self.axes = self._traces_widget.axes + self.figure = self._traces_widget.figure - unit_widget, unit_controller = make_unit_controller( - data_plot["unit_ids"], we.unit_ids, ratios[0] * width_cm, height_cm - ) + self.sampling_frequency = self._traces_widget.rec0.sampling_frequency - self.controller = dict() - self.controller.update(ts_widget.controller) - self.controller.update(unit_controller) + self.time_slider = self._traces_widget.time_slider - for w in self.controller.values(): - w.observe(self._update_ipywidget) + self.unit_selector = UnitSelector(data_plot["unit_ids"]) + self.unit_selector.value = list(data_plot["unit_ids"])[:1] - self.widget = widgets.AppLayout(center=ts_widget.widget, left_sidebar=unit_widget, pane_widths=ratios + [0]) + self.widget = widgets.AppLayout(center=self._traces_widget.widget, + left_sidebar=self.unit_selector, + pane_widths=ratios + [0]) # a first update - self._update_ipywidget(None) + self._update_ipywidget() + + # remove callback from traces_widget + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + self._traces_widget.time_slider.observe(self._update_ipywidget, names='value', type="change") + self._traces_widget.channel_selector.observe(self._update_ipywidget, names='value', type="change") + self._traces_widget.scaler.observe(self._update_ipywidget, names='value', type="change") + if backend_kwargs["display"]: display(self.widget) - def _update_ipywidget(self, change): + def _update_ipywidget(self, change=None): self.ax.clear() - unit_ids = self.controller["unit_ids"].value + # TODO later: this is still a bit buggy because it make double refresh one from _traces_widget and one internal + + unit_ids = self.unit_selector.value + start_frame, end_frame, segment_index = self._traces_widget.time_slider.value + channel_ids = self._traces_widget.channel_selector.value + mode = self._traces_widget.mode_selector.value data_plot = self.next_data_plot data_plot["unit_ids"] = unit_ids + data_plot["options"].update( + dict( + channel_ids=channel_ids, + segment_index=segment_index, + # frame_range=(start_frame, end_frame), + time_range=np.array([start_frame, end_frame]) / self.sampling_frequency, + mode=mode, + with_colorbar=False, + ) + ) + backend_kwargs = {} backend_kwargs["ax"] = self.ax diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py index 42267e711f..8526a95d60 100644 --- a/src/spikeinterface/widgets/unit_locations.py +++ b/src/spikeinterface/widgets/unit_locations.py @@ -167,7 +167,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -186,42 +186,35 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): fig, self.ax = plt.subplots(figsize=((ratios[1] * width_cm) * cm, height_cm * cm)) plt.show() - data_plot["unit_ids"] = data_plot["unit_ids"][:1] - unit_widget, unit_controller = make_unit_controller( - 
data_plot["unit_ids"], list(data_plot["unit_colors"].keys()), ratios[0] * width_cm, height_cm - ) - - self.controller = unit_controller - - for w in self.controller.values(): - w.observe(self._update_ipywidget) + self.unit_selector = UnitSelector(data_plot["unit_ids"]) + self.unit_selector.value = list(data_plot["unit_ids"])[:1] self.widget = widgets.AppLayout( center=fig.canvas, - left_sidebar=unit_widget, + left_sidebar=self.unit_selector, pane_widths=ratios + [0], ) # a first update - self._update_ipywidget(None) + self._update_ipywidget() + + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") if backend_kwargs["display"]: display(self.widget) - def _update_ipywidget(self, change): + def _update_ipywidget(self, change=None): self.ax.clear() - unit_ids = self.controller["unit_ids"].value - # matplotlib next_data_plot dict update at each call data_plot = self.next_data_plot - data_plot["unit_ids"] = unit_ids + data_plot["unit_ids"] = self.unit_selector.value data_plot["plot_all_units"] = True + # TODO later add an option checkbox for legend data_plot["plot_legend"] = True data_plot["hide_axis"] = True - backend_kwargs = {} - backend_kwargs["ax"] = self.ax + backend_kwargs = dict(ax=self.ax) self.plot_matplotlib(data_plot, **backend_kwargs) fig = self.ax.get_figure() diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py index e64765b44b..f01c842b66 100644 --- a/src/spikeinterface/widgets/unit_waveforms.py +++ b/src/spikeinterface/widgets/unit_waveforms.py @@ -250,7 +250,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt import ipywidgets.widgets as widgets from IPython.display import display - from .utils_ipywidgets import check_ipywidget_backend, make_unit_controller + from .utils_ipywidgets import check_ipywidget_backend, UnitSelector check_ipywidget_backend() @@ -274,44 +274,33 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.fig_probe, self.ax_probe = plt.subplots(figsize=((ratios[2] * width_cm) * cm, height_cm * cm)) plt.show() - data_plot["unit_ids"] = data_plot["unit_ids"][:1] - unit_widget, unit_controller = make_unit_controller( - data_plot["unit_ids"], we.unit_ids, ratios[0] * width_cm, height_cm - ) + self.unit_selector = UnitSelector(data_plot["unit_ids"]) + self.unit_selector.value = list(data_plot["unit_ids"])[:1] + - same_axis_button = widgets.Checkbox( + self.same_axis_button = widgets.Checkbox( value=False, description="same axis", disabled=False, ) - plot_templates_button = widgets.Checkbox( + self.plot_templates_button = widgets.Checkbox( value=True, description="plot templates", disabled=False, ) - hide_axis_button = widgets.Checkbox( + self.hide_axis_button = widgets.Checkbox( value=True, description="hide axis", disabled=False, ) - footer = widgets.HBox([same_axis_button, plot_templates_button, hide_axis_button]) - - self.controller = { - "same_axis": same_axis_button, - "plot_templates": plot_templates_button, - "hide_axis": hide_axis_button, - } - self.controller.update(unit_controller) - - for w in self.controller.values(): - w.observe(self._update_ipywidget) + footer = widgets.HBox([self.same_axis_button, self.plot_templates_button, self.hide_axis_button]) self.widget = widgets.AppLayout( center=self.fig_wf.canvas, - left_sidebar=unit_widget, + left_sidebar=self.unit_selector, right_sidebar=self.fig_probe.canvas, pane_widths=ratios, footer=footer, @@ -320,6 +309,11 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a 
first update self._update_ipywidget(None) + self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + for w in self.same_axis_button, self.plot_templates_button, self.hide_axis_button: + w.observe(self._update_ipywidget, names='value', type="change") + + if backend_kwargs["display"]: display(self.widget) @@ -327,10 +321,15 @@ def _update_ipywidget(self, change): self.fig_wf.clear() self.ax_probe.clear() - unit_ids = self.controller["unit_ids"].value - same_axis = self.controller["same_axis"].value - plot_templates = self.controller["plot_templates"].value - hide_axis = self.controller["hide_axis"].value + # unit_ids = self.controller["unit_ids"].value + unit_ids = self.unit_selector.value + # same_axis = self.controller["same_axis"].value + # plot_templates = self.controller["plot_templates"].value + # hide_axis = self.controller["hide_axis"].value + + same_axis = self.same_axis_button.value + plot_templates = self.plot_templates_button.value + hide_axis = self.hide_axis_button.value # matplotlib next_data_plot dict update at each call data_plot = self.next_data_plot @@ -341,6 +340,8 @@ def _update_ipywidget(self, change): data_plot["plot_templates"] = plot_templates if data_plot["plot_waveforms"]: data_plot["wfs_by_ids"] = {unit_id: self.we.get_waveforms(unit_id) for unit_id in unit_ids} + + # TODO option for plot_legend backend_kwargs = {} @@ -369,6 +370,7 @@ def _update_ipywidget(self, change): self.ax_probe.axis("off") self.ax_probe.axis("equal") + # TODO this could be done with probeinterface plotting plotting tools!! for unit in unit_ids: channel_inds = data_plot["sparsity"].unit_id_to_channel_indices[unit] self.ax_probe.plot( diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index d2c41f234a..57550c0910 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -11,102 +11,6 @@ def check_ipywidget_backend(): assert "ipympl" in mpl_backend, "To use the 'ipywidgets' backend, you have to set %matplotlib widget" -# def make_timeseries_controller(t_start, t_stop, layer_keys, num_segments, time_range, mode, all_layers, width_cm): -# time_slider = W.FloatSlider( -# orientation="horizontal", -# description="time:", -# value=time_range[0], -# min=t_start, -# max=t_stop, -# continuous_update=False, -# layout=W.Layout(width=f"{width_cm}cm"), -# ) -# layer_selector = W.Dropdown(description="layer", options=layer_keys) -# segment_selector = W.Dropdown(description="segment", options=list(range(num_segments))) -# window_sizer = W.BoundedFloatText(value=np.diff(time_range)[0], step=0.1, min=0.005, description="win (s)") -# mode_selector = W.Dropdown(options=["line", "map"], description="mode", value=mode) -# all_layers = W.Checkbox(description="plot all layers", value=all_layers) - -# controller = { -# "layer_key": layer_selector, -# "segment_index": segment_selector, -# "window": window_sizer, -# "t_start": time_slider, -# "mode": mode_selector, -# "all_layers": all_layers, -# } -# widget = W.VBox( -# [time_slider, W.HBox([all_layers, layer_selector, segment_selector, window_sizer, mode_selector])] -# ) - -# return widget, controller - - -def make_unit_controller(unit_ids, all_unit_ids, width_cm, height_cm): - unit_label = W.Label(value="units:") - - unit_selector = W.SelectMultiple( - options=all_unit_ids, - value=list(unit_ids), - disabled=False, - layout=W.Layout(width=f"{width_cm}cm", height=f"{height_cm}cm"), - ) - - controller = {"unit_ids": 
unit_selector} - widget = W.VBox([unit_label, unit_selector]) - - return widget, controller - - -# def make_channel_controller(recording, width_cm, height_cm): -# channel_label = W.Label("channel indices:", layout=W.Layout(justify_content="center")) -# channel_selector = W.IntRangeSlider( -# value=[0, recording.get_num_channels()], -# min=0, -# max=recording.get_num_channels(), -# step=1, -# disabled=False, -# continuous_update=False, -# orientation="vertical", -# readout=True, -# readout_format="d", -# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{height_cm}cm"), -# ) - -# controller = {"channel_inds": channel_selector} -# widget = W.VBox([channel_label, channel_selector]) - -# return widget, controller - - -# def make_scale_controller(width_cm, height_cm): -# scale_label = W.Label("Scale", layout=W.Layout(justify_content="center")) - -# plus_selector = W.Button( -# description="", -# disabled=False, -# button_style="", # 'success', 'info', 'warning', 'danger' or '' -# tooltip="Increase scale", -# icon="arrow-up", -# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), -# ) - -# minus_selector = W.Button( -# description="", -# disabled=False, -# button_style="", # 'success', 'info', 'warning', 'danger' or '' -# tooltip="Decrease scale", -# icon="arrow-down", -# layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), -# ) - -# controller = {"plus": plus_selector, "minus": minus_selector} -# widget = W.VBox([scale_label, plus_selector, minus_selector]) - -# return widget, controller - - - class TimeSlider(W.HBox): value = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) From f315594b0b88bed01f01232688d62c4c2e4bc0fe Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 15:49:47 +0200 Subject: [PATCH 160/322] protect TimeSlider on the upper limit to avoid border effect on window size --- src/spikeinterface/widgets/utils_ipywidgets.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 57550c0910..ee6133a990 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -54,7 +54,7 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): # description='time:', value=start_frame, min=0, - max=self.frame_limits[self.segment_index], + max=self.frame_limits[self.segment_index] - 1, readout=False, continuous_update=False, layout=W.Layout(width=f'70%') @@ -112,10 +112,13 @@ def update_time(self, new_frame=None, new_time=None, update_slider=False, update else: start_frame = new_frame delta_s = self.window_sizer.value - end_frame = start_frame + int(delta_s * self.sampling_frequency) - + delta = int(delta_s * self.sampling_frequency) + # clip + start_frame = min(self.frame_limits[self.segment_index] - delta, start_frame) start_frame = max(0, start_frame) + end_frame = start_frame + delta + end_frame = min(self.frame_limits[self.segment_index], end_frame) @@ -170,7 +173,7 @@ def segment_changed(self, change=None): self.slider.unobserve(self.slider_moved, names='value', type="change") # self.slider.value = 0 - self.slider.max = self.frame_limits[self.segment_index] + self.slider.max = self.frame_limits[self.segment_index] - 1 self.slider.observe(self.slider_moved, names='value', type="change") self.update_time(new_frame=0, update_slider=True, update_label=True) From c33f7233b54ccce797a903f8f495d8dbb30f0b2a Mon Sep 
17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:21:53 -0400 Subject: [PATCH 161/322] test reorganize folders --- doc/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 15cb65d46a..b120393911 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -118,11 +118,11 @@ 'examples_dirs': ['../examples/modules_gallery'], 'gallery_dirs': ['modules_gallery', ], # path where to save gallery generated examples 'subsection_order': ExplicitOrder([ - '../examples/modules_gallery/core/', '../examples/modules_gallery/extractors/', '../examples/modules_gallery/qualitymetrics', '../examples/modules_gallery/comparison', '../examples/modules_gallery/widgets', + '../examples/modules_gallery/core/', ]), 'within_subsection_order': FileNameSortKey, 'ignore_pattern': '/generate_', From f2188266647d7faf721d89089b6f9c0bd1d9e637 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 22 Sep 2023 16:22:01 +0200 Subject: [PATCH 162/322] feedback from Ramon --- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/tests/test_waveform_extractor.py | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 362b598b0b..05d63f3c8d 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1433,7 +1433,7 @@ def generate_ground_truth_recording( ) recording.annotate(is_filtered=True) recording.set_probe(probe, in_place=True) - recording.set_property("gain_to_uV", np.ones(num_channels)) - recording.set_property("offset_to_uV", np.zeros(num_channels)) + recording.set_channel_gains(1.) + recording.set_channel_offsets(0.) return recording, sorting diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 3972c9186c..f53b9cf18d 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -517,8 +517,6 @@ def test_non_json_object(): num_units=5, ) - - print(recording.check_serializablility("pickle")) # recording is not save to keep it in memory sorting = sorting.save() From 96be72e5ac05ec7f3bd63f866783b733fca22ab8 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 22 Sep 2023 10:43:48 -0400 Subject: [PATCH 163/322] try removing extra slash from some sections --- doc/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index b120393911..eb8bee5f9a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -118,11 +118,11 @@ 'examples_dirs': ['../examples/modules_gallery'], 'gallery_dirs': ['modules_gallery', ], # path where to save gallery generated examples 'subsection_order': ExplicitOrder([ - '../examples/modules_gallery/extractors/', + '../examples/modules_gallery/core', + '../examples/modules_gallery/extractors', '../examples/modules_gallery/qualitymetrics', '../examples/modules_gallery/comparison', '../examples/modules_gallery/widgets', - '../examples/modules_gallery/core/', ]), 'within_subsection_order': FileNameSortKey, 'ignore_pattern': '/generate_', From c4fec2f135f5166bc3dfe4ebbd1a3ccdff8ddd63 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 22 Sep 2023 11:00:17 -0400 Subject: [PATCH 164/322] try setting nested_sections false --- doc/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/conf.py b/doc/conf.py index 
eb8bee5f9a..13d1ef4e65 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -126,6 +126,7 @@ ]), 'within_subsection_order': FileNameSortKey, 'ignore_pattern': '/generate_', + 'nested_sections': False, } intersphinx_mapping = { From 2ba8928b785ed06f8a2f01b48ea632a4171ab926 Mon Sep 17 00:00:00 2001 From: Windows Home Date: Sun, 24 Sep 2023 13:51:48 -0500 Subject: [PATCH 165/322] Fix unit ID matching in sortingview curation Refine the logic for matching unit IDs in the sortingview curation process. Instead of using a potentially ambiguous containment check, unit IDs are now split at the '-' character, ensuring accurate mapping between unit labels and merged unit IDs. Additionally, introduced a unit test to validate the improved behavior and guard against potential false positives in future changes. --- .../curation/sortingview_curation.py | 3 +- .../tests/test_sortingview_curation.py | 45 +++++++++++++++++-- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index 6adf9effd4..f595a67a3f 100644 --- a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -83,8 +83,9 @@ def apply_sortingview_curation( properties[label] = np.zeros(len(curation_sorting.current_sorting.unit_ids), dtype=bool) for u_i, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): labels_unit = [] + unit_id_parts = str(unit_id).split('-') for unit_label, labels in labels_dict.items(): - if unit_label in str(unit_id): + if unit_label in unit_id_parts: labels_unit.extend(labels) for label in labels_unit: properties[label][u_i] = True diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 9177cb5536..1b9e6f2800 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -1,8 +1,10 @@ import pytest from pathlib import Path import os - +import json +import numpy as np import spikeinterface as si +import spikeinterface.extractors as se from spikeinterface.extractors import read_mearec from spikeinterface import set_global_tmp_folder from spikeinterface.postprocessing import ( @@ -17,9 +19,7 @@ cache_folder = pytest.global_test_folder / "curation" else: cache_folder = Path("cache_folder") / "curation" - parent_folder = Path(__file__).parent - ON_GITHUB = bool(os.getenv("GITHUB_ACTIONS")) KACHERY_CLOUD_SET = bool(os.getenv("KACHERY_CLOUD_CLIENT_ID")) and bool(os.getenv("KACHERY_CLOUD_PRIVATE_KEY")) @@ -111,6 +111,7 @@ def test_json_curation(): # from curation.json json_file = parent_folder / "sv-sorting-curation.json" sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) + print(f"Sorting: {sorting.get_unit_ids()}") print(f"From JSON: {sorting_curated_json}") assert len(sorting_curated_json.unit_ids) == 9 @@ -130,9 +131,47 @@ def test_json_curation(): assert len(sorting_curated_json_mua.unit_ids) == 6 assert len(sorting_curated_json_mua1.unit_ids) == 5 +def test_false_positive_curation(): + # https://spikeinterface.readthedocs.io/en/latest/modules_gallery/core/plot_2_sorting_extractor.html + sampling_frequency = 30000. + duration = 20. 
+ num_timepoints = int(sampling_frequency * duration) + num_units = 20 + num_spikes = 1000 + times0 = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) + labels0 = np.random.randint(1, num_units + 1, size=num_spikes) + times1 = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) + labels1 = np.random.randint(1, num_units + 1, size=num_spikes) + + sorting = se.NumpySorting.from_times_labels([times0, times1], [labels0, labels1], sampling_frequency) + print('Sorting: {}'.format(sorting.get_unit_ids())) + + # Test curation JSON: + test_json = { + "labelsByUnit": { + "1": ["accept"], + }, + "mergeGroups": [] + } + + json_path = "test_data.json" + with open(json_path, 'w') as f: + json.dump(test_json, f, indent=4) + + sorting_curated = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) + accept_idx = np.where(sorting_curated.get_property("accept"))[0] + sorting_curated_ids = sorting_curated.get_unit_ids() + print(f'Accepted unit IDs: {sorting_curated_ids[accept_idx]}') + + # Check if unit_id 1 has received the "accept" label. + assert sorting_curated.get_unit_property(unit_id=1, key="accept") + # Check if unit_id "#10" has received the "accept" label. + # If so, test fails since only unit_id 1 received the "accept" label in test_json. + assert not sorting_curated.get_unit_property(unit_id=10, key="accept") if __name__ == "__main__": # generate_sortingview_curation_dataset() test_sha1_curation() test_gh_curation() test_json_curation() + test_false_positive_curation() From 45c69f52147edd406f293f731b7c7c687c700d29 Mon Sep 17 00:00:00 2001 From: Windows Home Date: Sun, 24 Sep 2023 14:46:01 -0500 Subject: [PATCH 166/322] Add merge check --- .gitignore | 1 + .../tests/test_sortingview_curation.py | 20 ++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 3ee3cb8867..7838213bed 100644 --- a/.gitignore +++ b/.gitignore @@ -188,3 +188,4 @@ test_folder/ # Mac OS .DS_Store +test_data.json diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 1b9e6f2800..c8a0788223 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -115,6 +115,7 @@ def test_json_curation(): print(f"From JSON: {sorting_curated_json}") assert len(sorting_curated_json.unit_ids) == 9 + print(sorting_curated_json.unit_ids) assert "#8-#9" in sorting_curated_json.unit_ids assert "accept" in sorting_curated_json.get_property_keys() assert "mua" in sorting_curated_json.get_property_keys() @@ -150,24 +151,29 @@ def test_false_positive_curation(): test_json = { "labelsByUnit": { "1": ["accept"], + "2": ["artifact"], + "12": ["artifact"] }, - "mergeGroups": [] + "mergeGroups": [[2,12]] } json_path = "test_data.json" with open(json_path, 'w') as f: json.dump(test_json, f, indent=4) - sorting_curated = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) - accept_idx = np.where(sorting_curated.get_property("accept"))[0] - sorting_curated_ids = sorting_curated.get_unit_ids() + sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) + accept_idx = np.where(sorting_curated_json.get_property("accept"))[0] + sorting_curated_ids = sorting_curated_json.get_unit_ids() print(f'Accepted unit IDs: {sorting_curated_ids[accept_idx]}') # Check if unit_id 1 has received the "accept" label. 
- assert sorting_curated.get_unit_property(unit_id=1, key="accept") - # Check if unit_id "#10" has received the "accept" label. + assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") + # Check if unit_id 10 has received the "accept" label. # If so, test fails since only unit_id 1 received the "accept" label in test_json. - assert not sorting_curated.get_unit_property(unit_id=10, key="accept") + assert not sorting_curated_json.get_unit_property(unit_id=10, key="accept") + print(sorting_curated_json.unit_ids) + # Merging unit_ids of dtype int creates a new unit id + assert 21 in sorting_curated_json.unit_ids if __name__ == "__main__": # generate_sortingview_curation_dataset() From ffaf06756b3884646785fd81bce2d123abaaff0d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 24 Sep 2023 20:09:34 +0000 Subject: [PATCH 167/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../curation/sortingview_curation.py | 2 +- .../tests/test_sortingview_curation.py | 33 ++++++++----------- 2 files changed, 15 insertions(+), 20 deletions(-) diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index f595a67a3f..a5633fe165 100644 --- a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -83,7 +83,7 @@ def apply_sortingview_curation( properties[label] = np.zeros(len(curation_sorting.current_sorting.unit_ids), dtype=bool) for u_i, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): labels_unit = [] - unit_id_parts = str(unit_id).split('-') + unit_id_parts = str(unit_id).split("-") for unit_label, labels in labels_dict.items(): if unit_label in unit_id_parts: labels_unit.extend(labels) diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index c8a0788223..a8944f0688 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -132,10 +132,11 @@ def test_json_curation(): assert len(sorting_curated_json_mua.unit_ids) == 6 assert len(sorting_curated_json_mua1.unit_ids) == 5 + def test_false_positive_curation(): # https://spikeinterface.readthedocs.io/en/latest/modules_gallery/core/plot_2_sorting_extractor.html - sampling_frequency = 30000. - duration = 20. 
+ sampling_frequency = 30000.0 + duration = 20.0 num_timepoints = int(sampling_frequency * duration) num_units = 20 num_spikes = 1000 @@ -145,36 +146,30 @@ def test_false_positive_curation(): labels1 = np.random.randint(1, num_units + 1, size=num_spikes) sorting = se.NumpySorting.from_times_labels([times0, times1], [labels0, labels1], sampling_frequency) - print('Sorting: {}'.format(sorting.get_unit_ids())) + print("Sorting: {}".format(sorting.get_unit_ids())) # Test curation JSON: - test_json = { - "labelsByUnit": { - "1": ["accept"], - "2": ["artifact"], - "12": ["artifact"] - }, - "mergeGroups": [[2,12]] - } + test_json = {"labelsByUnit": {"1": ["accept"], "2": ["artifact"], "12": ["artifact"]}, "mergeGroups": [[2, 12]]} json_path = "test_data.json" - with open(json_path, 'w') as f: + with open(json_path, "w") as f: json.dump(test_json, f, indent=4) sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) accept_idx = np.where(sorting_curated_json.get_property("accept"))[0] sorting_curated_ids = sorting_curated_json.get_unit_ids() - print(f'Accepted unit IDs: {sorting_curated_ids[accept_idx]}') + print(f"Accepted unit IDs: {sorting_curated_ids[accept_idx]}") - # Check if unit_id 1 has received the "accept" label. - assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") - # Check if unit_id 10 has received the "accept" label. - # If so, test fails since only unit_id 1 received the "accept" label in test_json. - assert not sorting_curated_json.get_unit_property(unit_id=10, key="accept") + # Check if unit_id 1 has received the "accept" label. + assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") + # Check if unit_id 10 has received the "accept" label. + # If so, test fails since only unit_id 1 received the "accept" label in test_json. 
+ assert not sorting_curated_json.get_unit_property(unit_id=10, key="accept") print(sorting_curated_json.unit_ids) - # Merging unit_ids of dtype int creates a new unit id + # Merging unit_ids of dtype int creates a new unit id assert 21 in sorting_curated_json.unit_ids + if __name__ == "__main__": # generate_sortingview_curation_dataset() test_sha1_curation() From 00f91eb99de0052daf6ae67a47026e1490bcd278 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 25 Sep 2023 12:02:51 +0200 Subject: [PATCH 168/322] Do not save/overwrite params in read-only mode --- src/spikeinterface/core/waveform_extractor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6881ab3ec5..9f85603e51 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1988,6 +1988,9 @@ def set_params(self, **params): params = self._set_params(**params) self._params = params + if self.waveform_extractor.is_read_only(): + return + params_to_save = params.copy() if "sparsity" in params and params["sparsity"] is not None: assert isinstance( From b23e7e444065ee9b7a72c549a9c0aee22ce39c25 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Mon, 25 Sep 2023 16:11:30 -0400 Subject: [PATCH 169/322] allow relative path when exporting to phy --- src/spikeinterface/exporters/to_phy.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index c92861a8bf..7de1a128e5 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -35,6 +35,7 @@ def export_to_phy( template_mode: str = "median", dtype: Optional[npt.DTypeLike] = None, verbose: bool = True, + use_relative_path: bool = False, **job_kwargs, ): """ @@ -64,6 +65,8 @@ def export_to_phy( Dtype to save binary data verbose: bool If True, output is verbose + use_relative_path : bool, default: False + If True saves the `dat_path` as a relative path, else an absolute {} """ @@ -94,7 +97,7 @@ def export_to_phy( used_sparsity = sparsity else: used_sparsity = ChannelSparsity.create_dense(waveform_extractor) - # convinient sparsity dict for the 3 cases to retrieve channl_inds + # convenient sparsity dict for the 3 cases to retrieve channl_inds sparse_dict = used_sparsity.unit_id_to_channel_indices empty_flag = False @@ -106,7 +109,7 @@ def export_to_phy( empty_flag = True unit_ids = non_empty_units if empty_flag: - warnings.warn("Empty units have been removed when being exported to Phy") + warnings.warn("Empty units have been removed while exporting to Phy") if len(unit_ids) == 0: raise Exception("No non-empty units in the sorting result, can't save to Phy.") @@ -149,7 +152,10 @@ def export_to_phy( # write params.py with (output_folder / "params.py").open("w") as f: - f.write(f"dat_path = r'{str(rec_path)}'\n") + if use_relative_path: + f.write(f"dat_path = r'recording.dat'\n") + else: + f.write(f"dat_path = r'{str(rec_path)}'\n") f.write(f"n_channels_dat = {num_chans}\n") f.write(f"dtype = '{dtype_str}'\n") f.write(f"offset = 0\n") From 68fe2ba08f9c41b3feaf7866fee934291d78f7ea Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 09:26:40 +0200 Subject: [PATCH 170/322] OMP with SVD decomposition --- .../sortingcomponents/matching/circus.py | 307 ++++++++++++++++++ .../sortingcomponents/matching/method_list.py | 3 +- 2 files changed, 309 
insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index a19e7b71b5..e86c913976 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -478,6 +478,313 @@ def main_function(cls, traces, d): return spikes +class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): + """ + Orthogonal Matching Pursuit inspired from Spyking Circus sorter + + https://elifesciences.org/articles/34518 + + This is an Orthogonal Template Matching algorithm. For speed and + memory optimization, templates are automatically sparsified. Signal + is convolved with the templates, and as long as some scalar products + are higher than a given threshold, we use a Cholesky decomposition + to compute the optimal amplitudes needed to reconstruct the signal. + + IMPORTANT NOTE: small chunks are more efficient for such Peeler, + consider using 100ms chunk + + Parameters + ---------- + amplitude: tuple + (Minimal, Maximal) amplitudes allowed for every template + omp_min_sps: float + Stopping criteria of the OMP algorithm, in percentage of the norm + noise_levels: array + The noise levels, for every channels. If None, they will be automatically + computed + random_chunk_kwargs: dict + Parameters for computing noise levels, if not provided (sub optimal) + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. + ----- + """ + + _default_params = { + "amplitudes": [0.6, 2], + "omp_min_sps": 0.1, + "waveform_extractor": None, + "templates": None, + "overlaps": None, + "norms": None, + "random_chunk_kwargs": {}, + "noise_levels": None, + "rank" : 3, + "sparse_kwargs": {"method": "ptp", "threshold": 1}, + "ignored_ids": [], + "vicinity": 0, + } + + @classmethod + def _prepare_templates(cls, d): + waveform_extractor = d["waveform_extractor"] + num_templates = len(d["waveform_extractor"].sorting.unit_ids) + + if not waveform_extractor.is_sparse(): + sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask + else: + sparsity = waveform_extractor.sparsity.mask + + templates = waveform_extractor.get_all_templates(mode="median").copy() + + temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) + + # Keep only the strongest components + rank = d['rank'] + d['templates'] = {} + d["norms"] = np.zeros(num_templates, dtype=np.float32) + d['sparsities'] = {} + d["norms"] = np.linalg.norm(templates, axis=(1, 2)) + for i in range(num_templates): + d['sparsities'][i] = np.arange(templates.shape[2]) + d['templates'][i] = templates[i] / d["norms"][i] + + temporal = temporal[:, :, :rank] + d["temporal"] = np.flip(temporal, axis=1) + d["singular"] = singular[:, :rank] + d["spatial"] = spatial[:, :rank, :] + + d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] + + d["spatial"] = np.moveaxis(d['spatial'][:, :rank, :], [0, 1, 2], [1, 0, 2]) + d['temporal'] = np.moveaxis(d['temporal'][:, :, :rank], [0, 1, 2], [1, 2, 0]) + d['singular'] = d['singular'].T[:, :, np.newaxis] + return d + + @classmethod + def initialize_and_check_kwargs(cls, recording, kwargs): + d = cls._default_params.copy() + d.update(kwargs) + + # assert isinstance(d['waveform_extractor'], WaveformExtractor) + + for v in ["omp_min_sps"]: + assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" + + d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() + d["num_samples"] = 
d["waveform_extractor"].nsamples + d["nbefore"] = d["waveform_extractor"].nbefore + d["nafter"] = d["waveform_extractor"].nafter + d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() + d["vicinity"] *= d["num_samples"] + + if d["noise_levels"] is None: + print("CircusOMPPeeler : noise should be computed outside") + d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) + + if d["templates"] is None: + d = cls._prepare_templates(d) + else: + for key in ["norms", "sparsities"]: + assert d[key] is not None, "If templates are provided, %d should also be there" % key + + d["num_templates"] = len(d["templates"]) + + if d["overlaps"] is None: + d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) + + d["ignored_ids"] = np.array(d["ignored_ids"]) + + omp_min_sps = d["omp_min_sps"] + # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + + return d + + @classmethod + def serialize_method_kwargs(cls, kwargs): + kwargs = dict(kwargs) + # remove waveform_extractor + kwargs.pop("waveform_extractor") + return kwargs + + @classmethod + def unserialize_in_worker(cls, kwargs): + return kwargs + + @classmethod + def get_margin(cls, recording, kwargs): + margin = 2 * max(kwargs["nbefore"], kwargs["nafter"]) + return margin + + @classmethod + def main_function(cls, traces, d): + templates = d["templates"] + num_templates = d["num_templates"] + num_channels = d["num_channels"] + num_samples = d["num_samples"] + overlaps = d["overlaps"] + norms = d["norms"] + nbefore = d["nbefore"] + nafter = d["nafter"] + omp_tol = np.finfo(np.float32).eps + num_samples = d["nafter"] + d["nbefore"] + neighbor_window = num_samples - 1 + min_amplitude, max_amplitude = d["amplitudes"] + sparsities = d["sparsities"] + ignored_ids = d["ignored_ids"] + stop_criteria = d["stop_criteria"] + vicinity = d["vicinity"] + rank = d['rank'] + + num_timesteps = len(traces) + + num_peaks = num_timesteps - num_samples + 1 + conv_shape = (num_templates, num_peaks) + scalar_products = np.zeros(conv_shape, dtype=np.float32) + + # Filter using overlap-and-add convolution + spatially_filtered_data = np.matmul(d['spatial'], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d['singular'] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d['temporal'], axes=2, mode="valid") + scalar_products += np.sum(objective_by_rank, axis=0) + + if len(ignored_ids) > 0: + scalar_products[ignored_ids] = -np.inf + + num_spikes = 0 + + spikes = np.empty(scalar_products.size, dtype=spike_dtype) + idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) + + M = np.zeros((100, 100), dtype=np.float32) + + all_selections = np.empty((2, scalar_products.size), dtype=np.int32) + final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) + num_selection = 0 + + full_sps = scalar_products.copy() + + neighbors = {} + cached_overlaps = {} + + is_valid = scalar_products > stop_criteria + all_amplitudes = np.zeros(0, dtype=np.float32) + is_in_vicinity = np.zeros(0, dtype=np.int32) + + while np.any(is_valid): + best_amplitude_ind = scalar_products[is_valid].argmax() + best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) + + if num_selection > 0: + delta_t = selection[1] - peak_index + idx = 
np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] + myline = num_samples + delta_t[idx] + + if not best_cluster_ind in cached_overlaps: + cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() + + if num_selection == M.shape[0]: + Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) + Z[:num_selection, :num_selection] = M + M = Z + + M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] + + if vicinity == 0: + scipy.linalg.solve_triangular( + M[:num_selection, :num_selection], + M[num_selection, :num_selection], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + + v = nrm2(M[num_selection, :num_selection]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] + + if len(is_in_vicinity) > 0: + L = M[is_in_vicinity, :][:, is_in_vicinity] + + M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular( + L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False + ) + + v = nrm2(M[num_selection, is_in_vicinity]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + M[num_selection, num_selection] = 1.0 + else: + M[0, 0] = 1 + + all_selections[:, num_selection] = [best_cluster_ind, peak_index] + num_selection += 1 + + selection = all_selections[:, :num_selection] + res_sps = full_sps[selection[0], selection[1]] + + if True: # vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) + all_amplitudes /= norms[selection[0]] + else: + # This is not working, need to figure out why + is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) + all_amplitudes = np.append(all_amplitudes, np.float32(1)) + L = M[is_in_vicinity, :][:, is_in_vicinity] + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] + + diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] + modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] + final_amplitudes[selection[0], selection[1]] = all_amplitudes + + for i in modified: + tmp_best, tmp_peak = selection[:, i] + diff_amp = diff_amplitudes[i] * norms[tmp_best] + + if not tmp_best in cached_overlaps: + cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() + + if not tmp_peak in neighbors.keys(): + idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] + tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] + neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} + + idx = neighbors[tmp_peak]["idx"] + tdx = neighbors[tmp_peak]["tdx"] + + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] + scalar_products[:, idx[0] : idx[1]] -= to_add + + is_valid = scalar_products > stop_criteria + + is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) + valid_indices = np.where(is_valid) + + num_spikes = len(valid_indices[0]) + spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] + spikes["channel_index"][:num_spikes] = 0 + spikes["cluster_index"][:num_spikes] = valid_indices[0] + spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + + spikes = spikes[:num_spikes] + order = 
np.argsort(spikes["sample_index"]) + spikes = spikes[order] + + return spikes + + + + class CircusPeeler(BaseTemplateMatchingEngine): """ diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index bedc04a9d5..46c4a53872 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -1,6 +1,6 @@ from .naive import NaiveMatching from .tdc import TridesclousPeeler -from .circus import CircusPeeler, CircusOMPPeeler +from .circus import CircusPeeler, CircusOMPPeeler, CircusOMPSVDPeeler from .wobble import WobbleMatch matching_methods = { @@ -8,5 +8,6 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, + 'circus-omp-svd' : CircusOMPSVDPeeler, "wobble": WobbleMatch, } From 2602ebc5d830ba5945d1d4245c9ffd6020e0c88f Mon Sep 17 00:00:00 2001 From: weiglszonja Date: Tue, 26 Sep 2023 10:11:26 +0200 Subject: [PATCH 171/322] Add ignore_timestamps_errors to extractor --- .../extractors/neoextractors/openephys.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index a771dc47b1..0d9a3887f8 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -45,14 +45,24 @@ class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): If there are several blocks (experiments), specify the block index you want to load. all_annotations: bool (default False) Load exhaustively all annotation from neo. + ignore_timestamps_errors: bool (default False) + Ignore the discontinuous timestamps errors in neo. 
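+
+    Examples
+    --------
+    A minimal usage sketch of the new flag (the folder path below is only a
+    placeholder):
+
+    >>> recording = OpenEphysLegacyRecordingExtractor(
+    ...     "path/to/open-ephys-legacy-folder", ignore_timestamps_errors=True
+    ... )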
""" mode = "folder" NeoRawIOClass = "OpenEphysRawIO" name = "openephyslegacy" - def __init__(self, folder_path, stream_id=None, stream_name=None, block_index=None, all_annotations=False): - neo_kwargs = self.map_to_neo_kwargs(folder_path) + def __init__( + self, + folder_path, + stream_id=None, + stream_name=None, + block_index=None, + all_annotations=False, + ignore_timestamps_errors=False, + ): + neo_kwargs = self.map_to_neo_kwargs(folder_path, ignore_timestamps_errors) NeoBaseRecordingExtractor.__init__( self, stream_id=stream_id, @@ -64,8 +74,8 @@ def __init__(self, folder_path, stream_id=None, stream_name=None, block_index=No self._kwargs.update(dict(folder_path=str(Path(folder_path).absolute()))) @classmethod - def map_to_neo_kwargs(cls, folder_path): - neo_kwargs = {"dirname": str(folder_path)} + def map_to_neo_kwargs(cls, folder_path, ignore_timestamps_errors=False): + neo_kwargs = {"dirname": str(folder_path), "ignore_timestamps_errors": ignore_timestamps_errors} return neo_kwargs From cc4720460127960d5d8cf16248690b3323c6c4a9 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 10:49:57 +0200 Subject: [PATCH 172/322] Increase default rank --- .../sortingcomponents/matching/circus.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e86c913976..bc378fb9a2 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -519,7 +519,7 @@ class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - "rank" : 3, + "rank" : 10, "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], "vicinity": 0, @@ -537,17 +537,20 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() - temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) - # Keep only the strongest components rank = d['rank'] d['templates'] = {} d["norms"] = np.zeros(num_templates, dtype=np.float32) d['sparsities'] = {} - d["norms"] = np.linalg.norm(templates, axis=(1, 2)) - for i in range(num_templates): - d['sparsities'][i] = np.arange(templates.shape[2]) - d['templates'][i] = templates[i] / d["norms"][i] + + for count in range(num_templates): + template = templates[count][:, sparsity[count]] + (d["sparsities"][count],) = np.nonzero(sparsity[count]) + d["norms"][count] = np.linalg.norm(template) + templates[count][:, ~sparsity[count]] = 0 + d["templates"][count] = template / d["norms"][count] + + temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) temporal = temporal[:, :, :rank] d["temporal"] = np.flip(temporal, axis=1) @@ -631,7 +634,6 @@ def main_function(cls, traces, d): num_samples = d["nafter"] + d["nbefore"] neighbor_window = num_samples - 1 min_amplitude, max_amplitude = d["amplitudes"] - sparsities = d["sparsities"] ignored_ids = d["ignored_ids"] stop_criteria = d["stop_criteria"] vicinity = d["vicinity"] From 10c33c1c8645aa7e144bdb8efbc06b993c79c4b0 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 12:01:10 +0200 Subject: [PATCH 173/322] To be tried --- src/spikeinterface/sortingcomponents/matching/circus.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 
bc378fb9a2..8c002a5cc7 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -601,6 +601,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): omp_min_sps = d["omp_min_sps"] # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + #d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) return d @@ -635,7 +636,7 @@ def main_function(cls, traces, d): neighbor_window = num_samples - 1 min_amplitude, max_amplitude = d["amplitudes"] ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"] + stop_criteria = d["stop_criteria"]#[:, np.newaxis] vicinity = d["vicinity"] rank = d['rank'] From 8ea82ee0a43f04c8a51017651710e19eb9a156db Mon Sep 17 00:00:00 2001 From: weiglszonja Date: Tue, 26 Sep 2023 13:17:39 +0200 Subject: [PATCH 174/322] check neo version and pop ignore_timestamps_errors for version 0.12.0 and older --- .../extractors/neoextractors/openephys.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index 0d9a3887f8..cd2b6fb941 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -22,6 +22,19 @@ from spikeinterface.extractors.neuropixels_utils import get_neuropixels_sample_shifts +def drop_invalid_neo_arguments_for_version_0_12_0(neo_kwargs): + # Temporary function until neo version 0.13.0 is released + from packaging.version import Version + from importlib.metadata import version as lib_version + + neo_version = lib_version("neo") + # The possibility of ignoring timestamps errors is not present in neo <= 0.12.0 + if Version(neo_version) <= Version("0.12.0"): + neo_kwargs.pop("ignore_timestamps_errors") + + return neo_kwargs + + class OpenEphysLegacyRecordingExtractor(NeoBaseRecordingExtractor): """ Class for reading data saved by the Open Ephys GUI. 
@@ -76,6 +89,7 @@ def __init__( @classmethod def map_to_neo_kwargs(cls, folder_path, ignore_timestamps_errors=False): neo_kwargs = {"dirname": str(folder_path), "ignore_timestamps_errors": ignore_timestamps_errors} + neo_kwargs = drop_invalid_neo_arguments_for_version_0_12_0(neo_kwargs) return neo_kwargs From 5029445580bc6274ee8845636dd8d09b07e85826 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Tue, 26 Sep 2023 13:25:48 +0200 Subject: [PATCH 175/322] Apply suggestions from code review thanks alessio Co-authored-by: Alessio Buccino --- doc/modules/comparison.rst | 1 - .../comparison/groundtruthstudy.py | 45 +++++++++---------- .../comparison/tests/test_groundtruthstudy.py | 1 - 3 files changed, 20 insertions(+), 27 deletions(-) diff --git a/doc/modules/comparison.rst b/doc/modules/comparison.rst index 9b2e701dac..57e9a0b5ba 100644 --- a/doc/modules/comparison.rst +++ b/doc/modules/comparison.rst @@ -293,7 +293,6 @@ The all mechanism is based on an intrinsic organization into a "study_folder" wi "sorter_name": "tridesclous2", }, }, - # ("tdc2", "toy1"): { "label": "tridesclous2 on tetrode1", "dataset": "toy1", diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 6dc9cb30f0..2d4486bbe4 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -28,24 +28,23 @@ class GroundTruthStudy: """ - This class is an helper function to run any comparison on several "cases" for several ground truth dataset. + This class is an helper function to run any comparison on several "cases" for many ground-truth dataset. - "cases" can be: - * several sorter for comparisons + "cases" refer to: + * several sorters for comparisons * same sorter with differents parameters * parameters of comparisons - * any combination of theses + * any combination of these (and more) - For enough flexibility cases key can be a tuple so that we can varify complexity along several - "levels" or "axis" (paremeters or sorter). - - Generated dataframes will have index with several levels optionaly. + For increased flexibility, cases keys can be a tuple so that we can vary complexity along several + "levels" or "axis" (paremeters or sorters). + In this case, the result dataframes will have `MultiIndex` to handle the different levels. - Ground truth dataset need recording+sorting. This can be from mearec file or from the internal generator - :py:fun:`generate_ground_truth_recording()` + A ground-truth dataset is made of a `Recording` and a `Sorting` object. For example, it can be a simulated dataset with MEArec or internally generated (see + :py:fun:`~spikeinterface.core.generate.generate_ground_truth_recording()`). This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. - Folders structures are not backward compatible at all. + Note that the underlying folder structure is not backward compatible! 
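+
+    A minimal sketch of the expected inputs (case keys, labels and parameter
+    names below are only illustrative):
+
+    >>> recording, gt_sorting = generate_ground_truth_recording()
+    >>> datasets = {"toy_tetrode": (recording, gt_sorting)}
+    >>> cases = {
+    ...     ("tdc2", "toy_tetrode"): {
+    ...         "label": "tridesclous2 on tetrode",
+    ...         "dataset": "toy_tetrode",
+    ...         "run_sorter_params": {"sorter_name": "tridesclous2"},
+    ...     }
+    ... }
+    >>> study = GroundTruthStudy.create(study_folder, datasets=datasets,
+    ...                                 cases=cases, levels=["sorter", "dataset"])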
""" def __init__(self, study_folder): self.folder = Path(study_folder) @@ -85,21 +84,21 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): study_folder.mkdir(exist_ok=False, parents=True) (study_folder / "datasets").mkdir() - (study_folder / "datasets/recordings").mkdir() - (study_folder / "datasets/gt_sortings").mkdir() + (study_folder / "datasets" / "recordings").mkdir() + (study_folder / "datasets" / "gt_sortings").mkdir() (study_folder / "sorters").mkdir() (study_folder / "sortings").mkdir() (study_folder / "sortings" / "run_logs").mkdir() (study_folder / "metrics").mkdir() for key, (rec, gt_sorting) in datasets.items(): - assert "/" not in key - assert "\\" not in key + assert "/" not in key, "'/' cannot be in the key name!" + assert "\\" not in key, "'\\' cannot be in the key name!" - # rec are pickle + # recordings are pickled rec.dump_to_pickle(study_folder / f"datasets/recordings/{key}.pickle") - # sorting are pickle + saved as NumpyFolderSorting + # sortings are pickled + saved as NumpyFolderSorting gt_sorting.dump_to_pickle(study_folder / f"datasets/gt_sortings/{key}.pickle") gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") @@ -108,11 +107,7 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): info["levels"] = levels (study_folder / "info.json").write_text(json.dumps(info, indent=4), encoding="utf8") - # (study_folder / "cases.jon").write_text( - # json.dumps(cases, indent=4, cls=SIJsonEncoder), - # encoding="utf8", - # ) - # cases is dump to a pickle file, json is not possible because of tuple key + # cases is dumped to a pickle file, json is not possible because of the tuple key (study_folder / "cases.pickle").write_bytes(pickle.dumps(cases)) return cls(study_folder) @@ -127,10 +122,10 @@ def scan_folder(self): self.levels = self.info["levels"] - for rec_file in (self.folder / "datasets/recordings").glob("*.pickle"): + for rec_file in (self.folder / "datasets" / "recordings").glob("*.pickle"): key = rec_file.stem rec = load_extractor(rec_file) - gt_sorting = load_extractor(self.folder / f"datasets/gt_sortings/{key}") + gt_sorting = load_extractor(self.folder / f"datasets" / "gt_sortings" / key) self.datasets[key] = (rec, gt_sorting) with open(self.folder / "cases.pickle", "rb") as f: @@ -304,7 +299,7 @@ def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], f case_keys = self.cases.keys() for key in case_keys: - filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + filename = self.folder / "metrics" / f"{self.key_to_str(key)}.csv" if filename.exists(): if force: os.remove(filename) diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index a75ac272be..12d764950e 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -16,7 +16,6 @@ study_folder = cache_folder / "test_groundtruthstudy/" -print(study_folder.absolute()) def setup_module(): if study_folder.is_dir(): From a970899c2e5162e842be6b0237a4338063508513 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Tue, 26 Sep 2023 08:34:03 -0400 Subject: [PATCH 176/322] handle case of if-else copy_binary Co-authored-by: Alessio Buccino --- src/spikeinterface/exporters/to_phy.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py 
b/src/spikeinterface/exporters/to_phy.py index 7de1a128e5..4af6f73b25 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -153,7 +153,10 @@ def export_to_phy( # write params.py with (output_folder / "params.py").open("w") as f: if use_relative_path: - f.write(f"dat_path = r'recording.dat'\n") + if copy_binary: + f.write(f"dat_path = r'recording.dat'\n") + else: + f.write(f"dat_path = r'{str(Path(rec_path).relative_to(output_folder))}'\n") else: f.write(f"dat_path = r'{str(rec_path)}'\n") f.write(f"n_channels_dat = {num_chans}\n") From b2a9b70abeb1fccbfa73e51f604253c0f02c81c0 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 16:33:17 +0200 Subject: [PATCH 177/322] WIP --- src/spikeinterface/sortingcomponents/matching/circus.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 8c002a5cc7..482d36956f 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -519,7 +519,7 @@ class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, - "rank" : 10, + "rank" : 5, "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], "vicinity": 0, @@ -599,9 +599,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["ignored_ids"] = np.array(d["ignored_ids"]) omp_min_sps = d["omp_min_sps"] - # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) - #d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) + #d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) return d @@ -636,7 +635,7 @@ def main_function(cls, traces, d): neighbor_window = num_samples - 1 min_amplitude, max_amplitude = d["amplitudes"] ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"]#[:, np.newaxis] + stop_criteria = d["stop_criteria"][:, np.newaxis] vicinity = d["vicinity"] rank = d['rank'] From 3c94594fdd5ee6a58c2635a2f9a8dba9c8ce500d Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 17:01:51 +0200 Subject: [PATCH 178/322] Working with circus2 --- .../sorters/internal/spyking_circus2.py | 2 +- .../clustering/clustering_tools.py | 7 ++-- .../sortingcomponents/matching/circus.py | 37 ++++++++++--------- 3 files changed, 25 insertions(+), 21 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index db3d88f116..7097b9e56b 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -152,7 +152,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( - recording_f, method="circus-omp", method_kwargs=matching_params, **matching_job_params + recording_f, method="circus-omp-svd", method_kwargs=matching_params, **matching_job_params ) if verbose: diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py 
b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index b87bbc7cee..99836fa293 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -602,8 +602,6 @@ def remove_duplicates_via_matching( "noise_levels": noise_levels, "amplitudes": [0.95, 1.05], "omp_min_sps": 0.1, - "templates": None, - "overlaps": None, } ) @@ -618,7 +616,7 @@ def remove_duplicates_via_matching( method_kwargs.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( - sub_recording, method="circus-omp", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs + sub_recording, method="circus-omp-svd", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) method_kwargs.update( { @@ -626,6 +624,9 @@ def remove_duplicates_via_matching( "templates": computed["templates"], "norms": computed["norms"], "sparsities": computed["sparsities"], + "temporal" : computed["temporal"], + "spatial" : computed["spatial"], + "singular" : computed["singular"], } ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 482d36956f..e955687ed7 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -514,9 +514,6 @@ class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): "amplitudes": [0.6, 2], "omp_min_sps": 0.1, "waveform_extractor": None, - "templates": None, - "overlaps": None, - "norms": None, "random_chunk_kwargs": {}, "noise_levels": None, "rank" : 5, @@ -537,28 +534,34 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() - # Keep only the strongest components - rank = d['rank'] - d['templates'] = {} - d["norms"] = np.zeros(num_templates, dtype=np.float32) + #First, we set masked channels to 0 d['sparsities'] = {} - for count in range(num_templates): template = templates[count][:, sparsity[count]] (d["sparsities"][count],) = np.nonzero(sparsity[count]) - d["norms"][count] = np.linalg.norm(template) templates[count][:, ~sparsity[count]] = 0 - d["templates"][count] = template / d["norms"][count] + # Then we keep only the strongest components + rank = d['rank'] temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) - - temporal = temporal[:, :, :rank] - d["temporal"] = np.flip(temporal, axis=1) + d["temporal"] = temporal[:, :, :rank] d["singular"] = singular[:, :rank] d["spatial"] = spatial[:, :rank, :] - d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] + # We reconstruct the approximated templates + templates = np.matmul(d["temporal"] * d["singular"][:, np.newaxis, :], d["spatial"]) + + d["temporal"] = np.flip(temporal, axis=1) + d['templates'] = {} + d["norms"] = np.zeros(num_templates, dtype=np.float32) + + # And get the norms, saving compressed templates for CC matrix + for count in range(num_templates): + template = templates[count][:, sparsity[count]] + d["norms"][count] = np.linalg.norm(template) + d["templates"][count] = template / d["norms"][count] + d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] d["spatial"] = np.moveaxis(d['spatial'][:, :rank, :], [0, 1, 2], [1, 0, 2]) d['temporal'] = np.moveaxis(d['temporal'][:, :, :rank], [0, 1, 2], [1, 2, 0]) d['singular'] = d['singular'].T[:, :, np.newaxis] @@ -585,15 +588,15 @@ def 
initialize_and_check_kwargs(cls, recording, kwargs): print("CircusOMPPeeler : noise should be computed outside") d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) - if d["templates"] is None: + if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities"]: + for key in ["norms", "sparsities", 'temporal', 'spatial', 'singular']: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) - if d["overlaps"] is None: + if "overlaps" not in d: d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) d["ignored_ids"] = np.array(d["ignored_ids"]) From ad78ef269136a0d4bec37236a79c30f15862581f Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Tue, 26 Sep 2023 12:10:06 -0400 Subject: [PATCH 179/322] improve docstring-feedback --- src/spikeinterface/exporters/to_phy.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 4af6f73b25..edfca0fa52 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -66,7 +66,8 @@ def export_to_phy( verbose: bool If True, output is verbose use_relative_path : bool, default: False - If True saves the `dat_path` as a relative path, else an absolute + If True and `copy_binary=True` saves the binary file `dat_path` in the `params.py` relative to `output_folder` (ie `dat_path=r'recording.dat'`). If `copy_binary=False`, then uses a path relative to the `output_folder` + If False, uses an absolute path in the `params.py` (ie `dat_path=r'path/to/the/recording.dat'`) {} """ From 32d3d7a6aebdaed8757fe6ca994c537e6034927c Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 26 Sep 2023 20:52:40 +0200 Subject: [PATCH 180/322] extract_waveforms_gt must be done on dataset key instead of case key. --- .../comparison/groundtruthstudy.py | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 2d4486bbe4..8a294a88af 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -267,24 +267,29 @@ def get_run_times(self, case_keys=None): return pd.Series(run_times, name="run_time") def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): - + if case_keys is None: case_keys = self.cases.keys() base_folder = self.folder / "waveforms" base_folder.mkdir(exist_ok=True) - for key in case_keys: - dataset_key = self.cases[key]["dataset"] + dataset_keys = [self.cases[key]["dataset"] for key in case_keys] + dataset_keys = set(dataset_keys) + for dataset_key in dataset_keys: + # the waveforms depend on the dataset key + wf_folder = base_folder / self.key_to_str(dataset_key) recording, gt_sorting = self.datasets[dataset_key] - wf_folder = base_folder / self.key_to_str(key) we = extract_waveforms(recording, gt_sorting, folder=wf_folder) def get_waveform_extractor(self, key): # some recording are not dumpable to json and the waveforms extactor need it! 
# so we load it with and put after - we = load_waveforms(self.folder / "waveforms" / self.key_to_str(key), with_recording=False) + # this should be fixed in PR 2027 so remove this after + dataset_key = self.cases[key]["dataset"] + wf_folder = self.folder / "waveforms" / self.key_to_str(dataset_key) + we = load_waveforms(wf_folder, with_recording=False) recording, _ = self.datasets[dataset_key] we.set_recording(recording) return we @@ -298,21 +303,29 @@ def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], f if case_keys is None: case_keys = self.cases.keys() + done = [] for key in case_keys: - filename = self.folder / "metrics" / f"{self.key_to_str(key)}.csv" + dataset_key = self.cases[key]["dataset"] + if dataset_key in done: + # some case can share the same waveform extractor + continue + done.append(dataset_key) + filename = self.folder / "metrics" / f"{self.key_to_str(dataset_key)}.csv" if filename.exists(): if force: os.remove(filename) else: continue - we = self.get_waveform_extractor(key) metrics = compute_quality_metrics(we, metric_names=metric_names) metrics.to_csv(filename, sep="\t", index=True) def get_metrics(self, key): import pandas as pd - filename = self.folder / "metrics" / f"{self.key_to_str(key)}.txt" + + dataset_key = self.cases[key]["dataset"] + + filename = self.folder / "metrics" / f"{self.key_to_str(dataset_key)}.csv" if not filename.exists(): return metrics = pd.read_csv(filename, sep="\t", index_col=0) From 46149ef0730a8965f2ae612e9672419a18dc674c Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 22:29:35 +0200 Subject: [PATCH 181/322] Put OMP with SVD as default --- .../sorters/internal/spyking_circus2.py | 2 +- .../clustering/clustering_tools.py | 2 +- .../sortingcomponents/matching/circus.py | 315 ------------------ .../sortingcomponents/matching/method_list.py | 1 - 4 files changed, 2 insertions(+), 318 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 7097b9e56b..db3d88f116 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -152,7 +152,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( - recording_f, method="circus-omp-svd", method_kwargs=matching_params, **matching_job_params + recording_f, method="circus-omp", method_kwargs=matching_params, **matching_job_params ) if verbose: diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 99836fa293..7a2af09942 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -616,7 +616,7 @@ def remove_duplicates_via_matching( method_kwargs.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( - sub_recording, method="circus-omp-svd", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs + sub_recording, method="circus-omp", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) method_kwargs.update( { diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e955687ed7..aeac69fc86 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ 
b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -195,321 +195,6 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): ----- """ - _default_params = { - "amplitudes": [0.6, 2], - "omp_min_sps": 0.1, - "waveform_extractor": None, - "templates": None, - "overlaps": None, - "norms": None, - "random_chunk_kwargs": {}, - "noise_levels": None, - "sparse_kwargs": {"method": "ptp", "threshold": 1}, - "ignored_ids": [], - "vicinity": 0, - } - - @classmethod - def _prepare_templates(cls, d): - waveform_extractor = d["waveform_extractor"] - num_templates = len(d["waveform_extractor"].sorting.unit_ids) - - if not waveform_extractor.is_sparse(): - sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask - else: - sparsity = waveform_extractor.sparsity.mask - - templates = waveform_extractor.get_all_templates(mode="median").copy() - - d["sparsities"] = {} - d["templates"] = {} - d["norms"] = np.zeros(num_templates, dtype=np.float32) - - for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): - template = templates[count][:, sparsity[count]] - (d["sparsities"][count],) = np.nonzero(sparsity[count]) - d["norms"][count] = np.linalg.norm(template) - d["templates"][count] = template / d["norms"][count] - - return d - - @classmethod - def initialize_and_check_kwargs(cls, recording, kwargs): - d = cls._default_params.copy() - d.update(kwargs) - - # assert isinstance(d['waveform_extractor'], WaveformExtractor) - - for v in ["omp_min_sps"]: - assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" - - d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() - d["num_samples"] = d["waveform_extractor"].nsamples - d["nbefore"] = d["waveform_extractor"].nbefore - d["nafter"] = d["waveform_extractor"].nafter - d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() - d["vicinity"] *= d["num_samples"] - - if d["noise_levels"] is None: - print("CircusOMPPeeler : noise should be computed outside") - d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) - - if d["templates"] is None: - d = cls._prepare_templates(d) - else: - for key in ["norms", "sparsities"]: - assert d[key] is not None, "If templates are provided, %d should also be there" % key - - d["num_templates"] = len(d["templates"]) - - if d["overlaps"] is None: - d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) - - d["ignored_ids"] = np.array(d["ignored_ids"]) - - omp_min_sps = d["omp_min_sps"] - # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) - d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) - - return d - - @classmethod - def serialize_method_kwargs(cls, kwargs): - kwargs = dict(kwargs) - # remove waveform_extractor - kwargs.pop("waveform_extractor") - return kwargs - - @classmethod - def unserialize_in_worker(cls, kwargs): - return kwargs - - @classmethod - def get_margin(cls, recording, kwargs): - margin = 2 * max(kwargs["nbefore"], kwargs["nafter"]) - return margin - - @classmethod - def main_function(cls, traces, d): - templates = d["templates"] - num_templates = d["num_templates"] - num_channels = d["num_channels"] - num_samples = d["num_samples"] - overlaps = d["overlaps"] - norms = d["norms"] - nbefore = d["nbefore"] - nafter = d["nafter"] - omp_tol = np.finfo(np.float32).eps - num_samples = d["nafter"] + d["nbefore"] - neighbor_window = num_samples - 1 - min_amplitude, 
max_amplitude = d["amplitudes"] - sparsities = d["sparsities"] - ignored_ids = d["ignored_ids"] - stop_criteria = d["stop_criteria"] - vicinity = d["vicinity"] - - if "cached_fft_kernels" not in d: - d["cached_fft_kernels"] = {"fshape": 0} - - cached_fft_kernels = d["cached_fft_kernels"] - - num_timesteps = len(traces) - - num_peaks = num_timesteps - num_samples + 1 - - traces = traces.T - - dummy_filter = np.empty((num_channels, num_samples), dtype=np.float32) - dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) - - fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) - fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} - - scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) - - flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] - - for i in range(num_templates): - if i not in ignored_ids: - if i not in cached_fft_kernels or flagged_chunk: - kernel_filter = np.ascontiguousarray(templates[i][::-1].T) - cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) - cached_fft_kernels["fshape"] = fshape[0] - - fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) - - convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") - if len(convolution) > 0: - scalar_products[i] = convolution.sum(0) - else: - scalar_products[i] = 0 - - if len(ignored_ids) > 0: - scalar_products[ignored_ids] = -np.inf - - num_spikes = 0 - - spikes = np.empty(scalar_products.size, dtype=spike_dtype) - idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - - M = np.zeros((100, 100), dtype=np.float32) - - all_selections = np.empty((2, scalar_products.size), dtype=np.int32) - final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) - num_selection = 0 - - full_sps = scalar_products.copy() - - neighbors = {} - cached_overlaps = {} - - is_valid = scalar_products > stop_criteria - all_amplitudes = np.zeros(0, dtype=np.float32) - is_in_vicinity = np.zeros(0, dtype=np.int32) - - while np.any(is_valid): - best_amplitude_ind = scalar_products[is_valid].argmax() - best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) - - if num_selection > 0: - delta_t = selection[1] - peak_index - idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] - myline = num_samples + delta_t[idx] - - if not best_cluster_ind in cached_overlaps: - cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() - - if num_selection == M.shape[0]: - Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) - Z[:num_selection, :num_selection] = M - M = Z - - M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] - - if vicinity == 0: - scipy.linalg.solve_triangular( - M[:num_selection, :num_selection], - M[num_selection, :num_selection], - trans=0, - lower=1, - overwrite_b=True, - check_finite=False, - ) - - v = nrm2(M[num_selection, :num_selection]) ** 2 - Lkk = 1 - v - if Lkk <= omp_tol: # selected atoms are dependent - break - M[num_selection, num_selection] = np.sqrt(Lkk) - else: - is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] - - if len(is_in_vicinity) > 0: - L = M[is_in_vicinity, :][:, is_in_vicinity] - - M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular( - L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False - ) - - v = nrm2(M[num_selection, is_in_vicinity]) ** 2 - Lkk = 1 - v - if Lkk 
<= omp_tol: # selected atoms are dependent - break - M[num_selection, num_selection] = np.sqrt(Lkk) - else: - M[num_selection, num_selection] = 1.0 - else: - M[0, 0] = 1 - - all_selections[:, num_selection] = [best_cluster_ind, peak_index] - num_selection += 1 - - selection = all_selections[:, :num_selection] - res_sps = full_sps[selection[0], selection[1]] - - if True: # vicinity == 0: - all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) - all_amplitudes /= norms[selection[0]] - else: - # This is not working, need to figure out why - is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) - all_amplitudes = np.append(all_amplitudes, np.float32(1)) - L = M[is_in_vicinity, :][:, is_in_vicinity] - all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) - all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] - - diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] - modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] - final_amplitudes[selection[0], selection[1]] = all_amplitudes - - for i in modified: - tmp_best, tmp_peak = selection[:, i] - diff_amp = diff_amplitudes[i] * norms[tmp_best] - - if not tmp_best in cached_overlaps: - cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() - - if not tmp_peak in neighbors.keys(): - idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] - tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] - neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} - - idx = neighbors[tmp_peak]["idx"] - tdx = neighbors[tmp_peak]["tdx"] - - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] - scalar_products[:, idx[0] : idx[1]] -= to_add - - is_valid = scalar_products > stop_criteria - - is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < max_amplitude) - valid_indices = np.where(is_valid) - - num_spikes = len(valid_indices[0]) - spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] - spikes["channel_index"][:num_spikes] = 0 - spikes["cluster_index"][:num_spikes] = valid_indices[0] - spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] - - spikes = spikes[:num_spikes] - order = np.argsort(spikes["sample_index"]) - spikes = spikes[order] - - return spikes - - -class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): - """ - Orthogonal Matching Pursuit inspired from Spyking Circus sorter - - https://elifesciences.org/articles/34518 - - This is an Orthogonal Template Matching algorithm. For speed and - memory optimization, templates are automatically sparsified. Signal - is convolved with the templates, and as long as some scalar products - are higher than a given threshold, we use a Cholesky decomposition - to compute the optimal amplitudes needed to reconstruct the signal. - - IMPORTANT NOTE: small chunks are more efficient for such Peeler, - consider using 100ms chunk - - Parameters - ---------- - amplitude: tuple - (Minimal, Maximal) amplitudes allowed for every template - omp_min_sps: float - Stopping criteria of the OMP algorithm, in percentage of the norm - noise_levels: array - The noise levels, for every channels. If None, they will be automatically - computed - random_chunk_kwargs: dict - Parameters for computing noise levels, if not provided (sub optimal) - sparse_kwargs: dict - Parameters to extract a sparsity mask from the waveform_extractor, if not - already sparse. 
- ----- - """ - _default_params = { "amplitudes": [0.6, 2], "omp_min_sps": 0.1, diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index 46c4a53872..c00c0a1fd3 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -8,6 +8,5 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, - 'circus-omp-svd' : CircusOMPSVDPeeler, "wobble": WobbleMatch, } From f21d80bf3cb34e5f39d59a7692a0c594025ea7b8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 20:32:10 +0000 Subject: [PATCH 182/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../clustering/clustering_tools.py | 6 +-- .../sortingcomponents/matching/circus.py | 44 +++++++++---------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 7a2af09942..c1b635fdaf 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -624,9 +624,9 @@ def remove_duplicates_via_matching( "templates": computed["templates"], "norms": computed["norms"], "sparsities": computed["sparsities"], - "temporal" : computed["temporal"], - "spatial" : computed["spatial"], - "singular" : computed["singular"], + "temporal": computed["temporal"], + "spatial": computed["spatial"], + "singular": computed["singular"], } ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index aeac69fc86..d2b02ea15d 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -201,7 +201,7 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): "waveform_extractor": None, "random_chunk_kwargs": {}, "noise_levels": None, - "rank" : 5, + "rank": 5, "sparse_kwargs": {"method": "ptp", "threshold": 1}, "ignored_ids": [], "vicinity": 0, @@ -219,37 +219,37 @@ def _prepare_templates(cls, d): templates = waveform_extractor.get_all_templates(mode="median").copy() - #First, we set masked channels to 0 - d['sparsities'] = {} + # First, we set masked channels to 0 + d["sparsities"] = {} for count in range(num_templates): template = templates[count][:, sparsity[count]] (d["sparsities"][count],) = np.nonzero(sparsity[count]) templates[count][:, ~sparsity[count]] = 0 # Then we keep only the strongest components - rank = d['rank'] + rank = d["rank"] temporal, singular, spatial = np.linalg.svd(templates, full_matrices=False) d["temporal"] = temporal[:, :, :rank] d["singular"] = singular[:, :rank] d["spatial"] = spatial[:, :rank, :] - + # We reconstruct the approximated templates templates = np.matmul(d["temporal"] * d["singular"][:, np.newaxis, :], d["spatial"]) d["temporal"] = np.flip(temporal, axis=1) - d['templates'] = {} + d["templates"] = {} d["norms"] = np.zeros(num_templates, dtype=np.float32) - + # And get the norms, saving compressed templates for CC matrix for count in range(num_templates): template = templates[count][:, sparsity[count]] d["norms"][count] = 
np.linalg.norm(template) - d["templates"][count] = template / d["norms"][count] - - d['temporal'] /= d['norms'][:, np.newaxis, np.newaxis] - d["spatial"] = np.moveaxis(d['spatial'][:, :rank, :], [0, 1, 2], [1, 0, 2]) - d['temporal'] = np.moveaxis(d['temporal'][:, :, :rank], [0, 1, 2], [1, 2, 0]) - d['singular'] = d['singular'].T[:, :, np.newaxis] + d["templates"][count] = template / d["norms"][count] + + d["temporal"] /= d["norms"][:, np.newaxis, np.newaxis] + d["spatial"] = np.moveaxis(d["spatial"][:, :rank, :], [0, 1, 2], [1, 0, 2]) + d["temporal"] = np.moveaxis(d["temporal"][:, :, :rank], [0, 1, 2], [1, 2, 0]) + d["singular"] = d["singular"].T[:, :, np.newaxis] return d @classmethod @@ -276,7 +276,7 @@ def initialize_and_check_kwargs(cls, recording, kwargs): if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities", 'temporal', 'spatial', 'singular']: + for key in ["norms", "sparsities", "temporal", "spatial", "singular"]: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) @@ -287,8 +287,8 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["ignored_ids"] = np.array(d["ignored_ids"]) omp_min_sps = d["omp_min_sps"] - #d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) - d['stop_criteria'] = omp_min_sps * np.maximum(d['norms'], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) + # d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + d["stop_criteria"] = omp_min_sps * np.maximum(d["norms"], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) return d @@ -325,18 +325,18 @@ def main_function(cls, traces, d): ignored_ids = d["ignored_ids"] stop_criteria = d["stop_criteria"][:, np.newaxis] vicinity = d["vicinity"] - rank = d['rank'] + rank = d["rank"] num_timesteps = len(traces) num_peaks = num_timesteps - num_samples + 1 conv_shape = (num_templates, num_peaks) scalar_products = np.zeros(conv_shape, dtype=np.float32) - + # Filter using overlap-and-add convolution - spatially_filtered_data = np.matmul(d['spatial'], traces.T[np.newaxis, :, :]) - scaled_filtered_data = spatially_filtered_data * d['singular'] - objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d['temporal'], axes=2, mode="valid") + spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d["singular"] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") scalar_products += np.sum(objective_by_rank, axis=0) if len(ignored_ids) > 0: @@ -473,8 +473,6 @@ def main_function(cls, traces, d): return spikes - - class CircusPeeler(BaseTemplateMatchingEngine): """ From a275bcaaf14819e64aa24a78a504b134f1d9288e Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 22:32:57 +0200 Subject: [PATCH 183/322] Patch --- src/spikeinterface/sortingcomponents/matching/method_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index c00c0a1fd3..bedc04a9d5 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -1,6 +1,6 @@ from .naive import NaiveMatching from .tdc import TridesclousPeeler -from .circus import CircusPeeler, CircusOMPPeeler, CircusOMPSVDPeeler +from .circus import 
CircusPeeler, CircusOMPPeeler from .wobble import WobbleMatch matching_methods = { From 85eb432c16a0719520a8dcbb24d2c8bb2c804d60 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Tue, 26 Sep 2023 22:44:15 +0200 Subject: [PATCH 184/322] Cleaning useless functions --- .../clustering/clustering_tools.py | 6 -- .../sortingcomponents/matching/circus.py | 95 ------------------- 2 files changed, 101 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index c1b635fdaf..5ff74db3e7 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -546,7 +546,6 @@ def remove_duplicates_via_matching( from spikeinterface.core import NumpySorting from spikeinterface.core import extract_waveforms from spikeinterface.core import get_global_tmp_folder - from spikeinterface.sortingcomponents.matching.circus import get_scipy_shape import string, random, shutil, os from pathlib import Path @@ -591,11 +590,6 @@ def remove_duplicates_via_matching( chunk_size = duration + 3 * margin - dummy_filter = np.empty((num_chans, duration), dtype=np.float32) - dummy_traces = np.empty((num_chans, chunk_size), dtype=np.float32) - - fshape, axes = get_scipy_shape(dummy_filter, dummy_traces, axes=1) - method_kwargs.update( { "waveform_extractor": waveform_extractor, diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index d2b02ea15d..ec6ef3a292 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -35,101 +35,6 @@ ################# # Circus peeler # -################# - -from scipy.fft._helper import _init_nd_shape_and_axes - -try: - from scipy.signal.signaltools import _init_freq_conv_axes, _apply_conv_mode -except Exception: - from scipy.signal._signaltools import _init_freq_conv_axes, _apply_conv_mode -from scipy import linalg, fft as sp_fft - - -def get_scipy_shape(in1, in2, mode="full", axes=None, calc_fast_len=True): - in1 = np.asarray(in1) - in2 = np.asarray(in2) - - if in1.ndim == in2.ndim == 0: # scalar inputs - return in1 * in2 - elif in1.ndim != in2.ndim: - raise ValueError("in1 and in2 should have the same dimensionality") - elif in1.size == 0 or in2.size == 0: # empty arrays - return np.array([]) - - in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) - - s1 = in1.shape - s2 = in2.shape - - shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] - - if not len(axes): - return in1 * in2 - - complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c" - - if calc_fast_len: - # Speed up FFT by padding to optimal size. 
- fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] - else: - fshape = shape - - return fshape, axes - - -def fftconvolve_with_cache(in1, in2, cache, mode="full", axes=None): - in1 = np.asarray(in1) - in2 = np.asarray(in2) - - if in1.ndim == in2.ndim == 0: # scalar inputs - return in1 * in2 - elif in1.ndim != in2.ndim: - raise ValueError("in1 and in2 should have the same dimensionality") - elif in1.size == 0 or in2.size == 0: # empty arrays - return np.array([]) - - in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) - - s1 = in1.shape - s2 = in2.shape - - shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] - - ret = _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True) - - return _apply_conv_mode(ret, s1, s2, mode, axes) - - -def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): - if not len(axes): - return in1 * in2 - - complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c" - - if calc_fast_len: - # Speed up FFT by padding to optimal size. - fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] - else: - fshape = shape - - if not complex_result: - fft, ifft = sp_fft.rfftn, sp_fft.irfftn - else: - fft, ifft = sp_fft.fftn, sp_fft.ifftn - - sp1 = cache["full"][cache["mask"]] - sp2 = cache["template"] - - # sp2 = fft(in2[cache['mask']], fshape, axes=axes) - ret = ifft(sp1 * sp2, fshape, axes=axes) - - if calc_fast_len: - fslice = tuple([slice(sz) for sz in shape]) - ret = ret[fslice] - - return ret - def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) From 15ae43215bf5a3b49a52081e18ad8ba3810bce15 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 26 Sep 2023 20:44:37 +0000 Subject: [PATCH 185/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/matching/circus.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index ec6ef3a292..7bef8358de 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -36,6 +36,7 @@ ################# # Circus peeler # + def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) From 57bb3a734978d207f12733eb4c4807cb8e22c06f Mon Sep 17 00:00:00 2001 From: Windows Home Date: Tue, 26 Sep 2023 22:54:41 -0500 Subject: [PATCH 186/322] Implement more tests to ensure int and string unit IDs merging, inheriting labels, etc. 
--- .../curation/sortingview_curation.py | 49 +++-- .../tests/test_sortingview_curation.py | 195 +++++++++++++++--- 2 files changed, 202 insertions(+), 42 deletions(-) diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index f595a67a3f..b7f0cab6a0 100644 --- a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -57,38 +57,52 @@ def apply_sortingview_curation( unit_ids_dtype = sorting.unit_ids.dtype # STEP 1: merge groups + labels_dict = sortingview_curation_dict["labelsByUnit"] if "mergeGroups" in sortingview_curation_dict and not skip_merge: merge_groups = sortingview_curation_dict["mergeGroups"] - for mg in merge_groups: + for merge_group in merge_groups: + # Store labels of units that are about to be merged + labels_to_inherit = [] + for unit in merge_group: + labels_to_inherit.extend(labels_dict.get(str(unit), [])) + labels_to_inherit = list(set(labels_to_inherit)) # Remove duplicates + if verbose: - print(f"Merging {mg}") + print(f"Merging {merge_group}") if unit_ids_dtype.kind in ("U", "S"): # if unit dtype is str, set new id as "{unit1}-{unit2}" - new_unit_id = "-".join(mg) + new_unit_id = "-".join(merge_group) + curation_sorting.merge(merge_group, new_unit_id=new_unit_id) else: # in this case, the CurationSorting takes care of finding a new unused int - new_unit_id = None - curation_sorting.merge(mg, new_unit_id=new_unit_id) + curation_sorting.merge(merge_group, new_unit_id=None) + new_unit_id = curation_sorting.max_used_id # merged unit id + labels_dict[str(new_unit_id)] = labels_to_inherit # STEP 2: gather and apply sortingview curation labels - # In sortingview, a unit is not required to have all labels. # For example, the first 3 units could be labeled as "accept". 
# In this case, the first 3 values of the property "accept" will be True, the rest False - labels_dict = sortingview_curation_dict["labelsByUnit"] - properties = {} - for _, labels in labels_dict.items(): - for label in labels: - if label not in properties: - properties[label] = np.zeros(len(curation_sorting.current_sorting.unit_ids), dtype=bool) + + # Initialize the properties dictionary + properties = {label: np.zeros(len(curation_sorting.current_sorting.unit_ids), dtype=bool) + for labels in labels_dict.values() for label in labels} + + # Populate the properties dictionary for u_i, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): - labels_unit = [] - unit_id_parts = str(unit_id).split('-') - for unit_label, labels in labels_dict.items(): - if unit_label in unit_id_parts: - labels_unit.extend(labels) + labels_unit = set() + + # Check for exact match first + if str(unit_id) in labels_dict: + labels_unit.update(labels_dict[str(unit_id)]) + # If no exact match, check if unit_label is a substring of unit_id (for string unit ID merged unit) + else: + for unit_label, labels in labels_dict.items(): + if isinstance(unit_id, str) and unit_label in unit_id: + labels_unit.update(labels) for label in labels_unit: properties[label][u_i] = True + for prop_name, prop_values in properties.items(): curation_sorting.current_sorting.set_property(prop_name, prop_values) @@ -104,5 +118,4 @@ def apply_sortingview_curation( units_to_remove.extend(unit_ids[curation_sorting.current_sorting.get_property(exclude_label) == True]) units_to_remove = np.unique(units_to_remove) curation_sorting.remove_units(units_to_remove) - return curation_sorting.current_sorting diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index c8a0788223..48923aa15d 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -3,6 +3,7 @@ import os import json import numpy as np + import spikeinterface as si import spikeinterface.extractors as se from spikeinterface.extractors import read_mearec @@ -14,11 +15,11 @@ compute_spike_amplitudes, ) from spikeinterface.curation import apply_sortingview_curation - if hasattr(pytest, "global_test_folder"): cache_folder = pytest.global_test_folder / "curation" else: cache_folder = Path("cache_folder") / "curation" + parent_folder = Path(__file__).parent ON_GITHUB = bool(os.getenv("GITHUB_ACTIONS")) KACHERY_CLOUD_SET = bool(os.getenv("KACHERY_CLOUD_CLIENT_ID")) and bool(os.getenv("KACHERY_CLOUD_PRIVATE_KEY")) @@ -27,6 +28,7 @@ set_global_tmp_folder(cache_folder) + # this needs to be run only once def generate_sortingview_curation_dataset(): import spikeinterface.widgets as sw @@ -50,15 +52,15 @@ def generate_sortingview_curation_dataset(): @pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available") def test_gh_curation(): + """ + Test curation using GitHub URI. 
+ """ local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5") _, sorting = read_mearec(local_path) - - # from GH # curated link: # https://figurl.org/f?v=gs://figurl/spikesortingview-10&d=sha1://bd53f6b707f8121cadc901562a89b67aec81cc81&label=SpikeInterface%20-%20Sorting%20Summary&s={%22sortingCuration%22:%22gh://alejoe91/spikeinterface/fix-codecov/spikeinterface/curation/tests/sv-sorting-curation.json%22} gh_uri = "gh://SpikeInterface/spikeinterface/main/src/spikeinterface/curation/tests/sv-sorting-curation.json" sorting_curated_gh = apply_sortingview_curation(sorting, uri_or_json=gh_uri, verbose=True) - print(f"From GH: {sorting_curated_gh}") assert len(sorting_curated_gh.unit_ids) == 9 assert "#8-#9" in sorting_curated_gh.unit_ids @@ -75,9 +77,13 @@ def test_gh_curation(): assert len(sorting_curated_gh_mua.unit_ids) == 6 assert len(sorting_curated_gh_art_mua.unit_ids) == 5 + print("Test for GH passed!\n") @pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available") def test_sha1_curation(): + """ + Test curation using SHA1 URI. + """ local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5") _, sorting = read_mearec(local_path) @@ -93,7 +99,7 @@ def test_sha1_curation(): assert "accept" in sorting_curated_sha1.get_property_keys() assert "mua" in sorting_curated_sha1.get_property_keys() assert "artifact" in sorting_curated_sha1.get_property_keys() - + unit_ids = sorting_curated_sha1.unit_ids sorting_curated_sha1_accepted = apply_sortingview_curation(sorting, uri_or_json=sha1_uri, include_labels=["accept"]) sorting_curated_sha1_mua = apply_sortingview_curation(sorting, uri_or_json=sha1_uri, exclude_labels=["mua"]) sorting_curated_sha1_art_mua = apply_sortingview_curation( @@ -103,19 +109,21 @@ def test_sha1_curation(): assert len(sorting_curated_sha1_mua.unit_ids) == 6 assert len(sorting_curated_sha1_art_mua.unit_ids) == 5 + print("Test for sha1 curation passed!\n") def test_json_curation(): + """ + Test curation using a JSON file. + """ local_path = si.download_dataset(remote_path="mearec/mearec_test_10s.h5") _, sorting = read_mearec(local_path) # from curation.json json_file = parent_folder / "sv-sorting-curation.json" - sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) print(f"Sorting: {sorting.get_unit_ids()}") - print(f"From JSON: {sorting_curated_json}") + sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) assert len(sorting_curated_json.unit_ids) == 9 - print(sorting_curated_json.unit_ids) assert "#8-#9" in sorting_curated_json.unit_ids assert "accept" in sorting_curated_json.get_property_keys() assert "mua" in sorting_curated_json.get_property_keys() @@ -131,20 +139,23 @@ def test_json_curation(): assert len(sorting_curated_json_accepted.unit_ids) == 3 assert len(sorting_curated_json_mua.unit_ids) == 6 assert len(sorting_curated_json_mua1.unit_ids) == 5 + + print("Test for json curation passed!\n") def test_false_positive_curation(): + """ + Test curation for false positives. + """ # https://spikeinterface.readthedocs.io/en/latest/modules_gallery/core/plot_2_sorting_extractor.html sampling_frequency = 30000. duration = 20. 
num_timepoints = int(sampling_frequency * duration) num_units = 20 num_spikes = 1000 - times0 = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) - labels0 = np.random.randint(1, num_units + 1, size=num_spikes) - times1 = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) - labels1 = np.random.randint(1, num_units + 1, size=num_spikes) + times = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) + labels = np.random.randint(1, num_units + 1, size=num_spikes) - sorting = se.NumpySorting.from_times_labels([times0, times1], [labels0, labels1], sampling_frequency) + sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) print('Sorting: {}'.format(sorting.get_unit_ids())) # Test curation JSON: @@ -161,23 +172,159 @@ def test_false_positive_curation(): with open(json_path, 'w') as f: json.dump(test_json, f, indent=4) + # Apply curation sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) - accept_idx = np.where(sorting_curated_json.get_property("accept"))[0] - sorting_curated_ids = sorting_curated_json.get_unit_ids() - print(f'Accepted unit IDs: {sorting_curated_ids[accept_idx]}') - - # Check if unit_id 1 has received the "accept" label. - assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") - # Check if unit_id 10 has received the "accept" label. - # If so, test fails since only unit_id 1 received the "accept" label in test_json. - assert not sorting_curated_json.get_unit_property(unit_id=10, key="accept") - print(sorting_curated_json.unit_ids) - # Merging unit_ids of dtype int creates a new unit id + print('Curated:', sorting_curated_json.get_unit_ids()) + + # Assertions + assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") + assert not sorting_curated_json.get_unit_property(unit_id=10, key="accept") assert 21 in sorting_curated_json.unit_ids + print("False positive test for integer unit IDs passed!\n") + +def test_label_inheritance_int(): + """ + Test curation for label inheritance for integer unit IDs. + """ + # Setup + sampling_frequency = 30000. + duration = 20. 
+ num_timepoints = int(sampling_frequency * duration) + num_spikes = 1000 + times = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) + labels = np.random.randint(1, 8, size=num_spikes) # 7 units: 1 to 7 + + sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) + + # Create a curation JSON with labels and merge groups + curation_dict = { + "labelsByUnit": { + "1": ["mua"], + "2": ["mua"], + "3": ["reject"], + "4": ["noise"], + "5": ["accept"], + "6": ["accept"], + "7": ["accept"] + }, + "mergeGroups": [[1, 2], [3, 4], [5, 6]] + } + + json_path = "test_curation_int.json" + with open(json_path, 'w') as f: + json.dump(curation_dict, f, indent=4) + + # Apply curation + sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_path) + + # Assertions for merged units + print(f"Merge only: {sorting_merge.get_unit_ids()}") + assert sorting_merge.get_unit_property(unit_id=8, key="mua") # 8 = merged unit of 1 and 2 + assert not sorting_merge.get_unit_property(unit_id=8, key="reject") + assert not sorting_merge.get_unit_property(unit_id=8, key="noise") + assert not sorting_merge.get_unit_property(unit_id=8, key="accept") + + assert not sorting_merge.get_unit_property(unit_id=9, key="mua") # 9 = merged unit of 3 and 4 + assert sorting_merge.get_unit_property(unit_id=9, key="reject") + assert sorting_merge.get_unit_property(unit_id=9, key="noise") + assert not sorting_merge.get_unit_property(unit_id=9, key="accept") + + assert not sorting_merge.get_unit_property(unit_id=10, key="mua") # 10 = merged unit of 5 and 6 + assert not sorting_merge.get_unit_property(unit_id=10, key="reject") + assert not sorting_merge.get_unit_property(unit_id=10, key="noise") + assert sorting_merge.get_unit_property(unit_id=10, key="accept") + + # Assertions for exclude_labels + sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_path, exclude_labels=["noise"]) + print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") + assert 9 not in sorting_exclude_noise.get_unit_ids() + + # Assertions for include_labels + sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_path, include_labels=["accept"]) + print(f"Include accept: {sorting_include_accept.get_unit_ids()}") + assert 8 not in sorting_include_accept.get_unit_ids() + assert 9 not in sorting_include_accept.get_unit_ids() + assert 10 in sorting_include_accept.get_unit_ids() + + print("Test for integer unit IDs passed!\n") + + +def test_label_inheritance_str(): + """ + Test curation for label inheritance for string unit IDs. + """ + sampling_frequency = 30000. + duration = 20. 
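The inheritance rule these assertions check is a plain set-union over each merge group, computed before the merge so the new unit keeps every label of its members. A sketch of that step in isolation (plain dicts, names illustrative):

    # labels keyed by unit id as strings, as in the sortingview curation JSON
    labels_by_unit = {"1": ["mua"], "2": ["mua"], "3": ["reject"], "4": ["noise"]}
    merge_groups = [[1, 2], [3, 4]]

    for merge_group in merge_groups:
        inherited = []
        for unit in merge_group:
            inherited.extend(labels_by_unit.get(str(unit), []))
        inherited = list(set(inherited))  # drop duplicates
        # string unit ids are joined as "unit1-unit2"; integer ids instead get the
        # next unused integer from CurationSorting (max_used_id), as tested above
        merged_id = "-".join(str(u) for u in merge_group)
        labels_by_unit[merged_id] = inherited

    # labels_by_unit now also maps "1-2" to ["mua"] and "3-4" to both "reject" and "noise"
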
+ num_timepoints = int(sampling_frequency * duration) + num_spikes = 1000 + times = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) + labels = np.random.choice(['a', 'b', 'c', 'd', 'e', 'f', 'g'], size=num_spikes) + + sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) + print(f"Sorting: {sorting.get_unit_ids()}") + # Create a curation JSON with labels and merge groups + curation_dict = { + "labelsByUnit": { + "a": ["mua"], + "b": ["mua"], + "c": ["reject"], + "d": ["noise"], + "e": ["accept"], + "f": ["accept"], + "g": ["accept"] + }, + "mergeGroups": [["a", "b"], ["c", "d"], ["e", "f"]] + } + + json_path = "test_curation_str.json" + with open(json_path, 'w') as f: + json.dump(curation_dict, f, indent=4) + + # Check label inheritance for merged units + merged_id_1 = "a-b" + merged_id_2 = "c-d" + merged_id_3 = "e-f" + # Apply curation + sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) + + # Assertions for merged units + print(f"Merge only: {sorting_merge.get_unit_ids()}") + assert sorting_merge.get_unit_property(unit_id="a-b", key="mua") + assert not sorting_merge.get_unit_property(unit_id="a-b", key="reject") + assert not sorting_merge.get_unit_property(unit_id="a-b", key="noise") + assert not sorting_merge.get_unit_property(unit_id="a-b", key="accept") + + assert not sorting_merge.get_unit_property(unit_id="c-d", key="mua") + assert sorting_merge.get_unit_property(unit_id="c-d", key="reject") + assert sorting_merge.get_unit_property(unit_id="c-d", key="noise") + assert not sorting_merge.get_unit_property(unit_id="c-d", key="accept") + + assert not sorting_merge.get_unit_property(unit_id="e-f", key="mua") + assert not sorting_merge.get_unit_property(unit_id="e-f", key="reject") + assert not sorting_merge.get_unit_property(unit_id="e-f", key="noise") + assert sorting_merge.get_unit_property(unit_id="e-f", key="accept") + + # Assertions for exclude_labels + sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_path, exclude_labels=["noise"]) + print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") + assert "c-d" not in sorting_exclude_noise.get_unit_ids() + + # Assertions for include_labels + sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_path, include_labels=["accept"]) + print(f"Include accept: {sorting_include_accept.get_unit_ids()}") + assert "a-b" not in sorting_include_accept.get_unit_ids() + assert "c-d" not in sorting_include_accept.get_unit_ids() + assert "e-f" in sorting_include_accept.get_unit_ids() + + print("Test for string unit IDs passed!\n") + + if __name__ == "__main__": # generate_sortingview_curation_dataset() test_sha1_curation() test_gh_curation() test_json_curation() test_false_positive_curation() + test_label_inheritance_int() + test_label_inheritance_str() \ No newline at end of file From a8e07a71d8306550a20a6a611222fb76190d3178 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 04:01:49 +0000 Subject: [PATCH 187/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../curation/sortingview_curation.py | 9 ++++-- .../tests/test_sortingview_curation.py | 31 ++++++++++--------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index 7ae8e41030..f83ff3352b 100644 --- 
a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -76,7 +76,7 @@ def apply_sortingview_curation( else: # in this case, the CurationSorting takes care of finding a new unused int curation_sorting.merge(merge_group, new_unit_id=None) - new_unit_id = curation_sorting.max_used_id # merged unit id + new_unit_id = curation_sorting.max_used_id # merged unit id labels_dict[str(new_unit_id)] = labels_to_inherit # STEP 2: gather and apply sortingview curation labels @@ -85,8 +85,11 @@ def apply_sortingview_curation( # In this case, the first 3 values of the property "accept" will be True, the rest False # Initialize the properties dictionary - properties = {label: np.zeros(len(curation_sorting.current_sorting.unit_ids), dtype=bool) - for labels in labels_dict.values() for label in labels} + properties = { + label: np.zeros(len(curation_sorting.current_sorting.unit_ids), dtype=bool) + for labels in labels_dict.values() + for label in labels + } # Populate the properties dictionary for u_i, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 958df6acb5..cfc15013a3 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -15,6 +15,7 @@ compute_spike_amplitudes, ) from spikeinterface.curation import apply_sortingview_curation + if hasattr(pytest, "global_test_folder"): cache_folder = pytest.global_test_folder / "curation" else: @@ -28,7 +29,6 @@ set_global_tmp_folder(cache_folder) - # this needs to be run only once def generate_sortingview_curation_dataset(): import spikeinterface.widgets as sw @@ -79,6 +79,7 @@ def test_gh_curation(): print("Test for GH passed!\n") + @pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available") def test_sha1_curation(): """ @@ -111,6 +112,7 @@ def test_sha1_curation(): print("Test for sha1 curation passed!\n") + def test_json_curation(): """ Test curation using a JSON file. @@ -157,7 +159,7 @@ def test_false_positive_curation(): labels = np.random.randint(1, num_units + 1, size=num_spikes) sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) - print('Sorting: {}'.format(sorting.get_unit_ids())) + print("Sorting: {}".format(sorting.get_unit_ids())) # Test curation JSON: test_json = {"labelsByUnit": {"1": ["accept"], "2": ["artifact"], "12": ["artifact"]}, "mergeGroups": [[2, 12]]} @@ -168,7 +170,7 @@ def test_false_positive_curation(): # Apply curation sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) - print('Curated:', sorting_curated_json.get_unit_ids()) + print("Curated:", sorting_curated_json.get_unit_ids()) # Assertions assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") @@ -177,13 +179,14 @@ def test_false_positive_curation(): print("False positive test for integer unit IDs passed!\n") + def test_label_inheritance_int(): """ Test curation for label inheritance for integer unit IDs. """ # Setup - sampling_frequency = 30000. - duration = 20. 
+ sampling_frequency = 30000.0 + duration = 20.0 num_timepoints = int(sampling_frequency * duration) num_spikes = 1000 times = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) @@ -200,13 +203,13 @@ def test_label_inheritance_int(): "4": ["noise"], "5": ["accept"], "6": ["accept"], - "7": ["accept"] + "7": ["accept"], }, - "mergeGroups": [[1, 2], [3, 4], [5, 6]] + "mergeGroups": [[1, 2], [3, 4], [5, 6]], } json_path = "test_curation_int.json" - with open(json_path, 'w') as f: + with open(json_path, "w") as f: json.dump(curation_dict, f, indent=4) # Apply curation @@ -248,12 +251,12 @@ def test_label_inheritance_str(): """ Test curation for label inheritance for string unit IDs. """ - sampling_frequency = 30000. - duration = 20. + sampling_frequency = 30000.0 + duration = 20.0 num_timepoints = int(sampling_frequency * duration) num_spikes = 1000 times = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) - labels = np.random.choice(['a', 'b', 'c', 'd', 'e', 'f', 'g'], size=num_spikes) + labels = np.random.choice(["a", "b", "c", "d", "e", "f", "g"], size=num_spikes) sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) print(f"Sorting: {sorting.get_unit_ids()}") @@ -266,13 +269,13 @@ def test_label_inheritance_str(): "d": ["noise"], "e": ["accept"], "f": ["accept"], - "g": ["accept"] + "g": ["accept"], }, - "mergeGroups": [["a", "b"], ["c", "d"], ["e", "f"]] + "mergeGroups": [["a", "b"], ["c", "d"], ["e", "f"]], } json_path = "test_curation_str.json" - with open(json_path, 'w') as f: + with open(json_path, "w") as f: json.dump(curation_dict, f, indent=4) # Check label inheritance for merged units From d48cd681f97fcee2374b65a97f0ecbc9d10b4588 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 09:02:05 +0200 Subject: [PATCH 188/322] implement some TODOs --- .../comparison/groundtruthstudy.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 8a294a88af..34777c6f20 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -22,8 +22,6 @@ # This is to separate names when the key are tuples when saving folders _key_separator = " ## " -# This would be more funny -# _key_separator = " (°_°) " class GroundTruthStudy: @@ -184,8 +182,12 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True continue if sorting_exists: - # TODO : delete sorting + log - pass + # delete older sorting + log before running sorters + shutil.rmtree(sorting_exists) + log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" + if log_file.exists(): + log_file.unlink() + params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given @@ -201,7 +203,7 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) - # TODO create a list in laucher for engine blocking and non-blocking + # TODO later create a list in laucher for engine blocking and non-blocking if engine not in ("slurm", ): self.copy_sortings(case_keys) @@ -223,8 +225,10 @@ def copy_sortings(self, case_keys=None, force=True): if sorting is not None: if sorting_folder.exists(): if force: - # TODO delete folder + log + # delete folder + log shutil.rmtree(sorting_folder) + if log_file.exists(): + 
log_file.unlink() else: continue From 3c3451ecf6452419ebf83dd6dd2d9454ba7e6419 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 10:00:35 +0200 Subject: [PATCH 189/322] replace is_dumpable() by a more explicit naming : is_memory_serializable() --- src/spikeinterface/core/base.py | 53 +++++++++---------- src/spikeinterface/core/job_tools.py | 2 +- src/spikeinterface/core/numpyextractors.py | 6 +-- src/spikeinterface/core/old_api_utils.py | 6 +-- src/spikeinterface/core/tests/test_base.py | 10 ++-- .../core/tests/test_jsonification.py | 3 +- .../postprocessing/spike_amplitudes.py | 2 +- 7 files changed, 38 insertions(+), 44 deletions(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 63cf8e894f..3b8765a398 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -57,9 +57,7 @@ def __init__(self, main_ids: Sequence) -> None: # * number of units for sorting self._properties = {} - self._is_dumpable = True - # self._is_json_serializable = True - self._serializablility = {'json': True, 'pickle': True} + self._serializablility = {'memory': True, 'json': True, 'pickle': True} # extractor specific list of pip extra requirements self.extra_requirements = [] @@ -472,31 +470,8 @@ def clone(self) -> "BaseExtractor": clone = BaseExtractor.from_dict(d) return clone - def check_if_dumpable(self): - """Check if the object is dumpable, including nested objects. - - Returns - ------- - bool - True if the object is dumpable, False otherwise. - """ - kwargs = self._kwargs - for value in kwargs.values(): - # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors - if isinstance(value, BaseExtractor): - if not value.check_if_dumpable(): - return False - elif isinstance(value, list): - for v in value: - if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): - return False - elif isinstance(value, dict): - for v in value.values(): - if isinstance(v, BaseExtractor) and not v.check_if_dumpable(): - return False - return self._is_dumpable - def check_serializablility(self, type="json"): + def check_serializablility(self, type): kwargs = self._kwargs for value in kwargs.values(): # here we check if the value is a BaseExtractor, a list of BaseExtractors, or a dict of BaseExtractors @@ -512,6 +487,26 @@ def check_serializablility(self, type="json"): if isinstance(v, BaseExtractor) and not v.check_serializablility(type=type): return False return self._serializablility[type] + + + def check_if_dumpable(self): + warnings.warn( + "check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2 + ) + return self.check_serializablility("memory") + + def is_memory_serializable(self): + """ + Check if the object is serializable to memory with pickle, including nested objects. + + Returns + ------- + bool + True if the object is json serializable, False otherwise. + """ + return self.check_serializablility("memory") + + def check_if_json_serializable(self): """ @@ -636,7 +631,7 @@ def dump_to_pickle( folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - assert self.check_if_dumpable(), "The extractor is not dumpable" + assert self.check_if_pickle_serializable(), "The extractor is not dumpable" dump_dict = self.to_dict( include_annotations=True, @@ -931,7 +926,7 @@ def save_to_zarr( zarr_root = zarr.open(zarr_path_init, mode="w", storage_options=storage_options) - if self.check_if_dumpable(): + if self.check_if_json_serializable(): zarr_root.attrs["provenance"] = check_json(self.to_dict()) else: zarr_root.attrs["provenance"] = None diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index c0ee77d2fd..0535872ca6 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -167,7 +167,7 @@ def ensure_n_jobs(recording, n_jobs=1): print(f"Python {sys.version} does not support parallel processing") n_jobs = 1 - if not recording.check_if_dumpable(): + if not recording.is_memory_serializable(): if n_jobs != 1: raise RuntimeError( "Recording is not dumpable and can't be processed in parallel. " diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index 5ef955a6eb..d09016c8f1 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -127,7 +127,7 @@ def __init__(self, spikes, sampling_frequency, unit_ids): """ """ BaseSorting.__init__(self, sampling_frequency, unit_ids) - self._is_dumpable = True + self._serializablility["memory"] = True self._serializablility["json"] = False # theorically this should be False but for simplicity make generators simples we still need this. self._serializablility["pickle"] = True @@ -360,8 +360,8 @@ def __init__(self, shm_name, shape, sampling_frequency, unit_ids, dtype=minimum_ assert shape[0] > 0, "SharedMemorySorting only supported with no empty sorting" BaseSorting.__init__(self, sampling_frequency, unit_ids) - self._is_dumpable = True + self._serializablility["memory"] = True self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -521,7 +521,7 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore dtype=dtype, ) - self._is_dumpable = False + self._serializablility["memory"] = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/old_api_utils.py b/src/spikeinterface/core/old_api_utils.py index a31edb0dd7..879700cc15 100644 --- a/src/spikeinterface/core/old_api_utils.py +++ b/src/spikeinterface/core/old_api_utils.py @@ -181,8 +181,8 @@ def __init__(self, oldapi_recording_extractor): dtype=oldapi_recording_extractor.get_dtype(return_scaled=False), ) - # set _is_dumpable to False to use dumping mechanism of old extractor - self._is_dumpable = False + # set to False to use dumping mechanism of old extractor + self._serializablility["memory"] = False self._serializablility["json"] = False self._serializablility["pickle"] = False @@ -269,7 +269,7 @@ def __init__(self, oldapi_sorting_extractor): sorting_segment = OldToNewSortingSegment(oldapi_sorting_extractor) self.add_sorting_segment(sorting_segment) - self._is_dumpable = False + self._serializablility["memory"] = False self._serializablility["json"] = False self._serializablility["pickle"] = False diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index b716f6b1dd..28dbd166ec 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -31,19 +31,19 @@ def 
make_nested_extractors(extractor): ) -def test_check_if_dumpable(): +def test_is_memory_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects extractors_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_dumpable: - assert extractor.check_if_dumpable() + assert extractor.is_memory_serializable() # make not dumpable - test_extractor._is_dumpable = False + test_extractor._serializablility["memory"] = False extractors_not_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_not_dumpable: - assert not extractor.check_if_dumpable() + assert not extractor.is_memory_serializable() def test_check_if_serializable(): @@ -66,5 +66,5 @@ def test_check_if_serializable(): if __name__ == "__main__": - test_check_if_dumpable() + test_is_memory_serializable() test_check_if_serializable() diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 8572cda23e..026e676966 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -144,8 +144,7 @@ def __init__(self, attribute, other_extractor=None, extractor_list=None, extract BaseExtractor.__init__(self, main_ids=['1', '2']) # this already the case by default - self._is_dumpable = True - # self._is_json_serializable = True + self._serializablility["memory"] = True self._serializablility["json"] = True self._serializablility["pickle"] = True diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 38cb714d59..aa99f7fc5e 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -75,7 +75,7 @@ def _run(self, **job_kwargs): n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None)) if n_jobs != 1: # TODO: avoid dumping sorting and use spike vector and peak pipeline instead - assert sorting.check_if_dumpable(), ( + assert sorting.is_memory_serializable(), ( "The sorting object is not dumpable and cannot be processed in parallel. 
You can use the " "`sorting.save()` function to make it dumpable" ) From 9d3dceaacc77158487c47972a2d949a71bb3c65a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 08:52:23 +0000 Subject: [PATCH 190/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/comparison/multicomparisons.py | 4 ++-- src/spikeinterface/core/base.py | 10 ++-------- src/spikeinterface/core/generate.py | 4 ++-- src/spikeinterface/core/numpyextractors.py | 2 +- .../core/tests/test_jsonification.py | 8 ++++---- .../core/tests/test_waveform_extractor.py | 15 ++++++++++----- src/spikeinterface/core/waveform_extractor.py | 1 - src/spikeinterface/sorters/basesorter.py | 6 ++---- src/spikeinterface/sorters/runsorter.py | 7 ++++--- 9 files changed, 27 insertions(+), 30 deletions(-) diff --git a/src/spikeinterface/comparison/multicomparisons.py b/src/spikeinterface/comparison/multicomparisons.py index 6fe474822b..f44e14c4c4 100644 --- a/src/spikeinterface/comparison/multicomparisons.py +++ b/src/spikeinterface/comparison/multicomparisons.py @@ -189,8 +189,8 @@ def save_to_folder(self, save_folder): stacklevel=2, ) for sorting in self.object_list: - assert ( - sorting.check_serializablility("json") + assert sorting.check_serializablility( + "json" ), "MultiSortingComparison.save_to_folder() need json serializable sortings" save_folder = Path(save_folder) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 3b8765a398..6e91cedcb5 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -57,7 +57,7 @@ def __init__(self, main_ids: Sequence) -> None: # * number of units for sorting self._properties = {} - self._serializablility = {'memory': True, 'json': True, 'pickle': True} + self._serializablility = {"memory": True, "json": True, "pickle": True} # extractor specific list of pip extra requirements self.extra_requirements = [] @@ -470,7 +470,6 @@ def clone(self) -> "BaseExtractor": clone = BaseExtractor.from_dict(d) return clone - def check_serializablility(self, type): kwargs = self._kwargs for value in kwargs.values(): @@ -488,11 +487,8 @@ def check_serializablility(self, type): return False return self._serializablility[type] - def check_if_dumpable(self): - warnings.warn( - "check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2 - ) + warnings.warn("check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2) return self.check_serializablility("memory") def is_memory_serializable(self): @@ -506,8 +502,6 @@ def is_memory_serializable(self): """ return self.check_serializablility("memory") - - def check_if_json_serializable(self): """ Check if the object is json serializable, including nested objects. diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 05d63f3c8d..eeb1e8af60 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1433,7 +1433,7 @@ def generate_ground_truth_recording( ) recording.annotate(is_filtered=True) recording.set_probe(probe, in_place=True) - recording.set_channel_gains(1.) - recording.set_channel_offsets(0.) 
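In practice these flags gate two different paths: ensure_n_jobs refuses n_jobs > 1 when a recording is not memory-serializable, and the json/pickle flags decide what can be dumped for provenance. A rough usage sketch (method names as introduced in this series; generate_recording is just a stand-in source):

    from spikeinterface.core import generate_recording

    recording = generate_recording(durations=[10.0], num_channels=4)

    # memory serializability decides whether chunked processing may run in parallel
    if recording.check_if_memory_serializable():
        n_jobs = 4
    else:
        # saving to a folder returns a file-backed recording that can be sent to workers
        recording = recording.save(folder="rec_saved")
        n_jobs = 4

    # file serializability is checked per format before dumping provenance
    if recording.check_serializablility("json"):
        recording.dump("recording.json")
    elif recording.check_serializablility("pickle"):
        recording.dump("recording.pickle")
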
+ recording.set_channel_gains(1.0) + recording.set_channel_offsets(0.0) return recording, sorting diff --git a/src/spikeinterface/core/numpyextractors.py b/src/spikeinterface/core/numpyextractors.py index d09016c8f1..3d7ec6cd1a 100644 --- a/src/spikeinterface/core/numpyextractors.py +++ b/src/spikeinterface/core/numpyextractors.py @@ -523,7 +523,7 @@ def __init__(self, snippets_list, spikesframes_list, sampling_frequency, nbefore self._serializablility["memory"] = False self._serializablility["json"] = False - self._serializablility["pickle"] = False + self._serializablility["pickle"] = False for snippets, spikesframes in zip(snippets_list, spikesframes_list): snp_segment = NumpySnippetsSegment(snippets, spikesframes) diff --git a/src/spikeinterface/core/tests/test_jsonification.py b/src/spikeinterface/core/tests/test_jsonification.py index 026e676966..1c491bd7a6 100644 --- a/src/spikeinterface/core/tests/test_jsonification.py +++ b/src/spikeinterface/core/tests/test_jsonification.py @@ -142,11 +142,11 @@ def __init__(self, attribute, other_extractor=None, extractor_list=None, extract self.extractor_list = extractor_list self.extractor_dict = extractor_dict - BaseExtractor.__init__(self, main_ids=['1', '2']) + BaseExtractor.__init__(self, main_ids=["1", "2"]) # this already the case by default self._serializablility["memory"] = True self._serializablility["json"] = True - self._serializablility["pickle"] = True + self._serializablility["pickle"] = True self._kwargs = { "attribute": attribute, @@ -199,6 +199,6 @@ def test_encoding_numpy_scalars_within_nested_extractors_dict(nested_extractor_d json.dumps(nested_extractor_dict, cls=SIJsonEncoder) -if __name__ == '__main__': +if __name__ == "__main__": nested_extractor = nested_extractor() - test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) \ No newline at end of file + test_encoding_numpy_scalars_within_nested_extractors(nested_extractor_) diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index f53b9cf18d..12dac52d43 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -6,7 +6,13 @@ import zarr -from spikeinterface.core import generate_recording, generate_sorting, NumpySorting, ChannelSparsity, generate_ground_truth_recording +from spikeinterface.core import ( + generate_recording, + generate_sorting, + NumpySorting, + ChannelSparsity, + generate_ground_truth_recording, +) from spikeinterface import WaveformExtractor, BaseRecording, extract_waveforms, load_waveforms from spikeinterface.core.waveform_extractor import precompute_sparsity @@ -509,14 +515,15 @@ def test_compute_sparsity(): ) print(sparsity) + def test_non_json_object(): recording, sorting = generate_ground_truth_recording( durations=[30, 40], sampling_frequency=30000.0, num_channels=32, num_units=5, - ) - + ) + # recording is not save to keep it in memory sorting = sorting.save() @@ -524,7 +531,6 @@ def test_non_json_object(): if wf_folder.is_dir(): shutil.rmtree(wf_folder) - we = extract_waveforms( recording, sorting, @@ -551,4 +557,3 @@ def test_non_json_object(): # test_recordingless() # test_compute_sparsity() test_non_json_object() - diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 3de1429feb..cd8a62f5bc 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -900,7 +900,6 
@@ def save( elif self.recording.check_serializablility("pickle"): self.recording.dump(folder / "recording.pickle") - if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) elif self.sorting.check_serializablility("pickle"): diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index bbcde31eed..8d87558191 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -189,11 +189,9 @@ def set_params_to_folder(cls, recording, output_folder, new_params, verbose): @classmethod def load_recording_from_folder(cls, output_folder, with_warnings=False): - json_file = output_folder / "spikeinterface_recording.json" pickle_file = output_folder / "spikeinterface_recording.pickle" - if json_file.exists(): with (json_file).open("r", encoding="utf8") as f: recording_dict = json.load(f) @@ -206,7 +204,7 @@ def load_recording_from_folder(cls, output_folder, with_warnings=False): recording = load_extractor(json_file, base_folder=output_folder) elif pickle_file.exits(): recording = load_extractor(pickle_file) - + return recording @classmethod @@ -320,7 +318,7 @@ def get_result_from_folder(cls, output_folder, register_recording=True, sorting_ if register_recording: # register recording to Sorting object - recording = cls.load_recording_from_folder( output_folder, with_warnings=False) + recording = cls.load_recording_from_folder(output_folder, with_warnings=False) if recording is not None: sorting.register_recording(recording) diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index e930ec7f79..bd5667b15f 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -629,7 +629,7 @@ def read_sorter_folder(output_folder, register_recording=True, sorting_info=True Load a sorting object from a spike sorting output folder. 
The 'output_folder' must contain a valid 'spikeinterface_log.json' file - + Parameters ---------- output_folder: Pth or str @@ -657,8 +657,9 @@ def read_sorter_folder(output_folder, register_recording=True, sorting_info=True sorter_name = log["sorter_name"] SorterClass = sorter_dict[sorter_name] - sorting = SorterClass.get_result_from_folder(output_folder, register_recording=register_recording, - sorting_info=sorting_info) + sorting = SorterClass.get_result_from_folder( + output_folder, register_recording=register_recording, sorting_info=sorting_info + ) return sorting From 7329927cfb3035d764648a2175d617aa8999c67b Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 10:54:57 +0200 Subject: [PATCH 191/322] rename to check_if_memory_serializable --- src/spikeinterface/core/base.py | 6 +----- src/spikeinterface/core/job_tools.py | 2 +- src/spikeinterface/core/tests/test_base.py | 8 ++++---- src/spikeinterface/postprocessing/spike_amplitudes.py | 2 +- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 6e91cedcb5..b1b5065339 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -487,11 +487,7 @@ def check_serializablility(self, type): return False return self._serializablility[type] - def check_if_dumpable(self): - warnings.warn("check_if_dumpable() is replace by is_memory_serializable()", DeprecationWarning, stacklevel=2) - return self.check_serializablility("memory") - - def is_memory_serializable(self): + def check_if_memory_serializable(self): """ Check if the object is serializable to memory with pickle, including nested objects. diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index 0535872ca6..9369ad0b61 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -167,7 +167,7 @@ def ensure_n_jobs(recording, n_jobs=1): print(f"Python {sys.version} does not support parallel processing") n_jobs = 1 - if not recording.is_memory_serializable(): + if not recording.check_if_memory_serializable(): if n_jobs != 1: raise RuntimeError( "Recording is not dumpable and can't be processed in parallel. 
" diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index 28dbd166ec..8d0907c700 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -31,19 +31,19 @@ def make_nested_extractors(extractor): ) -def test_is_memory_serializable(): +def test_check_if_memory_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) # make a list of dumpable objects extractors_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_dumpable: - assert extractor.is_memory_serializable() + assert extractor.check_if_memory_serializable() # make not dumpable test_extractor._serializablility["memory"] = False extractors_not_dumpable = make_nested_extractors(test_extractor) for extractor in extractors_not_dumpable: - assert not extractor.is_memory_serializable() + assert not extractor.check_if_memory_serializable() def test_check_if_serializable(): @@ -66,5 +66,5 @@ def test_check_if_serializable(): if __name__ == "__main__": - test_is_memory_serializable() + test_check_if_memory_serializable() test_check_if_serializable() diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index aa99f7fc5e..9eb5a815d4 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -75,7 +75,7 @@ def _run(self, **job_kwargs): n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None)) if n_jobs != 1: # TODO: avoid dumping sorting and use spike vector and peak pipeline instead - assert sorting.is_memory_serializable(), ( + assert sorting.check_if_memory_serializable(), ( "The sorting object is not dumpable and cannot be processed in parallel. You can use the " "`sorting.save()` function to make it dumpable" ) From b9c6a38e99430fc7b734e0751871e6d08eb5aea1 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 10:56:28 +0200 Subject: [PATCH 192/322] oups --- src/spikeinterface/core/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index b1b5065339..e3b88588e2 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -494,7 +494,7 @@ def check_if_memory_serializable(self): Returns ------- bool - True if the object is json serializable, False otherwise. + True if the object is memory serializable, False otherwise. """ return self.check_serializablility("memory") From 331379a3f441e2691eb15985b60254fcc9e3f887 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 11:13:29 +0200 Subject: [PATCH 193/322] Remove "dumpable" naming also in doc and warnings. --- doc/modules/core.rst | 3 +-- src/spikeinterface/comparison/hybrid.py | 4 ++-- src/spikeinterface/core/base.py | 8 ++++---- src/spikeinterface/core/job_tools.py | 4 ++-- src/spikeinterface/core/tests/test_base.py | 17 ++++++++--------- .../core/tests/test_core_tools.py | 1 - src/spikeinterface/core/tests/test_job_tools.py | 6 +++--- .../core/tests/test_waveform_extractor.py | 2 +- src/spikeinterface/core/waveform_extractor.py | 15 ++++++++------- .../postprocessing/spike_amplitudes.py | 6 ------ .../sorters/tests/test_launcher.py | 2 +- 11 files changed, 30 insertions(+), 38 deletions(-) diff --git a/doc/modules/core.rst b/doc/modules/core.rst index fdc4d71fe7..976a82a4a3 100644 --- a/doc/modules/core.rst +++ b/doc/modules/core.rst @@ -547,8 +547,7 @@ workflow. 
In order to do this, one can use the :code:`Numpy*` classes, :py:class:`~spikeinterface.core.NumpyRecording`, :py:class:`~spikeinterface.core.NumpySorting`, :py:class:`~spikeinterface.core.NumpyEvent`, and :py:class:`~spikeinterface.core.NumpySnippets`. These object behave exactly like normal SpikeInterface objects, -but they are not bound to a file. This makes these objects *not dumpable*, so parallel processing is not supported. -In order to make them *dumpable*, one can simply :code:`save()` them (see :ref:`save_load`). +but they are not bound to a file. Also note the class :py:class:`~spikeinterface.core.SharedMemorySorting` which is very similar to Similar to :py:class:`~spikeinterface.core.NumpySorting` but with an unerlying SharedMemory which is usefull for diff --git a/src/spikeinterface/comparison/hybrid.py b/src/spikeinterface/comparison/hybrid.py index 3b8e9e0a72..e0c98cd772 100644 --- a/src/spikeinterface/comparison/hybrid.py +++ b/src/spikeinterface/comparison/hybrid.py @@ -39,7 +39,7 @@ class HybridUnitsRecording(InjectTemplatesRecording): The refractory period of the injected spike train (in ms). injected_sorting_folder: str | Path | None If given, the injected sorting is saved to this folder. - It must be specified if injected_sorting is None or not dumpable. + It must be specified if injected_sorting is None or not serialisable to file. Returns ------- @@ -138,7 +138,7 @@ class HybridSpikesRecording(InjectTemplatesRecording): this refractory period. injected_sorting_folder: str | Path | None If given, the injected sorting is saved to this folder. - It must be specified if injected_sorting is None or not dumpable. + It must be specified if injected_sorting is None or not serializable to file. Returns ------- diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index e3b88588e2..73f8619348 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -621,7 +621,7 @@ def dump_to_pickle( folder_metadata: str, Path, or None Folder with files containing additional information (e.g. probe in BaseRecording) and properties. 
""" - assert self.check_if_pickle_serializable(), "The extractor is not dumpable" + assert self.check_if_pickle_serializable(), "The extractor is not serializable to file with pickle" dump_dict = self.to_dict( include_annotations=True, @@ -658,8 +658,8 @@ def load(file_path: Union[str, Path], base_folder: Optional[Union[Path, str, boo d = pickle.load(f) else: raise ValueError(f"Impossible to load {file_path}") - if "warning" in d and "not dumpable" in d["warning"]: - print("The extractor was not dumpable") + if "warning" in d: + print("The extractor was not serializable to file") return None extractor = BaseExtractor.from_dict(d, base_folder=base_folder) return extractor @@ -822,7 +822,7 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): if self.check_serializablility("json"): self.dump(provenance_file) else: - provenance_file.write_text(json.dumps({"warning": "the provenace is not dumpable!!!"}), encoding="utf8") + provenance_file.write_text(json.dumps({"warning": "the provenace is not json serializable!!!"}), encoding="utf8") self.save_metadata_to_folder(folder) diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index 9369ad0b61..84ee502c14 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -170,8 +170,8 @@ def ensure_n_jobs(recording, n_jobs=1): if not recording.check_if_memory_serializable(): if n_jobs != 1: raise RuntimeError( - "Recording is not dumpable and can't be processed in parallel. " - "You can use the `recording.save()` function to make it dumpable or set 'n_jobs' to 1." + "Recording is not serializable to memory and can't be processed in parallel. " + "You can use the `rec = recording.save(folder=...)` function or set 'n_jobs' to 1." 
) return n_jobs diff --git a/src/spikeinterface/core/tests/test_base.py b/src/spikeinterface/core/tests/test_base.py index 8d0907c700..a944be3da0 100644 --- a/src/spikeinterface/core/tests/test_base.py +++ b/src/spikeinterface/core/tests/test_base.py @@ -34,30 +34,29 @@ def make_nested_extractors(extractor): def test_check_if_memory_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) - # make a list of dumpable objects - extractors_dumpable = make_nested_extractors(test_extractor) - for extractor in extractors_dumpable: + # make a list of memory serializable objects + extractors_mem_serializable = make_nested_extractors(test_extractor) + for extractor in extractors_mem_serializable: assert extractor.check_if_memory_serializable() - # make not dumpable + # make not not memory serilizable test_extractor._serializablility["memory"] = False - extractors_not_dumpable = make_nested_extractors(test_extractor) - for extractor in extractors_not_dumpable: + extractors_not_mem_serializable = make_nested_extractors(test_extractor) + for extractor in extractors_not_mem_serializable: assert not extractor.check_if_memory_serializable() def test_check_if_serializable(): test_extractor = generate_recording(seed=0, durations=[2]) - # make a list of dumpable objects + # make a list of json serializable objects test_extractor._serializablility["json"] = True extractors_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_json_serializable: print(extractor) assert extractor.check_serializablility("json") - # make not dumpable - # test_extractor._is_json_serializable = False + # make of not json serializable objects test_extractor._serializablility["json"] = False extractors_not_json_serializable = make_nested_extractors(test_extractor) for extractor in extractors_not_json_serializable: diff --git a/src/spikeinterface/core/tests/test_core_tools.py b/src/spikeinterface/core/tests/test_core_tools.py index a3cd0caa92..223b2a8a3a 100644 --- a/src/spikeinterface/core/tests/test_core_tools.py +++ b/src/spikeinterface/core/tests/test_core_tools.py @@ -142,7 +142,6 @@ def test_write_memory_recording(): recording = NoiseGeneratorRecording( num_channels=2, durations=[10.325, 3.5], sampling_frequency=30_000, strategy="tile_pregenerated" ) - # make dumpable recording = recording.save() # write with loop diff --git a/src/spikeinterface/core/tests/test_job_tools.py b/src/spikeinterface/core/tests/test_job_tools.py index 7d7af6025b..a904e4dd32 100644 --- a/src/spikeinterface/core/tests/test_job_tools.py +++ b/src/spikeinterface/core/tests/test_job_tools.py @@ -36,7 +36,7 @@ def test_ensure_n_jobs(): n_jobs = ensure_n_jobs(recording, n_jobs=1) assert n_jobs == 1 - # dumpable + # check serializable n_jobs = ensure_n_jobs(recording.save(), n_jobs=-1) assert n_jobs > 1 @@ -45,7 +45,7 @@ def test_ensure_chunk_size(): recording = generate_recording(num_channels=2) dtype = recording.get_dtype() assert dtype == "float32" - # make dumpable + # make serializable recording = recording.save() chunk_size = ensure_chunk_size(recording, total_memory="512M", chunk_size=None, chunk_memory=None, n_jobs=2) @@ -90,7 +90,7 @@ def init_func(arg1, arg2, arg3): def test_ChunkRecordingExecutor(): recording = generate_recording(num_channels=2) - # make dumpable + # make serializable recording = recording.save() init_args = "a", 120, "yep" diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 12dac52d43..2bbf5e9b0f 
100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -315,7 +315,7 @@ def test_recordingless(): recording = recording.save(folder=cache_folder / "recording1") sorting = sorting.save(folder=cache_folder / "sorting1") - # recording and sorting are not dumpable + # recording and sorting are not serializable wf_folder = cache_folder / "wf_recordingless" # save with relative paths diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index cd8a62f5bc..2710ff1338 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -290,11 +290,12 @@ def create( sorting.dump(folder / "sorting.json", relative_to=relative_to) elif sorting.check_serializablility("pickle"): # In this case we loose the relative_to!! + # TODO later the dump to pickle should dump the dictionary and so relative could be put back sorting.dump(folder / "sorting.pickle") else: warn( - "Sorting object is not dumpable, which might result in downstream errors for " - "parallel processing. To make the sorting dumpable, use the `sorting.save()` function." + "Sorting object is not serializable to file, which might result in downstream errors for " + "parallel processing. To make the sorting serializable, use the `sorting = sorting.save()` function." ) # dump some attributes of the recording for the mode with_recording=False at next load @@ -903,11 +904,11 @@ def save( if self.sorting.check_serializablility("json"): self.sorting.dump(folder / "sorting.json", relative_to=relative_to) elif self.sorting.check_serializablility("pickle"): - self.sorting.dump(folder / "sorting.pickle", relative_to=relative_to) + self.sorting.dump(folder / "sorting.pickle") else: warn( - "Sorting object is not dumpable, which might result in downstream errors for " - "parallel processing. To make the sorting dumpable, use the `sorting.save()` function." + "Sorting object is not serializable to file, which might result in downstream errors for " + "parallel processing. To make the sorting serializable, use the `sorting = sorting.save()` function." ) # dump some attributes of the recording for the mode with_recording=False at next load @@ -960,8 +961,8 @@ def save( zarr_root.attrs["sorting"] = check_json(sort_dict) else: warn( - "Sorting object is not dumpable, which might result in downstream errors for " - "parallel processing. To make the sorting dumpable, use the `sorting.save()` function." + "Sorting object is not json serializable, which might result in downstream errors for " + "parallel processing. To make the sorting serializable, use the `sorting = sorting.save()` function." 
) recording_info = zarr_root.create_group("recording_info") recording_info.attrs["recording_attributes"] = check_json(rec_attributes) diff --git a/src/spikeinterface/postprocessing/spike_amplitudes.py b/src/spikeinterface/postprocessing/spike_amplitudes.py index 9eb5a815d4..ccd2121174 100644 --- a/src/spikeinterface/postprocessing/spike_amplitudes.py +++ b/src/spikeinterface/postprocessing/spike_amplitudes.py @@ -73,12 +73,6 @@ def _run(self, **job_kwargs): func = _spike_amplitudes_chunk init_func = _init_worker_spike_amplitudes n_jobs = ensure_n_jobs(recording, job_kwargs.get("n_jobs", None)) - if n_jobs != 1: - # TODO: avoid dumping sorting and use spike vector and peak pipeline instead - assert sorting.check_if_memory_serializable(), ( - "The sorting object is not dumpable and cannot be processed in parallel. You can use the " - "`sorting.save()` function to make it dumpable" - ) init_args = (recording, sorting.to_multiprocessing(n_jobs), extremum_channels_index, peak_shifts, return_scaled) processor = ChunkRecordingExecutor( recording, func, init_func, init_args, handle_returns=True, job_name="extract amplitudes", **job_kwargs diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index 14c938f8ba..a5e29c8fd9 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -178,7 +178,7 @@ def test_run_sorters_with_list(): if working_folder.is_dir(): shutil.rmtree(working_folder) - # make dumpable + # make serializable rec0 = load_extractor(cache_folder / "toy_rec_0") rec1 = load_extractor(cache_folder / "toy_rec_1") From 0ea10e3baf97fbcedc8c25c2745754cacabb7b5c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 09:13:52 +0000 Subject: [PATCH 194/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 73f8619348..e8b3232e13 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -822,7 +822,9 @@ def save_to_folder(self, name=None, folder=None, verbose=True, **save_kwargs): if self.check_serializablility("json"): self.dump(provenance_file) else: - provenance_file.write_text(json.dumps({"warning": "the provenace is not json serializable!!!"}), encoding="utf8") + provenance_file.write_text( + json.dumps({"warning": "the provenace is not json serializable!!!"}), encoding="utf8" + ) self.save_metadata_to_folder(folder) From af72fbcaa040c4216e2f2b60465197b484e2d2c9 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 11:25:20 +0200 Subject: [PATCH 195/322] oups --- src/spikeinterface/comparison/groundtruthstudy.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 34777c6f20..fcebb356a0 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -31,7 +31,6 @@ class GroundTruthStudy: "cases" refer to: * several sorters for comparisons * same sorter with differents parameters - * parameters of comparisons * any combination of these (and more) For increased flexibility, cases keys can be a tuple so that we can vary complexity along several @@ 
-403,11 +402,11 @@ def get_count_units( count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( well_detected_score ) + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( + overmerged_score + ) + count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) if comp.exhaustive_gt: - count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units( redundant_score ) From 2c015f78e9311e106e9d2fda4e4026a61ca68c5b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 09:28:28 +0000 Subject: [PATCH 196/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/amplitudes.py | 7 +- src/spikeinterface/widgets/base.py | 2 +- src/spikeinterface/widgets/metrics.py | 6 +- src/spikeinterface/widgets/spike_locations.py | 2 +- .../widgets/spikes_on_traces.py | 20 +- src/spikeinterface/widgets/traces.py | 51 ++-- src/spikeinterface/widgets/unit_locations.py | 2 +- src/spikeinterface/widgets/unit_waveforms.py | 8 +- .../widgets/utils_ipywidgets.py | 222 +++++++++--------- 9 files changed, 163 insertions(+), 157 deletions(-) diff --git a/src/spikeinterface/widgets/amplitudes.py b/src/spikeinterface/widgets/amplitudes.py index 5aa090b1b4..6b6496a577 100644 --- a/src/spikeinterface/widgets/amplitudes.py +++ b/src/spikeinterface/widgets/amplitudes.py @@ -174,6 +174,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt + # import ipywidgets.widgets as widgets import ipywidgets.widgets as W from IPython.display import display @@ -210,7 +211,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.unit_selector, self.checkbox_histograms, ], - layout = W.Layout(align_items="center", width="4cm", height="100%"), + layout=W.Layout(align_items="center", width="4cm", height="100%"), ) self.widget = W.AppLayout( @@ -222,8 +223,8 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a first update self._full_update_plot() - self.unit_selector.observe(self._update_plot, names='value', type="change") - self.checkbox_histograms.observe(self._full_update_plot, names='value', type="change") + self.unit_selector.observe(self._update_plot, names="value", type="change") + self.checkbox_histograms.observe(self._full_update_plot, names="value", type="change") if backend_kwargs["display"]: display(self.widget) diff --git a/src/spikeinterface/widgets/base.py b/src/spikeinterface/widgets/base.py index 1ff691320a..9fc7b73707 100644 --- a/src/spikeinterface/widgets/base.py +++ b/src/spikeinterface/widgets/base.py @@ -38,7 +38,7 @@ def set_default_plotter_backend(backend): "width_cm": "Width of the figure in cm (default 10)", "height_cm": "Height of the figure in cm (default 6)", "display": "If True, widgets are immediately displayed", - # "controllers": "" + # "controllers": "" }, "ephyviewer": {}, } diff --git a/src/spikeinterface/widgets/metrics.py b/src/spikeinterface/widgets/metrics.py index 604da35e65..c7b701c8b0 100644 --- a/src/spikeinterface/widgets/metrics.py +++ b/src/spikeinterface/widgets/metrics.py @@ -149,8 +149,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): plt.show() self.unit_selector = 
UnitSelector(data_plot["sorting"].unit_ids) - self.unit_selector.value = [ ] - + self.unit_selector.value = [] self.widget = widgets.AppLayout( center=self.figure.canvas, @@ -161,7 +160,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a first update self._update_ipywidget(None) - self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + self.unit_selector.observe(self._update_ipywidget, names="value", type="change") if backend_kwargs["display"]: display(self.widget) @@ -208,7 +207,6 @@ def _update_ipywidget(self, change): self.figure.canvas.draw() self.figure.canvas.flush_events() - def plot_sortingview(self, data_plot, **backend_kwargs): import sortingview.views as vv from .utils_sortingview import generate_unit_table_view, make_serializable, handle_display_and_url diff --git a/src/spikeinterface/widgets/spike_locations.py b/src/spikeinterface/widgets/spike_locations.py index 926051b8f9..fda2356105 100644 --- a/src/spikeinterface/widgets/spike_locations.py +++ b/src/spikeinterface/widgets/spike_locations.py @@ -222,7 +222,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a first update self._update_ipywidget() - self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + self.unit_selector.observe(self._update_ipywidget, names="value", type="change") if backend_kwargs["display"]: display(self.widget) diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py index 2f748cc0fc..c2bed8fe41 100644 --- a/src/spikeinterface/widgets/spikes_on_traces.py +++ b/src/spikeinterface/widgets/spikes_on_traces.py @@ -232,7 +232,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): handles.append(l[0]) labels.append(unit) label_set = True - # ax.legend(handles, labels) + # ax.legend(handles, labels) def plot_ipywidgets(self, data_plot, **backend_kwargs): import matplotlib.pyplot as plt @@ -268,19 +268,18 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.unit_selector = UnitSelector(data_plot["unit_ids"]) self.unit_selector.value = list(data_plot["unit_ids"])[:1] - self.widget = widgets.AppLayout(center=self._traces_widget.widget, - left_sidebar=self.unit_selector, - pane_widths=ratios + [0]) + self.widget = widgets.AppLayout( + center=self._traces_widget.widget, left_sidebar=self.unit_selector, pane_widths=ratios + [0] + ) # a first update self._update_ipywidget() # remove callback from traces_widget - self.unit_selector.observe(self._update_ipywidget, names='value', type="change") - self._traces_widget.time_slider.observe(self._update_ipywidget, names='value', type="change") - self._traces_widget.channel_selector.observe(self._update_ipywidget, names='value', type="change") - self._traces_widget.scaler.observe(self._update_ipywidget, names='value', type="change") - + self.unit_selector.observe(self._update_ipywidget, names="value", type="change") + self._traces_widget.time_slider.observe(self._update_ipywidget, names="value", type="change") + self._traces_widget.channel_selector.observe(self._update_ipywidget, names="value", type="change") + self._traces_widget.scaler.observe(self._update_ipywidget, names="value", type="change") if backend_kwargs["display"]: display(self.widget) @@ -305,10 +304,9 @@ def _update_ipywidget(self, change=None): time_range=np.array([start_frame, end_frame]) / self.sampling_frequency, mode=mode, with_colorbar=False, - ) + ) ) - backend_kwargs = {} backend_kwargs["ax"] = self.ax diff --git a/src/spikeinterface/widgets/traces.py 
b/src/spikeinterface/widgets/traces.py index d107c5cb23..9b6716e8f3 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -290,7 +290,6 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): check_ipywidget_backend() self.next_data_plot = data_plot.copy() - self.recordings = data_plot["recordings"] @@ -314,7 +313,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.time_slider = TimeSlider( durations=[rec0.get_duration(s) for s in range(rec0.get_num_segments())], sampling_frequency=rec0.sampling_frequency, - # layout=W.Layout(height="2cm"), + # layout=W.Layout(height="2cm"), ) start_frame = int(data_plot["time_range"][0] * rec0.sampling_frequency) @@ -324,14 +323,17 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): _layer_keys = data_plot["layer_keys"] if len(_layer_keys) > 1: - _layer_keys = ['ALL'] + _layer_keys - self.layer_selector = W.Dropdown(options=_layer_keys, - layout=W.Layout(width="95%"), - ) - self.mode_selector = W.Dropdown(options=["line", "map"], value=data_plot["mode"], - # layout=W.Layout(width="5cm"), - layout=W.Layout(width="95%"), - ) + _layer_keys = ["ALL"] + _layer_keys + self.layer_selector = W.Dropdown( + options=_layer_keys, + layout=W.Layout(width="95%"), + ) + self.mode_selector = W.Dropdown( + options=["line", "map"], + value=data_plot["mode"], + # layout=W.Layout(width="5cm"), + layout=W.Layout(width="95%"), + ) self.scaler = ScaleWidget() self.channel_selector = ChannelSelector(self.rec0.channel_ids) @@ -343,9 +345,9 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.mode_selector, self.scaler, # self.channel_selector, - ], + ], layout=W.Layout(width="3.5cm"), - align_items='center', + align_items="center", ) self.return_scaled = data_plot["return_scaled"] @@ -353,7 +355,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.widget = widgets.AppLayout( center=self.figure.canvas, footer=self.time_slider, - left_sidebar = left_sidebar, + left_sidebar=left_sidebar, right_sidebar=self.channel_selector, pane_heights=[0, 6, 1], pane_widths=ratios, @@ -365,28 +367,28 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # callbacks: # some widgets generate a full retrieve + refresh - self.time_slider.observe(self._retrieve_traces, names='value', type="change") - self.layer_selector.observe(self._retrieve_traces, names='value', type="change") - self.channel_selector.observe(self._retrieve_traces, names='value', type="change") + self.time_slider.observe(self._retrieve_traces, names="value", type="change") + self.layer_selector.observe(self._retrieve_traces, names="value", type="change") + self.channel_selector.observe(self._retrieve_traces, names="value", type="change") # other widgets only refresh - self.scaler.observe(self._update_plot, names='value', type="change") + self.scaler.observe(self._update_plot, names="value", type="change") # map is a special case because needs to check layer also - self.mode_selector.observe(self._mode_changed, names='value', type="change") - + self.mode_selector.observe(self._mode_changed, names="value", type="change") + if backend_kwargs["display"]: # self.check_backend() display(self.widget) def _get_layers(self): layer = self.layer_selector.value - if layer == 'ALL': + if layer == "ALL": layer_keys = self.data_plot["layer_keys"] else: layer_keys = [layer] if self.mode_selector.value == "map": layer_keys = layer_keys[:1] return layer_keys - + def _mode_changed(self, change=None): if self.mode_selector.value == "map" and 
self.layer_selector.value == "ALL": self.layer_selector.value = self.data_plot["layer_keys"][0] @@ -400,7 +402,7 @@ def _retrieve_traces(self, change=None): order, _ = order_channels_by_depth(self.rec0, channel_ids) else: order = None - + start_frame, end_frame, segment_index = self.time_slider.value time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency @@ -439,9 +441,9 @@ def _update_plot(self, change=None): data_plot["clims"] = clims data_plot["channel_ids"] = self._channel_ids - + data_plot["layer_keys"] = layer_keys - data_plot["colors"] = {k:self.data_plot["colors"][k] for k in layer_keys} + data_plot["colors"] = {k: self.data_plot["colors"][k] for k in layer_keys} list_traces = [traces * self.scaler.value for traces in self._list_traces] data_plot["list_traces"] = list_traces @@ -458,7 +460,6 @@ def _update_plot(self, change=None): fig.canvas.draw() fig.canvas.flush_events() - def plot_sortingview(self, data_plot, **backend_kwargs): import sortingview.views as vv from .utils_sortingview import handle_display_and_url diff --git a/src/spikeinterface/widgets/unit_locations.py b/src/spikeinterface/widgets/unit_locations.py index 8526a95d60..b41ee3508b 100644 --- a/src/spikeinterface/widgets/unit_locations.py +++ b/src/spikeinterface/widgets/unit_locations.py @@ -198,7 +198,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a first update self._update_ipywidget() - self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + self.unit_selector.observe(self._update_ipywidget, names="value", type="change") if backend_kwargs["display"]: display(self.widget) diff --git a/src/spikeinterface/widgets/unit_waveforms.py b/src/spikeinterface/widgets/unit_waveforms.py index f01c842b66..8ffc931bf2 100644 --- a/src/spikeinterface/widgets/unit_waveforms.py +++ b/src/spikeinterface/widgets/unit_waveforms.py @@ -277,7 +277,6 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): self.unit_selector = UnitSelector(data_plot["unit_ids"]) self.unit_selector.value = list(data_plot["unit_ids"])[:1] - self.same_axis_button = widgets.Checkbox( value=False, description="same axis", @@ -309,10 +308,9 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): # a first update self._update_ipywidget(None) - self.unit_selector.observe(self._update_ipywidget, names='value', type="change") + self.unit_selector.observe(self._update_ipywidget, names="value", type="change") for w in self.same_axis_button, self.plot_templates_button, self.hide_axis_button: - w.observe(self._update_ipywidget, names='value', type="change") - + w.observe(self._update_ipywidget, names="value", type="change") if backend_kwargs["display"]: display(self.widget) @@ -340,7 +338,7 @@ def _update_ipywidget(self, change): data_plot["plot_templates"] = plot_templates if data_plot["plot_waveforms"]: data_plot["wfs_by_ids"] = {unit_id: self.we.get_waveforms(unit_id) for unit_id in unit_ids} - + # TODO option for plot_legend backend_kwargs = {} diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index ee6133a990..6e872eca55 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -12,12 +12,9 @@ def check_ipywidget_backend(): class TimeSlider(W.HBox): - value = traitlets.Tuple(traitlets.Int(), traitlets.Int(), traitlets.Int()) - - def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): - - + + def __init__(self, durations, sampling_frequency, 
time_range=(0, 1.0), **kwargs): self.num_segments = len(durations) self.frame_limits = [int(sampling_frequency * d) for d in durations] self.sampling_frequency = sampling_frequency @@ -28,81 +25,100 @@ def __init__(self, durations, sampling_frequency, time_range=(0, 1.), **kwargs): self.segment_index = 0 self.value = (start_frame, end_frame, self.segment_index) - - + layout = W.Layout(align_items="center", width="2.5cm", height="1.cm") - but_left = W.Button(description='', disabled=False, button_style='', icon='arrow-left', layout=layout) - but_right = W.Button(description='', disabled=False, button_style='', icon='arrow-right', layout=layout) - + but_left = W.Button(description="", disabled=False, button_style="", icon="arrow-left", layout=layout) + but_right = W.Button(description="", disabled=False, button_style="", icon="arrow-right", layout=layout) + but_left.on_click(self.move_left) but_right.on_click(self.move_right) - self.move_size = W.Dropdown(options=['10 ms', '100 ms', '1 s', '10 s', '1 m', '30 m', '1 h',], # '6 h', '24 h' - value='1 s', - description='', - layout = W.Layout(width="2cm") - ) + self.move_size = W.Dropdown( + options=[ + "10 ms", + "100 ms", + "1 s", + "10 s", + "1 m", + "30 m", + "1 h", + ], # '6 h', '24 h' + value="1 s", + description="", + layout=W.Layout(width="2cm"), + ) # DatetimePicker is only for ipywidget v8 (which is not working in vscode 2023-03) - self.time_label = W.Text(value=f'{time_range[0]}',description='', - disabled=False, layout=W.Layout(width='2.5cm')) - self.time_label.observe(self.time_label_changed, names='value', type="change") - + self.time_label = W.Text( + value=f"{time_range[0]}", description="", disabled=False, layout=W.Layout(width="2.5cm") + ) + self.time_label.observe(self.time_label_changed, names="value", type="change") self.slider = W.IntSlider( - orientation='horizontal', - # description='time:', + orientation="horizontal", + # description='time:', value=start_frame, min=0, max=self.frame_limits[self.segment_index] - 1, readout=False, continuous_update=False, - layout=W.Layout(width=f'70%') + layout=W.Layout(width=f"70%"), ) - - self.slider.observe(self.slider_moved, names='value', type="change") - + + self.slider.observe(self.slider_moved, names="value", type="change") + delta_s = np.diff(self.frame_range) / sampling_frequency - - self.window_sizer = W.BoundedFloatText(value=delta_s, step=1, - min=0.01, max=30., - description='win (s)', - layout=W.Layout(width='auto') - # layout=W.Layout(width=f'10%') - ) - self.window_sizer.observe(self.win_size_changed, names='value', type="change") + + self.window_sizer = W.BoundedFloatText( + value=delta_s, + step=1, + min=0.01, + max=30.0, + description="win (s)", + layout=W.Layout(width="auto") + # layout=W.Layout(width=f'10%') + ) + self.window_sizer.observe(self.win_size_changed, names="value", type="change") self.segment_selector = W.Dropdown(description="segment", options=list(range(self.num_segments))) - self.segment_selector.observe(self.segment_changed, names='value', type="change") + self.segment_selector.observe(self.segment_changed, names="value", type="change") + + super(W.HBox, self).__init__( + children=[ + self.segment_selector, + but_left, + self.move_size, + but_right, + self.slider, + self.time_label, + self.window_sizer, + ], + layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs, + ) - super(W.HBox, self).__init__(children=[self.segment_selector, but_left, self.move_size, but_right, - self.slider, self.time_label, self.window_sizer], - 
layout=W.Layout(align_items="center", width="100%", height="100%"), - **kwargs) - - self.observe(self.value_changed, names=['value'], type="change") + self.observe(self.value_changed, names=["value"], type="change") def value_changed(self, change=None): - - self.unobserve(self.value_changed, names=['value'], type="change") + self.unobserve(self.value_changed, names=["value"], type="change") start, stop, seg_index = self.value if seg_index < 0 or seg_index >= self.num_segments: - self.value = change['old'] + self.value = change["old"] return if start < 0 or stop < 0: - self.value = change['old'] + self.value = change["old"] return if start >= self.frame_limits[seg_index] or start > self.frame_limits[seg_index]: - self.value = change['old'] + self.value = change["old"] return - + self.segment_selector.value = seg_index self.update_time(new_frame=start, update_slider=True, update_label=True) delta_s = (stop - start) / self.sampling_frequency self.window_sizer.value = delta_s - self.observe(self.value_changed, names=['value'], type="change") + self.observe(self.value_changed, names=["value"], type="change") def update_time(self, new_frame=None, new_time=None, update_slider=False, update_label=False): if new_frame is None and new_time is None: @@ -118,25 +134,24 @@ def update_time(self, new_frame=None, new_time=None, update_slider=False, update start_frame = min(self.frame_limits[self.segment_index] - delta, start_frame) start_frame = max(0, start_frame) end_frame = start_frame + delta - + end_frame = min(self.frame_limits[self.segment_index], end_frame) - start_time = start_frame / self.sampling_frequency if update_label: - self.time_label.unobserve(self.time_label_changed, names='value', type="change") - self.time_label.value = f'{start_time}' - self.time_label.observe(self.time_label_changed, names='value', type="change") + self.time_label.unobserve(self.time_label_changed, names="value", type="change") + self.time_label.value = f"{start_time}" + self.time_label.observe(self.time_label_changed, names="value", type="change") if update_slider: - self.slider.unobserve(self.slider_moved, names='value', type="change") + self.slider.unobserve(self.slider_moved, names="value", type="change") self.slider.value = start_frame - self.slider.observe(self.slider_moved, names='value', type="change") - + self.slider.observe(self.slider_moved, names="value", type="change") + self.frame_range = (start_frame, end_frame) self.value = (start_frame, end_frame, self.segment_index) - + def time_label_changed(self, change=None): try: new_time = float(self.time_label.value) @@ -145,39 +160,39 @@ def time_label_changed(self, change=None): if new_time is not None: self.update_time(new_time=new_time, update_slider=True) - def win_size_changed(self, change=None): self.update_time() - + def slider_moved(self, change=None): new_frame = self.slider.value self.update_time(new_frame=new_frame, update_label=True) - + def move(self, sign): - value, units = self.move_size.value.split(' ') + value, units = self.move_size.value.split(" ") value = int(value) - delta_s = (sign * np.timedelta64(value, units)) / np.timedelta64(1, 's') + delta_s = (sign * np.timedelta64(value, units)) / np.timedelta64(1, "s") delta_sample = int(delta_s * self.sampling_frequency) new_frame = self.frame_range[0] + delta_sample self.slider.value = new_frame - + def move_left(self, change=None): self.move(-1) def move_right(self, change=None): self.move(+1) - + def segment_changed(self, change=None): self.segment_index = self.segment_selector.value - 
self.slider.unobserve(self.slider_moved, names='value', type="change") + self.slider.unobserve(self.slider_moved, names="value", type="change") # self.slider.value = 0 self.slider.max = self.frame_limits[self.segment_index] - 1 - self.slider.observe(self.slider_moved, names='value', type="change") + self.slider.observe(self.slider_moved, names="value", type="change") self.update_time(new_frame=0, update_slider=True, update_label=True) + class ChannelSelector(W.VBox): value = traitlets.List() @@ -211,22 +226,24 @@ def __init__(self, channel_ids, **kwargs): ) hbox = W.HBox(children=[self.slider, self.selector]) - super(W.VBox, self).__init__(children=[channel_label, hbox], - layout=W.Layout(align_items="center"), - # layout=W.Layout(align_items="center", width="100%", height="100%"), - **kwargs) - self.slider.observe(self.on_slider_changed, names=['value'], type="change") - self.selector.observe(self.on_selector_changed, names=['value'], type="change") + super(W.VBox, self).__init__( + children=[channel_label, hbox], + layout=W.Layout(align_items="center"), + # layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs, + ) + self.slider.observe(self.on_slider_changed, names=["value"], type="change") + self.selector.observe(self.on_selector_changed, names=["value"], type="change") # TODO external value change # self.observe(self.value_changed, names=['value'], type="change") - + def on_slider_changed(self, change=None): i0, i1 = self.slider.value - - self.selector.unobserve(self.on_selector_changed, names=['value'], type="change") + + self.selector.unobserve(self.on_selector_changed, names=["value"], type="change") self.selector.value = self.channel_ids[i0:i1][::-1] - self.selector.observe(self.on_selector_changed, names=['value'], type="change") + self.selector.observe(self.on_selector_changed, names=["value"], type="change") self.value = self.channel_ids[i0:i1] @@ -235,27 +252,23 @@ def on_selector_changed(self, change=None): channel_ids = channel_ids[::-1] if len(channel_ids) > 0: - self.slider.unobserve(self.on_slider_changed, names=['value'], type="change") + self.slider.unobserve(self.on_slider_changed, names=["value"], type="change") i0 = self.channel_ids.index(channel_ids[0]) i1 = self.channel_ids.index(channel_ids[-1]) + 1 self.slider.value = (i0, i1) - self.slider.observe(self.on_slider_changed, names=['value'], type="change") + self.slider.observe(self.on_slider_changed, names=["value"], type="change") self.value = channel_ids - class ScaleWidget(W.VBox): value = traitlets.Float() - def __init__(self, value=1., factor=1.2, **kwargs): - - assert factor > 1. 
+ def __init__(self, value=1.0, factor=1.2, **kwargs): + assert factor > 1.0 self.factor = factor - self.scale_label = W.Label("Scale", - layout=W.Layout(layout=W.Layout(width='95%'), - justify_content="center")) + self.scale_label = W.Label("Scale", layout=W.Layout(layout=W.Layout(width="95%"), justify_content="center")) self.plus_selector = W.Button( description="", @@ -264,7 +277,7 @@ def __init__(self, value=1., factor=1.2, **kwargs): tooltip="Increase scale", icon="arrow-up", # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - layout=W.Layout(width='60%', align_self='center'), + layout=W.Layout(width="60%", align_self="center"), ) self.minus_selector = W.Button( @@ -274,31 +287,31 @@ def __init__(self, value=1., factor=1.2, **kwargs): tooltip="Decrease scale", icon="arrow-down", # layout=W.Layout(width=f"{0.8 * width_cm}cm", height=f"{0.4 * height_cm}cm"), - layout=W.Layout(width='60%', align_self='center'), + layout=W.Layout(width="60%", align_self="center"), ) self.plus_selector.on_click(self.plus_clicked) self.minus_selector.on_click(self.minus_clicked) - self.value = 1. - super(W.VBox, self).__init__(children=[self.plus_selector, self.scale_label, self.minus_selector], - # layout=W.Layout(align_items="center", width="100%", height="100%"), - **kwargs) + self.value = 1.0 + super(W.VBox, self).__init__( + children=[self.plus_selector, self.scale_label, self.minus_selector], + # layout=W.Layout(align_items="center", width="100%", height="100%"), + **kwargs, + ) self.update_label() - self.observe(self.value_changed, names=['value'], type="change") - + self.observe(self.value_changed, names=["value"], type="change") + def update_label(self): self.scale_label.value = f"Scale: {self.value:0.2f}" - def plus_clicked(self, change=None): self.value = self.value * self.factor def minus_clicked(self, change=None): self.value = self.value / self.factor - def value_changed(self, change=None): self.update_label() @@ -319,20 +332,17 @@ def __init__(self, unit_ids, **kwargs): layout=W.Layout(height="100%", width="2cm"), ) - super(W.VBox, self).__init__(children=[label, self.selector], - layout=W.Layout(align_items="center"), - **kwargs) - - self.selector.observe(self.on_selector_changed, names=['value'], type="change") + super(W.VBox, self).__init__(children=[label, self.selector], layout=W.Layout(align_items="center"), **kwargs) + + self.selector.observe(self.on_selector_changed, names=["value"], type="change") + + self.observe(self.value_changed, names=["value"], type="change") - self.observe(self.value_changed, names=['value'], type="change") - def on_selector_changed(self, change=None): unit_ids = self.selector.value self.value = unit_ids - - def value_changed(self, change=None): - self.selector.unobserve(self.on_selector_changed, names=['value'], type="change") - self.selector.value = change['new'] - self.selector.observe(self.on_selector_changed, names=['value'], type="change") + def value_changed(self, change=None): + self.selector.unobserve(self.on_selector_changed, names=["value"], type="change") + self.selector.value = change["new"] + self.selector.observe(self.on_selector_changed, names=["value"], type="change") From eb80725559f6d5b3d1c882e9254e39e39331952d Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Wed, 27 Sep 2023 11:30:41 +0200 Subject: [PATCH 197/322] Update doc/modules/qualitymetrics/amplitude_cv.rst --- doc/modules/qualitymetrics/amplitude_cv.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/doc/modules/qualitymetrics/amplitude_cv.rst b/doc/modules/qualitymetrics/amplitude_cv.rst index 3edb1f9833..13117b607c 100644 --- a/doc/modules/qualitymetrics/amplitude_cv.rst +++ b/doc/modules/qualitymetrics/amplitude_cv.rst @@ -46,7 +46,7 @@ Example code References ---------- -.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_amplitude_spreads +.. autofunction:: spikeinterface.qualitymetrics.misc_metrics.compute_amplitude_cv_metrics Literature From 8e4b43a4f67a92a1497eda5d53f2be2e04f7779f Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Wed, 27 Sep 2023 11:37:12 +0200 Subject: [PATCH 198/322] Update src/spikeinterface/postprocessing/amplitude_scalings.py --- src/spikeinterface/postprocessing/amplitude_scalings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/postprocessing/amplitude_scalings.py b/src/spikeinterface/postprocessing/amplitude_scalings.py index 8823fd6257..7e6c95a875 100644 --- a/src/spikeinterface/postprocessing/amplitude_scalings.py +++ b/src/spikeinterface/postprocessing/amplitude_scalings.py @@ -431,7 +431,7 @@ def _are_unit_indices_overlapping(sparsity_mask, i, j): bool True if the unit indices i and j are overlapping, False otherwise """ - if np.sum(np.logical_and(sparsity_mask[i], sparsity_mask[j])) > 0: + if np.any(sparsity_mask[i] & sparsity_mask[j]): return True else: return False From 7605222e5707f6451a2ecc8b4fdbde747883c7bc Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Wed, 27 Sep 2023 06:49:32 -0400 Subject: [PATCH 199/322] rec_path = None, from Sam Co-authored-by: Garcia Samuel --- src/spikeinterface/exporters/to_phy.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index edfca0fa52..54ad0ea366 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -156,6 +156,8 @@ def export_to_phy( if use_relative_path: if copy_binary: f.write(f"dat_path = r'recording.dat'\n") + elif rec_path == "None": + f.write(f"dat_path = {rec_path}\n") else: f.write(f"dat_path = r'{str(Path(rec_path).relative_to(output_folder))}'\n") else: From f16b12c040ab512ce30e17219ca61e84168cc586 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 10:49:49 +0000 Subject: [PATCH 200/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/exporters/to_phy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index 54ad0ea366..ebc810b953 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -157,7 +157,7 @@ def export_to_phy( if copy_binary: f.write(f"dat_path = r'recording.dat'\n") elif rec_path == "None": - f.write(f"dat_path = {rec_path}\n") + f.write(f"dat_path = {rec_path}\n") else: f.write(f"dat_path = r'{str(Path(rec_path).relative_to(output_folder))}'\n") else: From 957a169e9cb663446398ed7e44abe47209e85619 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Wed, 27 Sep 2023 13:18:45 +0200 Subject: [PATCH 201/322] hotfix: synchrony metrics indexing --- src/spikeinterface/qualitymetrics/misc_metrics.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py 
index f449b3c31b..e9726a16da 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -552,12 +552,13 @@ def compute_synchrony_metrics(waveform_extractor, synchrony_sizes=(2, 4, 8), uni continue spike_complexity = complexity[np.isin(unique_spike_index, spikes_per_unit["sample_index"])] for synchrony_size in synchrony_sizes: - synchrony_counts[synchrony_size][unit_id] += np.count_nonzero(spike_complexity >= synchrony_size) + synchrony_counts[synchrony_size][unit_index] += np.count_nonzero(spike_complexity >= synchrony_size) # add counts for this segment synchrony_metrics_dict = { f"sync_spike_{synchrony_size}": { - unit_id: synchrony_counts[synchrony_size][unit_id] / spike_counts[unit_id] for unit_id in unit_ids + unit_id: synchrony_counts[synchrony_size][all_unit_ids.index(unit_id)] / spike_counts[unit_id] + for unit_id in unit_ids } for synchrony_size in synchrony_sizes } From 41155a1835f348d9181501d823cd78fca5cf6191 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 13:36:15 +0200 Subject: [PATCH 202/322] Changing the internal representation of overlaps --- .../clustering/clustering_tools.py | 4 +- .../sortingcomponents/matching/circus.py | 78 +++++++++++++------ 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 5ff74db3e7..032694a47e 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -617,10 +617,12 @@ def remove_duplicates_via_matching( "overlaps": computed["overlaps"], "templates": computed["templates"], "norms": computed["norms"], - "sparsities": computed["sparsities"], "temporal": computed["temporal"], "spatial": computed["spatial"], "singular": computed["singular"], + "units_overlaps": computed["units_overlaps"], + "unit_overlaps_indices": computed["unit_overlaps_indices"], + "sparsity_mask": computed["sparsity_mask"], } ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index ec6ef3a292..ffc2a225e8 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -122,14 +122,20 @@ def _prepare_templates(cls, d): else: sparsity = waveform_extractor.sparsity.mask + d['sparsity_mask'] = sparsity + units_overlaps = np.sum( + np.logical_and(sparsity[:, np.newaxis, :], sparsity[np.newaxis, :, :]), axis=2 + ) + d['units_overlaps'] = units_overlaps > 0 + d['unit_overlaps_indices'] = {} + for i in range(num_templates): + d['unit_overlaps_indices'][i], = np.nonzero(d['units_overlaps'][i]) + templates = waveform_extractor.get_all_templates(mode="median").copy() # First, we set masked channels to 0 - d["sparsities"] = {} for count in range(num_templates): - template = templates[count][:, sparsity[count]] - (d["sparsities"][count],) = np.nonzero(sparsity[count]) - templates[count][:, ~sparsity[count]] = 0 + templates[count][:, ~d['sparsity_mask'][count]] = 0 # Then we keep only the strongest components rank = d["rank"] @@ -141,19 +147,45 @@ def _prepare_templates(cls, d): # We reconstruct the approximated templates templates = np.matmul(d["temporal"] * d["singular"][:, np.newaxis, :], d["spatial"]) - 
d["temporal"] = np.flip(temporal, axis=1) d["templates"] = {} d["norms"] = np.zeros(num_templates, dtype=np.float32) # And get the norms, saving compressed templates for CC matrix for count in range(num_templates): - template = templates[count][:, sparsity[count]] + template = templates[count][:, d['sparsity_mask'][count]] d["norms"][count] = np.linalg.norm(template) d["templates"][count] = template / d["norms"][count] d["temporal"] /= d["norms"][:, np.newaxis, np.newaxis] - d["spatial"] = np.moveaxis(d["spatial"][:, :rank, :], [0, 1, 2], [1, 0, 2]) - d["temporal"] = np.moveaxis(d["temporal"][:, :, :rank], [0, 1, 2], [1, 2, 0]) + d["temporal"] = np.flip(d["temporal"], axis=1) + + d['overlaps'] = [] + for i in range(num_templates): + num_overlaps = np.sum(d['units_overlaps'][i]) + overlapping_units = np.where(d['units_overlaps'][i])[0] + + # Reconstruct unit template from SVD Matrices + data = d['temporal'][i] * d['singular'][i][np.newaxis, :] + template_i = np.matmul(data, d['spatial'][i, :, :]) + template_i = np.flipud(template_i) + + unit_overlaps = np.zeros([num_overlaps, 2*d['num_samples'] - 1], dtype=np.float32) + + for count, j in enumerate(overlapping_units): + overlapped_channels = d['sparsity_mask'][j] + visible_i = template_i[:, overlapped_channels] + + spatial_filters = d['spatial'][j, :, overlapped_channels] + spatially_filtered_template = np.matmul(visible_i, spatial_filters) + visible_i = spatially_filtered_template * d['singular'][j] + + for rank in range(visible_i.shape[1]): + unit_overlaps[count, :] += np.convolve(visible_i[:, rank], d['temporal'][j][:, rank], mode='full') + + d['overlaps'].append(unit_overlaps) + + d["spatial"] = np.moveaxis(d["spatial"], [0, 1, 2], [1, 0, 2]) + d["temporal"] = np.moveaxis(d["temporal"], [0, 1, 2], [1, 2, 0]) d["singular"] = d["singular"].T[:, :, np.newaxis] return d @@ -181,14 +213,10 @@ def initialize_and_check_kwargs(cls, recording, kwargs): if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "sparsities", "temporal", "spatial", "singular"]: + for key in ["norms", "temporal", "spatial", "singular", "units_overlaps", "sparsity_mask", "unit_overlaps_indices"]: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) - - if "overlaps" not in d: - d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) - d["ignored_ids"] = np.array(d["ignored_ids"]) omp_min_sps = d["omp_min_sps"] @@ -252,7 +280,7 @@ def main_function(cls, traces, d): spikes = np.empty(scalar_products.size, dtype=spike_dtype) idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) - M = np.zeros((100, 100), dtype=np.float32) + M = np.zeros((num_templates, num_templates), dtype=np.float32) all_selections = np.empty((2, scalar_products.size), dtype=np.int32) final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) @@ -273,18 +301,24 @@ def main_function(cls, traces, d): if num_selection > 0: delta_t = selection[1] - peak_index - idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] + idx = np.where((delta_t < neighbor_window) & (delta_t >= -num_samples))[0] myline = num_samples + delta_t[idx] + myindices = selection[0, idx] - if not best_cluster_ind in cached_overlaps: - cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() + local_overlaps = overlaps[best_cluster_ind] + overlapping_templates = d['unit_overlaps_indices'][best_cluster_ind] if num_selection == 
M.shape[0]: Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) Z[:num_selection, :num_selection] = M M = Z - M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] + mask = np.isin(myindices, overlapping_templates) + a, b = myindices[mask], myline[mask] + + table = np.zeros(num_templates, dtype=int) + table[overlapping_templates] = np.arange(len(overlapping_templates)) + M[num_selection, myindices[mask]] = local_overlaps[table[a], b] if vicinity == 0: scipy.linalg.solve_triangular( @@ -346,8 +380,8 @@ def main_function(cls, traces, d): tmp_best, tmp_peak = selection[:, i] diff_amp = diff_amplitudes[i] * norms[tmp_best] - if not tmp_best in cached_overlaps: - cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() + local_overlaps = overlaps[tmp_best] + overlapping_templates = d['units_overlaps'][tmp_best] if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] @@ -357,8 +391,8 @@ def main_function(cls, traces, d): idx = neighbors[tmp_peak]["idx"] tdx = neighbors[tmp_peak]["tdx"] - to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] - scalar_products[:, idx[0] : idx[1]] -= to_add + to_add = diff_amp * local_overlaps[:, tdx[0] : tdx[1]] + scalar_products[overlapping_templates, idx[0] : idx[1]] -= to_add is_valid = scalar_products > stop_criteria From 97aff7f6754e7c4d333b95629552fe37151bf24f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:36:51 +0000 Subject: [PATCH 203/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/matching/circus.py | 54 ++++++++++--------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e047cbdd31..5924d3bc18 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -123,20 +123,18 @@ def _prepare_templates(cls, d): else: sparsity = waveform_extractor.sparsity.mask - d['sparsity_mask'] = sparsity - units_overlaps = np.sum( - np.logical_and(sparsity[:, np.newaxis, :], sparsity[np.newaxis, :, :]), axis=2 - ) - d['units_overlaps'] = units_overlaps > 0 - d['unit_overlaps_indices'] = {} + d["sparsity_mask"] = sparsity + units_overlaps = np.sum(np.logical_and(sparsity[:, np.newaxis, :], sparsity[np.newaxis, :, :]), axis=2) + d["units_overlaps"] = units_overlaps > 0 + d["unit_overlaps_indices"] = {} for i in range(num_templates): - d['unit_overlaps_indices'][i], = np.nonzero(d['units_overlaps'][i]) + (d["unit_overlaps_indices"][i],) = np.nonzero(d["units_overlaps"][i]) templates = waveform_extractor.get_all_templates(mode="median").copy() # First, we set masked channels to 0 for count in range(num_templates): - templates[count][:, ~d['sparsity_mask'][count]] = 0 + templates[count][:, ~d["sparsity_mask"][count]] = 0 # Then we keep only the strongest components rank = d["rank"] @@ -153,37 +151,37 @@ def _prepare_templates(cls, d): # And get the norms, saving compressed templates for CC matrix for count in range(num_templates): - template = templates[count][:, d['sparsity_mask'][count]] + template = templates[count][:, d["sparsity_mask"][count]] d["norms"][count] = np.linalg.norm(template) d["templates"][count] = template / d["norms"][count] d["temporal"] /= d["norms"][:, 
np.newaxis, np.newaxis] d["temporal"] = np.flip(d["temporal"], axis=1) - d['overlaps'] = [] + d["overlaps"] = [] for i in range(num_templates): - num_overlaps = np.sum(d['units_overlaps'][i]) - overlapping_units = np.where(d['units_overlaps'][i])[0] + num_overlaps = np.sum(d["units_overlaps"][i]) + overlapping_units = np.where(d["units_overlaps"][i])[0] # Reconstruct unit template from SVD Matrices - data = d['temporal'][i] * d['singular'][i][np.newaxis, :] - template_i = np.matmul(data, d['spatial'][i, :, :]) + data = d["temporal"][i] * d["singular"][i][np.newaxis, :] + template_i = np.matmul(data, d["spatial"][i, :, :]) template_i = np.flipud(template_i) - unit_overlaps = np.zeros([num_overlaps, 2*d['num_samples'] - 1], dtype=np.float32) + unit_overlaps = np.zeros([num_overlaps, 2 * d["num_samples"] - 1], dtype=np.float32) for count, j in enumerate(overlapping_units): - overlapped_channels = d['sparsity_mask'][j] + overlapped_channels = d["sparsity_mask"][j] visible_i = template_i[:, overlapped_channels] - spatial_filters = d['spatial'][j, :, overlapped_channels] + spatial_filters = d["spatial"][j, :, overlapped_channels] spatially_filtered_template = np.matmul(visible_i, spatial_filters) - visible_i = spatially_filtered_template * d['singular'][j] - + visible_i = spatially_filtered_template * d["singular"][j] + for rank in range(visible_i.shape[1]): - unit_overlaps[count, :] += np.convolve(visible_i[:, rank], d['temporal'][j][:, rank], mode='full') + unit_overlaps[count, :] += np.convolve(visible_i[:, rank], d["temporal"][j][:, rank], mode="full") - d['overlaps'].append(unit_overlaps) + d["overlaps"].append(unit_overlaps) d["spatial"] = np.moveaxis(d["spatial"], [0, 1, 2], [1, 0, 2]) d["temporal"] = np.moveaxis(d["temporal"], [0, 1, 2], [1, 2, 0]) @@ -214,7 +212,15 @@ def initialize_and_check_kwargs(cls, recording, kwargs): if "templates" not in d: d = cls._prepare_templates(d) else: - for key in ["norms", "temporal", "spatial", "singular", "units_overlaps", "sparsity_mask", "unit_overlaps_indices"]: + for key in [ + "norms", + "temporal", + "spatial", + "singular", + "units_overlaps", + "sparsity_mask", + "unit_overlaps_indices", + ]: assert d[key] is not None, "If templates are provided, %d should also be there" % key d["num_templates"] = len(d["templates"]) @@ -307,7 +313,7 @@ def main_function(cls, traces, d): myindices = selection[0, idx] local_overlaps = overlaps[best_cluster_ind] - overlapping_templates = d['unit_overlaps_indices'][best_cluster_ind] + overlapping_templates = d["unit_overlaps_indices"][best_cluster_ind] if num_selection == M.shape[0]: Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) @@ -382,7 +388,7 @@ def main_function(cls, traces, d): diff_amp = diff_amplitudes[i] * norms[tmp_best] local_overlaps = overlaps[tmp_best] - overlapping_templates = d['units_overlaps'][tmp_best] + overlapping_templates = d["units_overlaps"][tmp_best] if not tmp_peak in neighbors.keys(): idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] From 8da6b79daa95bc4148123e76742607fb82b23fb3 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 13:59:41 +0200 Subject: [PATCH 204/322] Keeping the two matching engines for more tests before merging and final decision --- .../clustering/clustering_tools.py | 39 +- .../sortingcomponents/matching/circus.py | 410 +++++++++++++++++- .../sortingcomponents/matching/method_list.py | 5 +- 3 files changed, 436 insertions(+), 18 deletions(-) diff --git 
a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 032694a47e..455af3ddfd 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -539,6 +539,7 @@ def remove_duplicates_via_matching( method_kwargs={}, job_kwargs={}, tmp_folder=None, + method='circus-omp' ): from spikeinterface.sortingcomponents.matching import find_spikes_from_templates from spikeinterface import get_noise_levels @@ -610,21 +611,31 @@ def remove_duplicates_via_matching( method_kwargs.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( - sub_recording, method="circus-omp", method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs - ) - method_kwargs.update( - { - "overlaps": computed["overlaps"], - "templates": computed["templates"], - "norms": computed["norms"], - "temporal": computed["temporal"], - "spatial": computed["spatial"], - "singular": computed["singular"], - "units_overlaps": computed["units_overlaps"], - "unit_overlaps_indices": computed["unit_overlaps_indices"], - "sparsity_mask": computed["sparsity_mask"], - } + sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) + if method == 'circus-omp-vsd': + method_kwargs.update( + { + "overlaps": computed["overlaps"], + "templates": computed["templates"], + "norms": computed["norms"], + "temporal": computed["temporal"], + "spatial": computed["spatial"], + "singular": computed["singular"], + "units_overlaps": computed["units_overlaps"], + "unit_overlaps_indices": computed["unit_overlaps_indices"], + "sparsity_mask": computed["sparsity_mask"], + } + ) + elif method == 'circus-omp': + method_kwargs.update( + { + "overlaps": computed["overlaps"], + "templates": computed["templates"], + "norms": computed["norms"], + "sparsities": computed["sparsities"] + } + ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) if np.sum(valid) > 0: if np.sum(valid) == 1: diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e047cbdd31..08be0985f1 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -33,8 +33,100 @@ from .main import BaseTemplateMatchingEngine -################# -# Circus peeler # + +from scipy.fft._helper import _init_nd_shape_and_axes + +try: + from scipy.signal.signaltools import _init_freq_conv_axes, _apply_conv_mode +except Exception: + from scipy.signal._signaltools import _init_freq_conv_axes, _apply_conv_mode +from scipy import linalg, fft as sp_fft + + +def get_scipy_shape(in1, in2, mode="full", axes=None, calc_fast_len=True): + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return np.array([]) + + in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) + + s1 = in1.shape + s2 = in2.shape + + shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] + + if not len(axes): + return in1 * in2 + + complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c" + + if calc_fast_len: + # Speed up FFT by padding to 
optimal size. + fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] + else: + fshape = shape + + return fshape, axes + + +def fftconvolve_with_cache(in1, in2, cache, mode="full", axes=None): + in1 = np.asarray(in1) + in2 = np.asarray(in2) + + if in1.ndim == in2.ndim == 0: # scalar inputs + return in1 * in2 + elif in1.ndim != in2.ndim: + raise ValueError("in1 and in2 should have the same dimensionality") + elif in1.size == 0 or in2.size == 0: # empty arrays + return np.array([]) + + in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) + + s1 = in1.shape + s2 = in2.shape + + shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] + + ret = _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True) + + return _apply_conv_mode(ret, s1, s2, mode, axes) + + +def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): + if not len(axes): + return in1 * in2 + + complex_result = in1.dtype.kind == "c" or in2.dtype.kind == "c" + + if calc_fast_len: + # Speed up FFT by padding to optimal size. + fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] + else: + fshape = shape + + if not complex_result: + fft, ifft = sp_fft.rfftn, sp_fft.irfftn + else: + fft, ifft = sp_fft.fftn, sp_fft.ifftn + + sp1 = cache["full"][cache["mask"]] + sp2 = cache["template"] + + # sp2 = fft(in2[cache['mask']], fshape, axes=axes) + ret = ifft(sp1 * sp2, fshape, axes=axes) + + if calc_fast_len: + fslice = tuple([slice(sz) for sz in shape]) + ret = ret[fslice] + + return ret + def compute_overlaps(templates, num_samples, num_channels, sparsities): @@ -101,6 +193,320 @@ class CircusOMPPeeler(BaseTemplateMatchingEngine): ----- """ + _default_params = { + "amplitudes": [0.6, 2], + "omp_min_sps": 0.1, + "waveform_extractor": None, + "templates": None, + "overlaps": None, + "norms": None, + "random_chunk_kwargs": {}, + "noise_levels": None, + "sparse_kwargs": {"method": "ptp", "threshold": 1}, + "ignored_ids": [], + "vicinity": 0, + } + + @classmethod + def _prepare_templates(cls, d): + waveform_extractor = d["waveform_extractor"] + num_templates = len(d["waveform_extractor"].sorting.unit_ids) + + if not waveform_extractor.is_sparse(): + sparsity = compute_sparsity(waveform_extractor, **d["sparse_kwargs"]).mask + else: + sparsity = waveform_extractor.sparsity.mask + + templates = waveform_extractor.get_all_templates(mode="median").copy() + + d["sparsities"] = {} + d["templates"] = {} + d["norms"] = np.zeros(num_templates, dtype=np.float32) + + for count, unit_id in enumerate(waveform_extractor.sorting.unit_ids): + template = templates[count][:, sparsity[count]] + (d["sparsities"][count],) = np.nonzero(sparsity[count]) + d["norms"][count] = np.linalg.norm(template) + d["templates"][count] = template / d["norms"][count] + + return d + + @classmethod + def initialize_and_check_kwargs(cls, recording, kwargs): + d = cls._default_params.copy() + d.update(kwargs) + + # assert isinstance(d['waveform_extractor'], WaveformExtractor) + + for v in ["omp_min_sps"]: + assert (d[v] >= 0) and (d[v] <= 1), f"{v} should be in [0, 1]" + + d["num_channels"] = d["waveform_extractor"].recording.get_num_channels() + d["num_samples"] = d["waveform_extractor"].nsamples + d["nbefore"] = d["waveform_extractor"].nbefore + d["nafter"] = d["waveform_extractor"].nafter + d["sampling_frequency"] = d["waveform_extractor"].recording.get_sampling_frequency() + d["vicinity"] *= d["num_samples"] + + if d["noise_levels"] is None: + 
print("CircusOMPPeeler : noise should be computed outside") + d["noise_levels"] = get_noise_levels(recording, **d["random_chunk_kwargs"], return_scaled=False) + + if d["templates"] is None: + d = cls._prepare_templates(d) + else: + for key in ["norms", "sparsities"]: + assert d[key] is not None, "If templates are provided, %d should also be there" % key + + d["num_templates"] = len(d["templates"]) + + if d["overlaps"] is None: + d["overlaps"] = compute_overlaps(d["templates"], d["num_samples"], d["num_channels"], d["sparsities"]) + + d["ignored_ids"] = np.array(d["ignored_ids"]) + + omp_min_sps = d["omp_min_sps"] + # nb_active_channels = np.array([len(d['sparsities'][count]) for count in range(d['num_templates'])]) + d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) + + return d + + @classmethod + def serialize_method_kwargs(cls, kwargs): + kwargs = dict(kwargs) + # remove waveform_extractor + kwargs.pop("waveform_extractor") + return kwargs + + @classmethod + def unserialize_in_worker(cls, kwargs): + return kwargs + + @classmethod + def get_margin(cls, recording, kwargs): + margin = 2 * max(kwargs["nbefore"], kwargs["nafter"]) + return margin + + @classmethod + def main_function(cls, traces, d): + templates = d["templates"] + num_templates = d["num_templates"] + num_channels = d["num_channels"] + num_samples = d["num_samples"] + overlaps = d["overlaps"] + norms = d["norms"] + nbefore = d["nbefore"] + nafter = d["nafter"] + omp_tol = np.finfo(np.float32).eps + num_samples = d["nafter"] + d["nbefore"] + neighbor_window = num_samples - 1 + min_amplitude, max_amplitude = d["amplitudes"] + sparsities = d["sparsities"] + ignored_ids = d["ignored_ids"] + stop_criteria = d["stop_criteria"] + vicinity = d["vicinity"] + + if "cached_fft_kernels" not in d: + d["cached_fft_kernels"] = {"fshape": 0} + + cached_fft_kernels = d["cached_fft_kernels"] + + num_timesteps = len(traces) + + num_peaks = num_timesteps - num_samples + 1 + + traces = traces.T + + dummy_filter = np.empty((num_channels, num_samples), dtype=np.float32) + dummy_traces = np.empty((num_channels, num_timesteps), dtype=np.float32) + + fshape, axes = get_scipy_shape(dummy_filter, traces, axes=1) + fft_cache = {"full": sp_fft.rfftn(traces, fshape, axes=axes)} + + scalar_products = np.empty((num_templates, num_peaks), dtype=np.float32) + + flagged_chunk = cached_fft_kernels["fshape"] != fshape[0] + + for i in range(num_templates): + if i not in ignored_ids: + if i not in cached_fft_kernels or flagged_chunk: + kernel_filter = np.ascontiguousarray(templates[i][::-1].T) + cached_fft_kernels.update({i: sp_fft.rfftn(kernel_filter, fshape, axes=axes)}) + cached_fft_kernels["fshape"] = fshape[0] + + fft_cache.update({"mask": sparsities[i], "template": cached_fft_kernels[i]}) + + convolution = fftconvolve_with_cache(dummy_filter, dummy_traces, fft_cache, axes=1, mode="valid") + if len(convolution) > 0: + scalar_products[i] = convolution.sum(0) + else: + scalar_products[i] = 0 + + if len(ignored_ids) > 0: + scalar_products[ignored_ids] = -np.inf + + num_spikes = 0 + + spikes = np.empty(scalar_products.size, dtype=spike_dtype) + idx_lookup = np.arange(scalar_products.size).reshape(num_templates, -1) + + M = np.zeros((100, 100), dtype=np.float32) + + all_selections = np.empty((2, scalar_products.size), dtype=np.int32) + final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32) + num_selection = 0 + + full_sps = scalar_products.copy() + + neighbors = {} + cached_overlaps = {} + + is_valid = 
scalar_products > stop_criteria + all_amplitudes = np.zeros(0, dtype=np.float32) + is_in_vicinity = np.zeros(0, dtype=np.int32) + + while np.any(is_valid): + best_amplitude_ind = scalar_products[is_valid].argmax() + best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape) + + if num_selection > 0: + delta_t = selection[1] - peak_index + idx = np.where((delta_t < neighbor_window) & (delta_t > -num_samples))[0] + myline = num_samples + delta_t[idx] + + if not best_cluster_ind in cached_overlaps: + cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray() + + if num_selection == M.shape[0]: + Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) + Z[:num_selection, :num_selection] = M + M = Z + + M[num_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline] + + if vicinity == 0: + scipy.linalg.solve_triangular( + M[:num_selection, :num_selection], + M[num_selection, :num_selection], + trans=0, + lower=1, + overwrite_b=True, + check_finite=False, + ) + + v = nrm2(M[num_selection, :num_selection]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + is_in_vicinity = np.where(np.abs(delta_t) < vicinity)[0] + + if len(is_in_vicinity) > 0: + L = M[is_in_vicinity, :][:, is_in_vicinity] + + M[num_selection, is_in_vicinity] = scipy.linalg.solve_triangular( + L, M[num_selection, is_in_vicinity], trans=0, lower=1, overwrite_b=True, check_finite=False + ) + + v = nrm2(M[num_selection, is_in_vicinity]) ** 2 + Lkk = 1 - v + if Lkk <= omp_tol: # selected atoms are dependent + break + M[num_selection, num_selection] = np.sqrt(Lkk) + else: + M[num_selection, num_selection] = 1.0 + else: + M[0, 0] = 1 + + all_selections[:, num_selection] = [best_cluster_ind, peak_index] + num_selection += 1 + + selection = all_selections[:, :num_selection] + res_sps = full_sps[selection[0], selection[1]] + + if True: # vicinity == 0: + all_amplitudes, _ = potrs(M[:num_selection, :num_selection], res_sps, lower=True, overwrite_b=False) + all_amplitudes /= norms[selection[0]] + else: + # This is not working, need to figure out why + is_in_vicinity = np.append(is_in_vicinity, num_selection - 1) + all_amplitudes = np.append(all_amplitudes, np.float32(1)) + L = M[is_in_vicinity, :][:, is_in_vicinity] + all_amplitudes[is_in_vicinity], _ = potrs(L, res_sps[is_in_vicinity], lower=True, overwrite_b=False) + all_amplitudes[is_in_vicinity] /= norms[selection[0][is_in_vicinity]] + + diff_amplitudes = all_amplitudes - final_amplitudes[selection[0], selection[1]] + modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0] + final_amplitudes[selection[0], selection[1]] = all_amplitudes + + for i in modified: + tmp_best, tmp_peak = selection[:, i] + diff_amp = diff_amplitudes[i] * norms[tmp_best] + + if not tmp_best in cached_overlaps: + cached_overlaps[tmp_best] = overlaps[tmp_best].toarray() + + if not tmp_peak in neighbors.keys(): + idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] + tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] + neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} + + idx = neighbors[tmp_peak]["idx"] + tdx = neighbors[tmp_peak]["tdx"] + + to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0] : tdx[1]] + scalar_products[:, idx[0] : idx[1]] -= to_add + + is_valid = scalar_products > stop_criteria + + is_valid = (final_amplitudes > min_amplitude) * (final_amplitudes < 
max_amplitude) + valid_indices = np.where(is_valid) + + num_spikes = len(valid_indices[0]) + spikes["sample_index"][:num_spikes] = valid_indices[1] + d["nbefore"] + spikes["channel_index"][:num_spikes] = 0 + spikes["cluster_index"][:num_spikes] = valid_indices[0] + spikes["amplitude"][:num_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]] + + spikes = spikes[:num_spikes] + order = np.argsort(spikes["sample_index"]) + spikes = spikes[order] + + return spikes + +class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): + """ + Orthogonal Matching Pursuit inspired from Spyking Circus sorter + + https://elifesciences.org/articles/34518 + + This is an Orthogonal Template Matching algorithm. For speed and + memory optimization, templates are automatically sparsified. Signal + is convolved with the templates, and as long as some scalar products + are higher than a given threshold, we use a Cholesky decomposition + to compute the optimal amplitudes needed to reconstruct the signal. + + IMPORTANT NOTE: small chunks are more efficient for such Peeler, + consider using 100ms chunk + + Parameters + ---------- + amplitude: tuple + (Minimal, Maximal) amplitudes allowed for every template + omp_min_sps: float + Stopping criteria of the OMP algorithm, in percentage of the norm + noise_levels: array + The noise levels, for every channels. If None, they will be automatically + computed + random_chunk_kwargs: dict + Parameters for computing noise levels, if not provided (sub optimal) + sparse_kwargs: dict + Parameters to extract a sparsity mask from the waveform_extractor, if not + already sparse. + ----- + """ + _default_params = { "amplitudes": [0.6, 2], "omp_min_sps": 0.1, diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index bedc04a9d5..99c2817338 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -1,6 +1,6 @@ from .naive import NaiveMatching from .tdc import TridesclousPeeler -from .circus import CircusPeeler, CircusOMPPeeler +from .circus import CircusPeeler, CircusOMPPeeler, CircusOMPSVDPeeler from .wobble import WobbleMatch matching_methods = { @@ -8,5 +8,6 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, + 'circus-omp-svd' : CircusOMPSVDPeeler, "wobble": WobbleMatch, -} +} \ No newline at end of file From a6b4774000159f8db5439072acc8bdec4757d26b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:00:19 +0000 Subject: [PATCH 205/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../clustering/clustering_tools.py | 14 ++++---------- .../sortingcomponents/matching/circus.py | 2 +- .../sortingcomponents/matching/method_list.py | 4 ++-- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 455af3ddfd..17c38e2f8a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -533,13 +533,7 @@ def remove_duplicates( def remove_duplicates_via_matching( - waveform_extractor, - noise_levels, - peak_labels, - method_kwargs={}, - job_kwargs={}, - tmp_folder=None, - method='circus-omp' + 
waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp" ): from spikeinterface.sortingcomponents.matching import find_spikes_from_templates from spikeinterface import get_noise_levels @@ -613,7 +607,7 @@ def remove_duplicates_via_matching( spikes, computed = find_spikes_from_templates( sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) - if method == 'circus-omp-vsd': + if method == "circus-omp-vsd": method_kwargs.update( { "overlaps": computed["overlaps"], @@ -627,13 +621,13 @@ def remove_duplicates_via_matching( "sparsity_mask": computed["sparsity_mask"], } ) - elif method == 'circus-omp': + elif method == "circus-omp": method_kwargs.update( { "overlaps": computed["overlaps"], "templates": computed["templates"], "norms": computed["norms"], - "sparsities": computed["sparsities"] + "sparsities": computed["sparsities"], } ) valid = (spikes["sample_index"] >= half_marging) * (spikes["sample_index"] < duration + half_marging) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e7bdcd161c..502c887ac4 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -128,7 +128,6 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret - def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) @@ -475,6 +474,7 @@ def main_function(cls, traces, d): return spikes + class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): """ Orthogonal Matching Pursuit inspired from Spyking Circus sorter diff --git a/src/spikeinterface/sortingcomponents/matching/method_list.py b/src/spikeinterface/sortingcomponents/matching/method_list.py index 99c2817338..d982943126 100644 --- a/src/spikeinterface/sortingcomponents/matching/method_list.py +++ b/src/spikeinterface/sortingcomponents/matching/method_list.py @@ -8,6 +8,6 @@ "tridesclous": TridesclousPeeler, "circus": CircusPeeler, "circus-omp": CircusOMPPeeler, - 'circus-omp-svd' : CircusOMPSVDPeeler, + "circus-omp-svd": CircusOMPSVDPeeler, "wobble": WobbleMatch, -} \ No newline at end of file +} From 257c74c856254f8ed31365f0629b53baf844fb74 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 15:52:38 +0200 Subject: [PATCH 206/322] Slight misalignement --- .../sortingcomponents/matching/circus.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index e7bdcd161c..04d780bb6b 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -714,8 +714,8 @@ def main_function(cls, traces, d): if num_selection > 0: delta_t = selection[1] - peak_index - idx = np.where((delta_t < neighbor_window) & (delta_t >= -num_samples))[0] - myline = num_samples + delta_t[idx] + idx = np.where((delta_t < num_samples) & (delta_t > -num_samples))[0] + myline = neighbor_window + delta_t[idx] myindices = selection[0, idx] local_overlaps = overlaps[best_cluster_ind] @@ -731,7 +731,7 @@ def main_function(cls, traces, d): table = np.zeros(num_templates, dtype=int) table[overlapping_templates] = np.arange(len(overlapping_templates)) - M[num_selection, myindices[mask]] = local_overlaps[table[a], b] + M[num_selection, idx[mask]] = local_overlaps[table[a], 
b] if vicinity == 0: scipy.linalg.solve_triangular( @@ -797,8 +797,8 @@ def main_function(cls, traces, d): overlapping_templates = d["units_overlaps"][tmp_best] if not tmp_peak in neighbors.keys(): - idx = [max(0, tmp_peak - num_samples), min(num_peaks, tmp_peak + neighbor_window)] - tdx = [num_samples + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak] + idx = [max(0, tmp_peak - neighbor_window), min(num_peaks, tmp_peak + num_samples)] + tdx = [neighbor_window + idx[0] - tmp_peak, num_samples + idx[1] - tmp_peak - 1] neighbors[tmp_peak] = {"idx": idx, "tdx": tdx} idx = neighbors[tmp_peak]["idx"] From 6c561f214b02716e8da41a7ac198a94081f056a4 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 15:54:14 +0200 Subject: [PATCH 207/322] more fix after merge with main and the new pickle to file mechanism --- .../comparison/groundtruthstudy.py | 21 +++++++++++-------- src/spikeinterface/sorters/basesorter.py | 10 ++++++--- src/spikeinterface/sorters/launcher.py | 8 ++++++- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index fcebb356a0..eb430f69bd 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -194,10 +194,12 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True sorter_name = params.pop("sorter_name") job = dict(sorter_name=sorter_name, recording=recording, - output_folder=sorter_folder) + output_folder=sorter_folder, + ) job.update(params) # the verbose is overwritten and global to all run_sorters job["verbose"] = verbose + job["with_output"] = False job_list.append(job) run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) @@ -217,7 +219,8 @@ def copy_sortings(self, case_keys=None, force=True): if (sorter_folder / "spikeinterface_log.json").exists(): - sorting = read_sorter_folder(sorter_folder, raise_error=False) + sorting = read_sorter_folder(sorter_folder, raise_error=False, + register_recording=False, sorting_info=False) else: sorting = None @@ -383,13 +386,12 @@ def get_count_units( index = pd.MultiIndex.from_tuples(case_keys, names=self.levels) - columns = ["num_gt", "num_sorter", "num_well_detected", "num_redundant", "num_overmerged"] + columns = ["num_gt", "num_sorter", "num_well_detected"] comp = self.comparisons[case_keys[0]] if comp.exhaustive_gt: - columns.extend(["num_false_positive", "num_bad"]) + columns.extend(["num_false_positive", "num_redundant", "num_overmerged", "num_bad"]) count_units = pd.DataFrame(index=index, columns=columns, dtype=int) - for key in case_keys: comp = self.comparisons.get(key, None) assert comp is not None, "You need to do study.run_comparisons() first" @@ -402,11 +404,12 @@ def get_count_units( count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( well_detected_score ) - count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) + if comp.exhaustive_gt: + count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( + overmerged_score + ) count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units( redundant_score ) diff --git a/src/spikeinterface/sorters/basesorter.py b/src/spikeinterface/sorters/basesorter.py index 
8d87558191..a956f8c811 100644 --- a/src/spikeinterface/sorters/basesorter.py +++ b/src/spikeinterface/sorters/basesorter.py @@ -202,7 +202,7 @@ def load_recording_from_folder(cls, output_folder, with_warnings=False): recording = None else: recording = load_extractor(json_file, base_folder=output_folder) - elif pickle_file.exits(): + elif pickle_file.exists(): recording = load_extractor(pickle_file) return recording @@ -324,8 +324,12 @@ def get_result_from_folder(cls, output_folder, register_recording=True, sorting_ if sorting_info: # set sorting info to Sorting object - with open(output_folder / "spikeinterface_recording.json", "r") as f: - rec_dict = json.load(f) + if (output_folder / "spikeinterface_recording.json").exists(): + with open(output_folder / "spikeinterface_recording.json", "r") as f: + rec_dict = json.load(f) + else: + rec_dict = None + with open(output_folder / "spikeinterface_params.json", "r") as f: params_dict = json.load(f) with open(output_folder / "spikeinterface_log.json", "r") as f: diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index f32a468a22..12c59cbe45 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -66,7 +66,8 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal engine_kwargs: dict return_output: bool, dfault False - Return a sorting or None. + Return a sortings or None. + This also overwrite kwargs in in run_sorter(with_sorting=True/False) Returns ------- @@ -88,8 +89,13 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal "processpoolexecutor", ), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." out = [] + for kwargs in job_list: + kwargs['with_output'] = True else: out = None + for kwargs in job_list: + kwargs['with_output'] = False + if engine == "loop": # simple loop in main process From cb9a2289cf1aab818307265aefa1abfcf2a0329c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 13:55:09 +0000 Subject: [PATCH 208/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- doc/modules/comparison.rst | 2 +- src/spikeinterface/comparison/collision.py | 17 +--- src/spikeinterface/comparison/correlogram.py | 6 +- .../comparison/groundtruthstudy.py | 92 ++++++++----------- .../comparison/tests/test_groundtruthstudy.py | 28 ++---- src/spikeinterface/sorters/launcher.py | 5 +- src/spikeinterface/widgets/gtstudy.py | 31 +++---- src/spikeinterface/widgets/widget_list.py | 2 +- 8 files changed, 74 insertions(+), 109 deletions(-) diff --git a/doc/modules/comparison.rst b/doc/modules/comparison.rst index 57e9a0b5ba..76ab7855c6 100644 --- a/doc/modules/comparison.rst +++ b/doc/modules/comparison.rst @@ -314,7 +314,7 @@ The all mechanism is based on an intrinsic organization into a "study_folder" wi study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "dataset"]) - + # all cases in one function study.run_sorters() diff --git a/src/spikeinterface/comparison/collision.py b/src/spikeinterface/comparison/collision.py index 01626b34b8..dd04b2c72d 100644 --- a/src/spikeinterface/comparison/collision.py +++ b/src/spikeinterface/comparison/collision.py @@ -5,10 +5,6 @@ import numpy as np - - - - class CollisionGTComparison(GroundTruthComparison): """ This class is an extension of GroundTruthComparison by focusing to 
benchmark spike in collision. @@ -164,7 +160,6 @@ def compute_collision_by_similarity(self, similarity_matrix, unit_ids=None, good return similarities, recall_scores, pair_names - class CollisionGTStudy(GroundTruthStudy): def run_comparisons(self, case_keys=None, exhaustive_gt=True, collision_lag=2.0, nbins=11, **kwargs): _kwargs = dict() @@ -179,11 +174,12 @@ def run_comparisons(self, case_keys=None, exhaustive_gt=True, collision_lag=2.0, def get_lags(self, key): comp = self.comparisons[key] fs = comp.sorting1.get_sampling_frequency() - lags = comp.bins / fs * 1000. + lags = comp.bins / fs * 1000.0 return lags def precompute_scores_by_similarities(self, case_keys=None, good_only=False, min_accuracy=0.9): import sklearn + if case_keys is None: case_keys = self.cases.keys() @@ -197,16 +193,13 @@ def precompute_scores_by_similarities(self, case_keys=None, good_only=False, min similarity = sklearn.metrics.pairwise.cosine_similarity(flat_templates) comp = self.comparisons[key] similarities, recall_scores, pair_names = comp.compute_collision_by_similarity( - similarity, good_only=good_only, min_accuracy=min_accuracy - ) + similarity, good_only=good_only, min_accuracy=min_accuracy + ) self.all_similarities[key] = similarities self.all_recall_scores[key] = recall_scores - def get_mean_over_similarity_range(self, similarity_range, key): - idx = (self.all_similarities[key] >= similarity_range[0]) & ( - self.all_similarities[key] <= similarity_range[1] - ) + idx = (self.all_similarities[key] >= similarity_range[0]) & (self.all_similarities[key] <= similarity_range[1]) all_similarities = self.all_similarities[key][idx] all_recall_scores = self.all_recall_scores[key][idx] diff --git a/src/spikeinterface/comparison/correlogram.py b/src/spikeinterface/comparison/correlogram.py index 150f5afe55..aaffef1887 100644 --- a/src/spikeinterface/comparison/correlogram.py +++ b/src/spikeinterface/comparison/correlogram.py @@ -6,7 +6,6 @@ import numpy as np - class CorrelogramGTComparison(GroundTruthComparison): """ This class is an extension of GroundTruthComparison by focusing @@ -112,9 +111,10 @@ def compute_correlogram_by_similarity(self, similarity_matrix, window_ms=None): return similarities, errors - class CorrelogramGTStudy(GroundTruthStudy): - def run_comparisons(self, case_keys=None, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs): + def run_comparisons( + self, case_keys=None, exhaustive_gt=True, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs + ): _kwargs = dict() _kwargs.update(kwargs) _kwargs["exhaustive_gt"] = exhaustive_gt diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index eb430f69bd..d43727cb44 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -32,17 +32,18 @@ class GroundTruthStudy: * several sorters for comparisons * same sorter with differents parameters * any combination of these (and more) - + For increased flexibility, cases keys can be a tuple so that we can vary complexity along several "levels" or "axis" (paremeters or sorters). In this case, the result dataframes will have `MultiIndex` to handle the different levels. - - A ground-truth dataset is made of a `Recording` and a `Sorting` object. For example, it can be a simulated dataset with MEArec or internally generated (see + + A ground-truth dataset is made of a `Recording` and a `Sorting` object. 
For example, it can be a simulated dataset with MEArec or internally generated (see :py:fun:`~spikeinterface.core.generate.generate_ground_truth_recording()`). - + This GroundTruthStudy have been refactor in version 0.100 to be more flexible than previous versions. Note that the underlying folder structure is not backward compatible! """ + def __init__(self, study_folder): self.folder = Path(study_folder) @@ -55,7 +56,6 @@ def __init__(self, study_folder): @classmethod def create(cls, study_folder, datasets={}, cases={}, levels=None): - # check that cases keys are homogeneous key0 = list(cases.keys())[0] if isinstance(key0, str): @@ -67,7 +67,9 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): elif isinstance(key0, tuple): assert all(isinstance(key, tuple) for key in cases.keys()), "Keys for cases are not homogeneous" num_levels = len(key0) - assert all(len(key) == num_levels for key in cases.keys()), "Keys for cases are not homogeneous, tuple negth differ" + assert all( + len(key) == num_levels for key in cases.keys() + ), "Keys for cases are not homogeneous, tuple negth differ" if levels is None: levels = [f"level{i}" for i in range(num_levels)] else: @@ -76,7 +78,6 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): else: raise ValueError("Keys for cases must str or tuple") - study_folder = Path(study_folder) study_folder.mkdir(exist_ok=False, parents=True) @@ -98,8 +99,7 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): # sortings are pickled + saved as NumpyFolderSorting gt_sorting.dump_to_pickle(study_folder / f"datasets/gt_sortings/{key}.pickle") gt_sorting.save(format="numpy_folder", folder=study_folder / f"datasets/gt_sortings/{key}") - - + info = {} info["levels"] = levels (study_folder / "info.json").write_text(json.dumps(info, indent=4), encoding="utf8") @@ -109,14 +109,13 @@ def create(cls, study_folder, datasets={}, cases={}, levels=None): return cls(study_folder) - def scan_folder(self): if not (self.folder / "datasets").exists(): raise ValueError(f"This is folder is not a GroundTruthStudy : {self.folder.absolute()}") with open(self.folder / "info.json", "r") as f: self.info = json.load(f) - + self.levels = self.info["levels"] for rec_file in (self.folder / "datasets" / "recordings").glob("*.pickle"): @@ -124,7 +123,7 @@ def scan_folder(self): rec = load_extractor(rec_file) gt_sorting = load_extractor(self.folder / f"datasets" / "gt_sortings" / key) self.datasets[key] = (rec, gt_sorting) - + with open(self.folder / "cases.pickle", "rb") as f: self.cases = pickle.load(f) @@ -139,7 +138,6 @@ def scan_folder(self): sorting = None self.sortings[key] = sorting - def __repr__(self): t = f"{self.__class__.__name__} {self.folder.stem} \n" t += f" datasets: {len(self.datasets)} {list(self.datasets.keys())}\n" @@ -157,7 +155,7 @@ def key_to_str(self, key): else: raise ValueError("Keys for cases must str or tuple") - def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True, verbose=False): + def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True, verbose=False): if case_keys is None: case_keys = self.cases.keys() @@ -187,15 +185,15 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True if log_file.exists(): log_file.unlink() - params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given recording, _ = self.datasets[self.cases[key]["dataset"]] sorter_name = params.pop("sorter_name") - job = dict(sorter_name=sorter_name, 
- recording=recording, - output_folder=sorter_folder, - ) + job = dict( + sorter_name=sorter_name, + recording=recording, + output_folder=sorter_folder, + ) job.update(params) # the verbose is overwritten and global to all run_sorters job["verbose"] = verbose @@ -205,25 +203,25 @@ def run_sorters(self, case_keys=None, engine='loop', engine_kwargs={}, keep=True run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=False) # TODO later create a list in laucher for engine blocking and non-blocking - if engine not in ("slurm", ): + if engine not in ("slurm",): self.copy_sortings(case_keys) def copy_sortings(self, case_keys=None, force=True): if case_keys is None: case_keys = self.cases.keys() - + for key in case_keys: sorting_folder = self.folder / "sortings" / self.key_to_str(key) sorter_folder = self.folder / "sorters" / self.key_to_str(key) log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" - if (sorter_folder / "spikeinterface_log.json").exists(): - sorting = read_sorter_folder(sorter_folder, raise_error=False, - register_recording=False, sorting_info=False) + sorting = read_sorter_folder( + sorter_folder, raise_error=False, register_recording=False, sorting_info=False + ) else: sorting = None - + if sorting is not None: if sorting_folder.exists(): if force: @@ -241,7 +239,6 @@ def copy_sortings(self, case_keys=None, force=True): shutil.copyfile(sorter_folder / "spikeinterface_log.json", log_file) def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison, **kwargs): - if case_keys is None: case_keys = self.cases.keys() @@ -250,18 +247,19 @@ def run_comparisons(self, case_keys=None, comparison_class=GroundTruthComparison _, gt_sorting = self.datasets[dataset_key] sorting = self.sortings[key] if sorting is None: - self.comparisons[key] = None + self.comparisons[key] = None continue comp = comparison_class(gt_sorting, sorting, **kwargs) self.comparisons[key] = comp def get_run_times(self, case_keys=None): import pandas as pd + if case_keys is None: case_keys = self.cases.keys() log_folder = self.folder / "sortings" / "run_logs" - + run_times = {} for key in case_keys: log_file = log_folder / f"{self.key_to_str(key)}.json" @@ -273,7 +271,6 @@ def get_run_times(self, case_keys=None): return pd.Series(run_times, name="run_time") def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): - if case_keys is None: case_keys = self.cases.keys() @@ -292,11 +289,11 @@ def get_waveform_extractor(self, key): # some recording are not dumpable to json and the waveforms extactor need it! 
# so we load it with and put after # this should be fixed in PR 2027 so remove this after - + dataset_key = self.cases[key]["dataset"] wf_folder = self.folder / "waveforms" / self.key_to_str(dataset_key) we = load_waveforms(wf_folder, with_recording=False) - recording, _ = self.datasets[dataset_key] + recording, _ = self.datasets[dataset_key] we.set_recording(recording) return we @@ -308,7 +305,7 @@ def get_templates(self, key, mode="average"): def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], force=False): if case_keys is None: case_keys = self.cases.keys() - + done = [] for key in case_keys: dataset_key = self.cases[key]["dataset"] @@ -327,7 +324,7 @@ def compute_metrics(self, case_keys=None, metric_names=["snr", "firing_rate"], f metrics.to_csv(filename, sep="\t", index=True) def get_metrics(self, key): - import pandas as pd + import pandas as pd dataset_key = self.cases[key]["dataset"] @@ -336,17 +333,15 @@ def get_metrics(self, key): return metrics = pd.read_csv(filename, sep="\t", index_col=0) dataset_key = self.cases[key]["dataset"] - recording, gt_sorting = self.datasets[dataset_key] + recording, gt_sorting = self.datasets[dataset_key] metrics.index = gt_sorting.unit_ids return metrics def get_units_snr(self, key): - """ - """ + """ """ return self.get_metrics(key)["snr"] def get_performance_by_unit(self, case_keys=None): - import pandas as pd if case_keys is None: @@ -363,7 +358,7 @@ def get_performance_by_unit(self, case_keys=None): elif isinstance(key, tuple): for col, k in zip(self.levels, key): perf[col] = k - + perf = perf.reset_index() perf_by_unit.append(perf) @@ -371,10 +366,7 @@ def get_performance_by_unit(self, case_keys=None): perf_by_unit = perf_by_unit.set_index(self.levels) return perf_by_unit - def get_count_units( - self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None - ): - + def get_count_units(self, case_keys=None, well_detected_score=None, redundant_score=None, overmerged_score=None): import pandas as pd if case_keys is None: @@ -385,7 +377,6 @@ def get_count_units( else: index = pd.MultiIndex.from_tuples(case_keys, names=self.levels) - columns = ["num_gt", "num_sorter", "num_well_detected"] comp = self.comparisons[case_keys[0]] if comp.exhaustive_gt: @@ -401,19 +392,12 @@ def get_count_units( count_units.loc[key, "num_gt"] = len(gt_sorting.get_unit_ids()) count_units.loc[key, "num_sorter"] = len(sorting.get_unit_ids()) - count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units( - well_detected_score - ) - + count_units.loc[key, "num_well_detected"] = comp.count_well_detected_units(well_detected_score) + if comp.exhaustive_gt: count_units.loc[key, "num_redundant"] = comp.count_redundant_units(redundant_score) - count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units( - overmerged_score - ) - count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units( - redundant_score - ) + count_units.loc[key, "num_overmerged"] = comp.count_overmerged_units(overmerged_score) + count_units.loc[key, "num_false_positive"] = comp.count_false_positive_units(redundant_score) count_units.loc[key, "num_bad"] = comp.count_bad_units() return count_units - diff --git a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py index 12d764950e..91c8c640e0 100644 --- a/src/spikeinterface/comparison/tests/test_groundtruthstudy.py +++ b/src/spikeinterface/comparison/tests/test_groundtruthstudy.py @@ -7,7 +7,6 @@ 
from spikeinterface.comparison import GroundTruthStudy - if hasattr(pytest, "global_test_folder"): cache_folder = pytest.global_test_folder / "comparison" else: @@ -28,8 +27,8 @@ def simple_preprocess(rec): def create_a_study(study_folder): - rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=42) - rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.], seed=91) + rec0, gt_sorting0 = generate_ground_truth_recording(num_channels=4, durations=[30.0], seed=42) + rec1, gt_sorting1 = generate_ground_truth_recording(num_channels=4, durations=[30.0], seed=91) datasets = { "toy_tetrode": (rec0, gt_sorting0), @@ -46,9 +45,7 @@ def create_a_study(study_folder): "run_sorter_params": { "sorter_name": "tridesclous2", }, - "comparison_params": { - - }, + "comparison_params": {}, }, # ("tdc2", "with-preprocess", "probe32"): { @@ -57,11 +54,9 @@ def create_a_study(study_folder): "run_sorter_params": { "sorter_name": "tridesclous2", }, - "comparison_params": { - - }, + "comparison_params": {}, }, - # we comment this at the moement because SC2 is quite slow for testing + # we comment this at the moement because SC2 is quite slow for testing # ("sc2", "no-preprocess", "tetrode"): { # "label": "spykingcircus2 without preprocessing standar params", # "dataset": "toy_tetrode", @@ -69,16 +64,16 @@ def create_a_study(study_folder): # "sorter_name": "spykingcircus2", # }, # "comparison_params": { - # }, # }, } - study = GroundTruthStudy.create(study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "processing", "probe_type"]) + study = GroundTruthStudy.create( + study_folder, datasets=datasets, cases=cases, levels=["sorter_name", "processing", "probe_type"] + ) # print(study) - def test_GroundTruthStudy(): study = GroundTruthStudy(study_folder) print(study) @@ -98,14 +93,11 @@ def test_GroundTruthStudy(): for key in study.cases: metrics = study.get_metrics(key) print(metrics) - + study.get_performance_by_unit() study.get_count_units() - if __name__ == "__main__": setup_module() - test_GroundTruthStudy() - - \ No newline at end of file + test_GroundTruthStudy() diff --git a/src/spikeinterface/sorters/launcher.py b/src/spikeinterface/sorters/launcher.py index 12c59cbe45..704f6843f2 100644 --- a/src/spikeinterface/sorters/launcher.py +++ b/src/spikeinterface/sorters/launcher.py @@ -90,12 +90,11 @@ def run_sorter_jobs(job_list, engine="loop", engine_kwargs={}, return_output=Fal ), "Only 'loop', 'joblib', and 'processpoolexecutor' support return_output=True." 
out = [] for kwargs in job_list: - kwargs['with_output'] = True + kwargs["with_output"] = True else: out = None for kwargs in job_list: - kwargs['with_output'] = False - + kwargs["with_output"] = False if engine == "loop": # simple loop in main process diff --git a/src/spikeinterface/widgets/gtstudy.py b/src/spikeinterface/widgets/gtstudy.py index 438858beae..6a27b78dec 100644 --- a/src/spikeinterface/widgets/gtstudy.py +++ b/src/spikeinterface/widgets/gtstudy.py @@ -29,7 +29,6 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) @@ -53,9 +52,8 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): label = dp.study.cases[key]["label"] rt = dp.run_times.loc[key] self.ax.bar(i, rt, width=0.8, label=label) - - self.ax.legend() + self.ax.legend() # TODO : plot optionally average on some levels using group by @@ -80,13 +78,12 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) plot_data = dict( study=study, - count_units = study.get_count_units(case_keys=case_keys), + count_units=study.get_count_units(case_keys=case_keys), case_keys=case_keys, ) @@ -107,8 +104,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): ncol = len(columns) - colors = get_some_colors(columns, color_engine="auto", - map_name="hot") + colors = get_some_colors(columns, color_engine="auto", map_name="hot") colors["num_well_detected"] = "green" xticklabels = [] @@ -118,7 +114,7 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): y = dp.count_units.loc[key, col] if not "well_detected" in col: y = -y - + if i == 0: label = col.replace("num_", "").replace("_", " ").title() else: @@ -158,7 +154,6 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) @@ -186,11 +181,15 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): if dp.mode == "swarm": levels = perfs.index.names - df = pd.melt(perfs.reset_index(), id_vars=levels, var_name='Metric', value_name='Score', - value_vars=('accuracy','precision', 'recall')) - df['x'] = df.apply(lambda r: ' '.join([r[col] for col in levels]), axis=1) - sns.swarmplot(data=df, x='x', y='Score', hue='Metric', dodge=True) - + df = pd.melt( + perfs.reset_index(), + id_vars=levels, + var_name="Metric", + value_name="Score", + value_vars=("accuracy", "precision", "recall"), + ) + df["x"] = df.apply(lambda r: " ".join([r[col] for col in levels]), axis=1) + sns.swarmplot(data=df, x="x", y="Score", hue="Metric", dodge=True) class StudyPerformancesVsMetrics(BaseWidget): @@ -218,7 +217,6 @@ def __init__( backend=None, **backend_kwargs, ): - if case_keys is None: case_keys = list(study.cases.keys()) @@ -239,7 +237,6 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): dp = to_attr(data_plot) self.figure, self.axes, self.ax = make_mpl_figure(**backend_kwargs) - study = dp.study perfs = study.get_performance_by_unit(case_keys=dp.case_keys) @@ -253,4 +250,4 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): self.ax.legend() self.ax.set_xlim(0, max_metric * 1.05) - self.ax.set_ylim(0, 1.05) \ No newline at end of file + self.ax.set_ylim(0, 1.05) diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index ce853f16bf..ed77de6128 100644 --- a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -53,7 +53,7 @@ StudyRunTimesWidget, StudyUnitCountsWidget, StudyPerformances, - StudyPerformancesVsMetrics + 
StudyPerformancesVsMetrics, ] From 0a2c0f618b11374558f536147845a1cbc6710661 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 16:01:21 +0200 Subject: [PATCH 209/322] Default SVD Peeler is now good to go --- src/spikeinterface/sorters/internal/spyking_circus2.py | 2 +- .../sortingcomponents/clustering/clustering_tools.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index db3d88f116..7097b9e56b 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -152,7 +152,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): matching_job_params["chunk_duration"] = "100ms" spikes = find_spikes_from_templates( - recording_f, method="circus-omp", method_kwargs=matching_params, **matching_job_params + recording_f, method="circus-omp-svd", method_kwargs=matching_params, **matching_job_params ) if verbose: diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 17c38e2f8a..273b1402fe 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -533,7 +533,7 @@ def remove_duplicates( def remove_duplicates_via_matching( - waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp" + waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp-svd" ): from spikeinterface.sortingcomponents.matching import find_spikes_from_templates from spikeinterface import get_noise_levels From 5fbc88d416f863784ee7ed890c45f04726d4dc5a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 14:01:43 +0000 Subject: [PATCH 210/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/clustering_tools.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 273b1402fe..af3a9cb86a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -533,7 +533,13 @@ def remove_duplicates( def remove_duplicates_via_matching( - waveform_extractor, noise_levels, peak_labels, method_kwargs={}, job_kwargs={}, tmp_folder=None, method="circus-omp-svd" + waveform_extractor, + noise_levels, + peak_labels, + method_kwargs={}, + job_kwargs={}, + tmp_folder=None, + method="circus-omp-svd", ): from spikeinterface.sortingcomponents.matching import find_spikes_from_templates from spikeinterface import get_noise_levels From fb82e029be652fa33b69367d9d97f9c7a465914e Mon Sep 17 00:00:00 2001 From: Robin Kim <31869753+rkim48@users.noreply.github.com> Date: Wed, 27 Sep 2023 10:16:37 -0500 Subject: [PATCH 211/322] Apply suggestions from code review Remove print('success') statements Co-authored-by: Zach McKenzie <92116279+zm711@users.noreply.github.com> --- .../curation/tests/test_sortingview_curation.py | 4 ---- 1 file changed, 4 deletions(-) diff --git 
a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index cfc15013a3..79cea3d010 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -77,7 +77,6 @@ def test_gh_curation(): assert len(sorting_curated_gh_mua.unit_ids) == 6 assert len(sorting_curated_gh_art_mua.unit_ids) == 5 - print("Test for GH passed!\n") @pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available") @@ -110,7 +109,6 @@ def test_sha1_curation(): assert len(sorting_curated_sha1_mua.unit_ids) == 6 assert len(sorting_curated_sha1_art_mua.unit_ids) == 5 - print("Test for sha1 curation passed!\n") def test_json_curation(): @@ -244,7 +242,6 @@ def test_label_inheritance_int(): assert 9 not in sorting_include_accept.get_unit_ids() assert 10 in sorting_include_accept.get_unit_ids() - print("Test for integer unit IDs passed!\n") def test_label_inheritance_str(): @@ -314,7 +311,6 @@ def test_label_inheritance_str(): assert "c-d" not in sorting_include_accept.get_unit_ids() assert "e-f" in sorting_include_accept.get_unit_ids() - print("Test for string unit IDs passed!\n") if __name__ == "__main__": From 776520bb100986bd90653d9b8eeba77eb0cc16aa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 15:16:55 +0000 Subject: [PATCH 212/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../curation/tests/test_sortingview_curation.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 79cea3d010..71912d7793 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -78,7 +78,6 @@ def test_gh_curation(): assert len(sorting_curated_gh_art_mua.unit_ids) == 5 - @pytest.mark.skipif(ON_GITHUB and not KACHERY_CLOUD_SET, reason="Kachery cloud secrets not available") def test_sha1_curation(): """ @@ -110,7 +109,6 @@ def test_sha1_curation(): assert len(sorting_curated_sha1_art_mua.unit_ids) == 5 - def test_json_curation(): """ Test curation using a JSON file. @@ -243,7 +241,6 @@ def test_label_inheritance_int(): assert 10 in sorting_include_accept.get_unit_ids() - def test_label_inheritance_str(): """ Test curation for label inheritance for string unit IDs. 
@@ -312,7 +309,6 @@ def test_label_inheritance_str(): assert "e-f" in sorting_include_accept.get_unit_ids() - if __name__ == "__main__": # generate_sortingview_curation_dataset() test_sha1_curation() From 9f45f2e5757e9f3dcb890a65d69bdecbca8c7eb6 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 27 Sep 2023 17:35:31 +0200 Subject: [PATCH 213/322] Enhance the clustering --- .../sorters/internal/spyking_circus2.py | 2 +- .../clustering/random_projections.py | 106 +++++++++--------- .../sortingcomponents/features_from_peaks.py | 27 +++-- 3 files changed, 71 insertions(+), 64 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 7097b9e56b..55a36d26d5 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -20,7 +20,7 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): sorter_name = "spykingcircus2" _default_params = { - "general": {"ms_before": 2, "ms_after": 2, "radius_um": 75}, + "general": {"ms_before": 2, "ms_after": 2, "radius_um": 100}, "waveforms": {"max_spikes_per_unit": 200, "overwrite": True, "sparse": True, "method": "ptp", "threshold": 1}, "filtering": {"dtype": "float32"}, "detection": {"peak_sign": "neg", "detect_threshold": 5}, diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index be8ecd6702..8c0cab07c6 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -18,7 +18,9 @@ from .clustering_tools import remove_duplicates, remove_duplicates_via_matching, remove_duplicates_via_dip from spikeinterface.core import NumpySorting from spikeinterface.core import extract_waveforms -from spikeinterface.sortingcomponents.features_from_peaks import compute_features_from_peaks, EnergyFeature +from spikeinterface.sortingcomponents.waveforms.savgol_denoiser import SavGolDenoiser +from spikeinterface.sortingcomponents.features_from_peaks import RandomProjectionsFeature +from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, PeakRetriever class RandomProjectionClustering: @@ -34,17 +36,17 @@ class RandomProjectionClustering: "cluster_selection_method": "leaf", }, "cleaning_kwargs": {}, + "waveforms" : {"ms_before" : 2, "ms_after" : 2, "max_spikes_per_unit": 100}, "radius_um": 100, - "max_spikes_per_unit": 200, "selection_method": "closest_to_centroid", - "nb_projections": {"ptp": 8, "energy": 2}, - "ms_before": 1.5, - "ms_after": 1.5, + "nb_projections": 10, + "ms_before": 1, + "ms_after": 1, "random_seed": 42, - "shared_memory": False, - "min_values": {"ptp": 0, "energy": 0}, + "smoothing_kwargs" : {"window_length_ms" : 1}, + "shared_memory": True, "tmp_folder": None, - "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "10M", "verbose": True, "progress_bar": True}, + "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "100M", "verbose": True, "progress_bar": True}, } @classmethod @@ -74,50 +76,52 @@ def main_function(cls, recording, peaks, params): np.random.seed(d["random_seed"]) - features_params = {} - features_list = [] - - noise_snippets = None - - for proj_type in ["ptp", "energy"]: - if d["nb_projections"][proj_type] > 0: - features_list += [f"random_projections_{proj_type}"] - - if d["min_values"][proj_type] == "auto": - if noise_snippets is None: - num_segments = 
recording.get_num_segments() - num_chunks = 3 * d["max_spikes_per_unit"] // num_segments - noise_snippets = get_random_data_chunks( - recording, num_chunks_per_segment=num_chunks, chunk_size=num_samples, seed=42 - ) - noise_snippets = noise_snippets.reshape(num_chunks, num_samples, num_chans) - - if proj_type == "energy": - data = np.linalg.norm(noise_snippets, axis=1) - min_values = np.median(data, axis=0) - elif proj_type == "ptp": - data = np.ptp(noise_snippets, axis=1) - min_values = np.median(data, axis=0) - elif d["min_values"][proj_type] > 0: - min_values = d["min_values"][proj_type] - else: - min_values = None - - projections = np.random.randn(num_chans, d["nb_projections"][proj_type]) - features_params[f"random_projections_{proj_type}"] = { - "radius_um": params["radius_um"], - "projections": projections, - "min_values": min_values, - } - - features_data = compute_features_from_peaks( - recording, peaks, features_list, features_params, ms_before=1, ms_after=1, **params["job_kwargs"] + if params["tmp_folder"] is None: + name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) + tmp_folder = get_global_tmp_folder() / name + else: + tmp_folder = Path(params["tmp_folder"]).absolute() + + ### Then we extract the SVD features + node0 = PeakRetriever(recording, peaks) + node1 = ExtractDenseWaveforms(recording, parents=[node0], return_output=False, + ms_before=params['ms_before'], + ms_after=params['ms_after'] ) - if len(features_data) > 1: - hdbscan_data = np.hstack((features_data[0], features_data[1])) - else: - hdbscan_data = features_data[0] + node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params['smoothing_kwargs']) + + projections = np.random.randn(num_chans, d["nb_projections"]) + projections -= projections.mean(0) + projections /= projections.std(0) + + nbefore = int(params['ms_before'] * fs / 1000) + nafter = int(params['ms_after'] * fs / 1000) + nsamples = nbefore + nafter + + import scipy + x = np.random.randn(100, nsamples, num_chans).astype(np.float32) + x = scipy.signal.savgol_filter(x, node2.window_length, node2.order, axis=1) + + ptps = np.ptp(x, axis=1) + a, b = np.histogram(ptps.flatten(), np.linspace(0, 100, 1000)) + ydata = np.cumsum(a)/a.sum() + xdata = b[1:] + + from scipy.optimize import curve_fit + def sigmoid(x, L ,x0, k, b): + y = L / (1 + np.exp(-k*(x-x0))) + b + return (y) + + p0 = [max(ydata), np.median(xdata), 1, min(ydata)] # this is an mandatory initial guess + popt, pcov = curve_fit(sigmoid, xdata, ydata, p0) + + node3 = RandomProjectionsFeature(recording, parents=[node0, node2], return_output=True, + projections=projections, radius_um=params['radius_um']) + + pipeline_nodes = [node0, node1, node2, node3] + + hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"]) import sklearn @@ -132,7 +136,7 @@ def main_function(cls, recording, peaks, params): all_indices = np.arange(0, peak_labels.size) - max_spikes = params["max_spikes_per_unit"] + max_spikes = params['waveforms']["max_spikes_per_unit"] selection_method = params["selection_method"] for unit_ind in labels: diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index bd82ffa0a6..2f1acb6a19 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -184,41 +184,44 @@ def __init__( return_output=True, parents=None, projections=None, - radius_um=150.0, - 
min_values=None, + sigmoid=None, + radius_um=None ): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) self.projections = projections - self.radius_um = radius_um - self.min_values = min_values - + self.sigmoid = sigmoid self.contact_locations = recording.get_channel_locations() self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < radius_um - - self._kwargs.update(dict(projections=projections, radius_um=radius_um, min_values=min_values)) - + self.radius_um = radius_um + self._kwargs.update(dict(projections=projections, sigmoid=sigmoid, radius_um=radius_um)) self._dtype = recording.get_dtype() def get_dtype(self): return self._dtype + def _sigmoid(self, x): + L, x0, k, b = self.sigmoid + y = L / (1 + np.exp(-k*(x-x0))) + b + return y + def compute(self, traces, peaks, waveforms): all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype) + for main_chan in np.unique(peaks["channel_index"]): (idx,) = np.nonzero(peaks["channel_index"] == main_chan) (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan]) local_projections = self.projections[chan_inds, :] - wf_ptp = (waveforms[idx][:, :, chan_inds]).ptp(axis=1) + wf_ptp = np.ptp(waveforms[idx][:, :, chan_inds], axis=1) - if self.min_values is not None: - wf_ptp = (wf_ptp / self.min_values[chan_inds]) ** 4 + if self.sigmoid is not None: + wf_ptp *= self._sigmoid(wf_ptp) denom = np.sum(wf_ptp, axis=1) mask = denom != 0 - all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections) / (denom[mask][:, np.newaxis]) + return all_projections From 3cbf8f8fc8267ff0bffd8c340514db983e059a0c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 27 Sep 2023 15:36:51 +0000 Subject: [PATCH 214/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../clustering/random_projections.py | 38 +++++++++++-------- .../sortingcomponents/features_from_peaks.py | 8 ++-- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 8c0cab07c6..f8cad2cf3f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -36,14 +36,14 @@ class RandomProjectionClustering: "cluster_selection_method": "leaf", }, "cleaning_kwargs": {}, - "waveforms" : {"ms_before" : 2, "ms_after" : 2, "max_spikes_per_unit": 100}, + "waveforms": {"ms_before": 2, "ms_after": 2, "max_spikes_per_unit": 100}, "radius_um": 100, "selection_method": "closest_to_centroid", "nb_projections": 10, "ms_before": 1, "ms_after": 1, "random_seed": 42, - "smoothing_kwargs" : {"window_length_ms" : 1}, + "smoothing_kwargs": {"window_length_ms": 1}, "shared_memory": True, "tmp_folder": None, "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "100M", "verbose": True, "progress_bar": True}, @@ -84,40 +84,46 @@ def main_function(cls, recording, peaks, params): ### Then we extract the SVD features node0 = PeakRetriever(recording, peaks) - node1 = ExtractDenseWaveforms(recording, parents=[node0], return_output=False, - ms_before=params['ms_before'], - ms_after=params['ms_after'] + node1 = ExtractDenseWaveforms( + recording, parents=[node0], return_output=False, ms_before=params["ms_before"], ms_after=params["ms_after"] ) - node2 = 
SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params['smoothing_kwargs']) + node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params["smoothing_kwargs"]) projections = np.random.randn(num_chans, d["nb_projections"]) projections -= projections.mean(0) projections /= projections.std(0) - nbefore = int(params['ms_before'] * fs / 1000) - nafter = int(params['ms_after'] * fs / 1000) + nbefore = int(params["ms_before"] * fs / 1000) + nafter = int(params["ms_after"] * fs / 1000) nsamples = nbefore + nafter import scipy + x = np.random.randn(100, nsamples, num_chans).astype(np.float32) x = scipy.signal.savgol_filter(x, node2.window_length, node2.order, axis=1) ptps = np.ptp(x, axis=1) a, b = np.histogram(ptps.flatten(), np.linspace(0, 100, 1000)) - ydata = np.cumsum(a)/a.sum() + ydata = np.cumsum(a) / a.sum() xdata = b[1:] from scipy.optimize import curve_fit - def sigmoid(x, L ,x0, k, b): - y = L / (1 + np.exp(-k*(x-x0))) + b - return (y) - p0 = [max(ydata), np.median(xdata), 1, min(ydata)] # this is an mandatory initial guess + def sigmoid(x, L, x0, k, b): + y = L / (1 + np.exp(-k * (x - x0))) + b + return y + + p0 = [max(ydata), np.median(xdata), 1, min(ydata)] # this is an mandatory initial guess popt, pcov = curve_fit(sigmoid, xdata, ydata, p0) - node3 = RandomProjectionsFeature(recording, parents=[node0, node2], return_output=True, - projections=projections, radius_um=params['radius_um']) + node3 = RandomProjectionsFeature( + recording, + parents=[node0, node2], + return_output=True, + projections=projections, + radius_um=params["radius_um"], + ) pipeline_nodes = [node0, node1, node2, node3] @@ -136,7 +142,7 @@ def sigmoid(x, L ,x0, k, b): all_indices = np.arange(0, peak_labels.size) - max_spikes = params['waveforms']["max_spikes_per_unit"] + max_spikes = params["waveforms"]["max_spikes_per_unit"] selection_method = params["selection_method"] for unit_ind in labels: diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index 2f1acb6a19..b534c2356d 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -185,7 +185,7 @@ def __init__( parents=None, projections=None, sigmoid=None, - radius_um=None + radius_um=None, ): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) @@ -203,12 +203,12 @@ def get_dtype(self): def _sigmoid(self, x): L, x0, k, b = self.sigmoid - y = L / (1 + np.exp(-k*(x-x0))) + b + y = L / (1 + np.exp(-k * (x - x0))) + b return y def compute(self, traces, peaks, waveforms): all_projections = np.zeros((peaks.size, self.projections.shape[1]), dtype=self._dtype) - + for main_chan in np.unique(peaks["channel_index"]): (idx,) = np.nonzero(peaks["channel_index"] == main_chan) (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan]) @@ -221,7 +221,7 @@ def compute(self, traces, peaks, waveforms): denom = np.sum(wf_ptp, axis=1) mask = denom != 0 all_projections[idx[mask]] = np.dot(wf_ptp[mask], local_projections) / (denom[mask][:, np.newaxis]) - + return all_projections From 9dde3760dd62803ea54d5c1f42d560fd907380a0 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 27 Sep 2023 21:31:11 +0200 Subject: [PATCH 215/322] title --- .../benchmark/benchmark_motion_estimation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py 
b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index a47b97fb6d..c505676c05 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -500,8 +500,8 @@ def plot_errors_several_benchmarks(benchmarks, axes=None, show_legend=True, colo axes[2].plot(benchmark.spatial_bins, depth_error, label=benchmark.title, color=c) ax0 = ax = axes[0] - ax.set_xlabel("time [s]") - ax.set_ylabel("error [um]") + ax.set_xlabel("Time [s]") + ax.set_ylabel("Error [μm]") if show_legend: ax.legend() _simpleaxis(ax) @@ -514,7 +514,7 @@ def plot_errors_several_benchmarks(benchmarks, axes=None, show_legend=True, colo ax2 = axes[2] ax2.set_yticks([]) - ax2.set_xlabel("depth [um]") + ax2.set_xlabel("Depth [μm]") # ax.set_ylabel('error') channel_positions = benchmark.recording.get_channel_locations() probe_y_min, probe_y_max = channel_positions[:, 1].min(), channel_positions[:, 1].max() From daddd8cef722a35233dbed530e14775de87b8caa Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 28 Sep 2023 09:16:51 +0200 Subject: [PATCH 216/322] Adding a lookup table --- .../sortingcomponents/matching/circus.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 5775589321..1d13eca1df 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -128,6 +128,7 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret + def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) @@ -474,7 +475,6 @@ def main_function(cls, traces, d): return spikes - class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): """ Orthogonal Matching Pursuit inspired from Spyking Circus sorter @@ -632,6 +632,12 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["num_templates"] = len(d["templates"]) d["ignored_ids"] = np.array(d["ignored_ids"]) + d["unit_overlaps_tables"] = {} + for i in range(d["num_templates"]): + d["unit_overlaps_tables"][i] = np.zeros(d["num_templates"], dtype=int) + d["unit_overlaps_tables"][i][d["unit_overlaps_indices"][i]] = np.arange(len(d["unit_overlaps_indices"][i])) + + omp_min_sps = d["omp_min_sps"] # d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) d["stop_criteria"] = omp_min_sps * np.maximum(d["norms"], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) @@ -720,6 +726,7 @@ def main_function(cls, traces, d): local_overlaps = overlaps[best_cluster_ind] overlapping_templates = d["unit_overlaps_indices"][best_cluster_ind] + table = d["unit_overlaps_tables"][best_cluster_ind] if num_selection == M.shape[0]: Z = np.zeros((2 * num_selection, 2 * num_selection), dtype=np.float32) @@ -728,9 +735,6 @@ def main_function(cls, traces, d): mask = np.isin(myindices, overlapping_templates) a, b = myindices[mask], myline[mask] - - table = np.zeros(num_templates, dtype=int) - table[overlapping_templates] = np.arange(len(overlapping_templates)) M[num_selection, idx[mask]] = local_overlaps[table[a], b] if vicinity == 0: From d7dcbe05f082f5ecd93d9233b9f5ca30ae51a8f4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 07:17:14 +0000 Subject: [PATCH 217/322] [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/matching/circus.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 1d13eca1df..44c394aec9 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -128,7 +128,6 @@ def _freq_domain_conv(in1, in2, axes, shape, cache, calc_fast_len=True): return ret - def compute_overlaps(templates, num_samples, num_channels, sparsities): num_templates = len(templates) @@ -475,6 +474,7 @@ def main_function(cls, traces, d): return spikes + class CircusOMPSVDPeeler(BaseTemplateMatchingEngine): """ Orthogonal Matching Pursuit inspired from Spyking Circus sorter @@ -637,7 +637,6 @@ def initialize_and_check_kwargs(cls, recording, kwargs): d["unit_overlaps_tables"][i] = np.zeros(d["num_templates"], dtype=int) d["unit_overlaps_tables"][i][d["unit_overlaps_indices"][i]] = np.arange(len(d["unit_overlaps_indices"][i])) - omp_min_sps = d["omp_min_sps"] # d["stop_criteria"] = omp_min_sps * np.sqrt(d["noise_levels"].sum() * d["num_samples"]) d["stop_criteria"] = omp_min_sps * np.maximum(d["norms"], np.sqrt(d["noise_levels"].sum() * d["num_samples"])) From d623da38f38924b9c5857abdeccf16891c729bc7 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 28 Sep 2023 10:21:08 +0200 Subject: [PATCH 218/322] typos for cleaning via matching --- .../clustering/clustering_tools.py | 2 +- .../clustering/random_projections.py | 2 +- .../sortingcomponents/matching/circus.py | 15 ++++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index af3a9cb86a..28a1a63065 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -613,7 +613,7 @@ def remove_duplicates_via_matching( spikes, computed = find_spikes_from_templates( sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs ) - if method == "circus-omp-vsd": + if method == "circus-omp-svd": method_kwargs.update( { "overlaps": computed["overlaps"], diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index f8cad2cf3f..df9290a1f5 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -127,7 +127,7 @@ def sigmoid(x, L, x0, k, b): pipeline_nodes = [node0, node1, node2, node3] - hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"]) + hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"], job_name="extracting features") import sklearn diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 1d13eca1df..9e02aa4ff6 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -686,13 +686,18 @@ def main_function(cls, traces, d): scalar_products = np.zeros(conv_shape, dtype=np.float32) # Filter using overlap-and-add convolution - spatially_filtered_data = np.matmul(d["spatial"], 
traces.T[np.newaxis, :, :]) - scaled_filtered_data = spatially_filtered_data * d["singular"] - objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") - scalar_products += np.sum(objective_by_rank, axis=0) - if len(ignored_ids) > 0: + mask = ~np.isin(np.arange(num_templates), ignored_ids) + spatially_filtered_data = np.matmul(d["spatial"][:, mask, :], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d["singular"][:, mask, :] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"][:, mask, :], axes=2, mode="valid") + scalar_products[mask] += np.sum(objective_by_rank, axis=0) scalar_products[ignored_ids] = -np.inf + else: + spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) + scaled_filtered_data = spatially_filtered_data * d["singular"] + objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, mode="valid") + scalar_products += np.sum(objective_by_rank, axis=0) num_spikes = 0 From fdb84668137ba71b1ca36787032551da52764842 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 08:21:36 +0000 Subject: [PATCH 219/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/random_projections.py | 4 +++- src/spikeinterface/sortingcomponents/matching/circus.py | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index df9290a1f5..864548e7d4 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -127,7 +127,9 @@ def sigmoid(x, L, x0, k, b): pipeline_nodes = [node0, node1, node2, node3] - hdbscan_data = run_node_pipeline(recording, pipeline_nodes, params["job_kwargs"], job_name="extracting features") + hdbscan_data = run_node_pipeline( + recording, pipeline_nodes, params["job_kwargs"], job_name="extracting features" + ) import sklearn diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index b963447ba2..358691cd25 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -687,16 +687,18 @@ def main_function(cls, traces, d): # Filter using overlap-and-add convolution if len(ignored_ids) > 0: mask = ~np.isin(np.arange(num_templates), ignored_ids) - spatially_filtered_data = np.matmul(d["spatial"][:, mask, :], traces.T[np.newaxis, :, :]) + spatially_filtered_data = np.matmul(d["spatial"][:, mask, :], traces.T[np.newaxis, :, :]) scaled_filtered_data = spatially_filtered_data * d["singular"][:, mask, :] - objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"][:, mask, :], axes=2, mode="valid") + objective_by_rank = scipy.signal.oaconvolve( + scaled_filtered_data, d["temporal"][:, mask, :], axes=2, mode="valid" + ) scalar_products[mask] += np.sum(objective_by_rank, axis=0) scalar_products[ignored_ids] = -np.inf else: spatially_filtered_data = np.matmul(d["spatial"], traces.T[np.newaxis, :, :]) scaled_filtered_data = spatially_filtered_data * d["singular"] objective_by_rank = scipy.signal.oaconvolve(scaled_filtered_data, d["temporal"], axes=2, 
mode="valid") - scalar_products += np.sum(objective_by_rank, axis=0) + scalar_products += np.sum(objective_by_rank, axis=0) num_spikes = 0 From 7ba84ad7d9913b4846d9d6903a13a1f441156647 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 28 Sep 2023 12:25:29 +0200 Subject: [PATCH 220/322] updates --- src/spikeinterface/core/waveform_extractor.py | 24 +- .../postprocessing/template_metrics.py | 343 +++++++++++------- 2 files changed, 239 insertions(+), 128 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 9f85603e51..79456a40ce 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -811,14 +811,30 @@ def select_units(self, unit_ids, new_folder=None, use_relative_path: bool = Fals sparsity = ChannelSparsity(mask, unit_ids, self.channel_ids) else: sparsity = None - we = WaveformExtractor.create(self.recording, sorting, folder=None, mode="memory", sparsity=sparsity) - we.set_params(**self._params) + if self.has_recording(): + we = WaveformExtractor.create(self.recording, sorting, folder=None, mode="memory", sparsity=sparsity) + else: + we = WaveformExtractor( + recording=None, + sorting=sorting, + folder=None, + sparsity=sparsity, + rec_attributes=self._rec_attributes, + allow_unfiltered=True, + ) + we._params = self._params # copy memory objects if self.has_waveforms(): we._memory_objects = {"wfs_arrays": {}, "sampled_indices": {}} for unit_id in unit_ids: - we._memory_objects["wfs_arrays"][unit_id] = self._memory_objects["wfs_arrays"][unit_id] - we._memory_objects["sampled_indices"][unit_id] = self._memory_objects["sampled_indices"][unit_id] + if self.format == "memory": + we._memory_objects["wfs_arrays"][unit_id] = self._memory_objects["wfs_arrays"][unit_id] + we._memory_objects["sampled_indices"][unit_id] = self._memory_objects["sampled_indices"][ + unit_id + ] + else: + we._memory_objects["wfs_arrays"][unit_id] = self.get_waveforms(unit_id) + we._memory_objects["sampled_indices"][unit_id] = self.get_sampled_indices(unit_id) # finally select extensions data for ext_name in self.get_available_extension_names(): diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index ea44dea9cb..090dae4567 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ b/src/spikeinterface/postprocessing/template_metrics.py @@ -11,12 +11,9 @@ from ..core.waveform_extractor import BaseWaveformExtractorExtension import warnings -# DEBUG = True -# if DEBUG: -# import matplotlib.pyplot as plt -# plt.ion() -# plt.show() +global DEBUG +DEBUG = False def get_single_channel_template_metric_names(): @@ -52,20 +49,20 @@ def _set_params( peak_sign="neg", upsampling_factor=10, sparsity=None, - functions_kwargs=None, + metrics_kwargs=None, include_multi_channel_metrics=False, ): if metric_names is None: metric_names = get_single_channel_template_metric_names() if include_multi_channel_metrics: metric_names += get_multi_channel_template_metric_names() - functions_kwargs = functions_kwargs or dict() + metrics_kwargs = metrics_kwargs or dict() params = dict( metric_names=[str(name) for name in metric_names], sparsity=sparsity, peak_sign=peak_sign, upsampling_factor=int(upsampling_factor), - functions_kwargs=functions_kwargs, + metrics_kwargs=metrics_kwargs, ) return params @@ -141,7 +138,7 @@ def _run(self): sampling_frequency=sampling_frequency_up, trough_idx=trough_idx, peak_idx=peak_idx, - 
**self._params["functions_kwargs"], + **self._params["metrics_kwargs"], ) template_metrics.at[index, metric_name] = value @@ -173,7 +170,7 @@ def _run(self): template_upsampled, channel_locations=channel_locations_sparse, sampling_frequency=sampling_frequency_up, - **self._params["functions_kwargs"], + **self._params["metrics_kwargs"], ) template_metrics.at[index, metric_name] = value self._extension_data["metrics"] = template_metrics @@ -199,6 +196,21 @@ def get_extension_function(): WaveformExtractor.register_extension(TemplateMetricsCalculator) +_default_function_kwargs = dict( + recovery_window_ms=0.7, + peak_relative_threshold=0.2, + peak_width_ms=0.1, + depth_direction="y", + min_channels_for_velocity=5, + min_r2_velocity=0.5, + exp_peak_function="ptp", + min_r2_exp_decay=0.5, + spread_threshold=0.2, + spread_smooth_um=20, + same_x=False, +) + + def compute_template_metrics( waveform_extractor, load_if_exists=False, @@ -207,16 +219,8 @@ def compute_template_metrics( upsampling_factor=10, sparsity=None, include_multi_channel_metrics=False, - functions_kwargs=dict( - recovery_window_ms=0.7, - peak_relative_threshold=0.2, - peak_width_ms=0.2, - depth_direction="y", - min_channels_for_velocity=5, - min_r2_for_velocity=0.5, - exp_peak_function="ptp", - spread_threshold=0.2, - ), + metrics_kwargs=None, + debug_plots=False, ): """ Compute template metrics including: @@ -252,14 +256,14 @@ def compute_template_metrics( For more generating a sparsity dict, see the postprocessing.compute_sparsity() function. include_multi_channel_metrics: bool, default: False Whether to compute multi-channel metrics - functions_kwargs: dict + metrics_kwargs: dict Additional arguments to pass to the metric functions. Including: * recovery_window_ms: the window in ms after the peak to compute the recovery_slope, default: 0.7 * peak_relative_threshold: the relative threshold to detect positive and negative peaks, default: 0.2 * peak_width_ms: the width in samples to detect peaks, default: 0.2 * depth_direction: the direction to compute velocity above and below, default: "y" * min_channels_for_velocity: the minimum number of channels above or below to compute velocity, default: 5 - * min_r2_for_velocity: the minimum r2 to accept the velocity fit, default: 0.7 + * min_r2_velocity: the minimum r2 to accept the velocity fit, default: 0.7 * exp_peak_function: the function to use to compute the peak amplitude for the exp decay, default: "ptp" * spread_threshold: the threshold to compute the spread, default: 0.2 @@ -275,6 +279,9 @@ def compute_template_metrics( If any multi-channel metric is in the metric_names or include_multi_channel_metrics is True, sparsity must be None, so that one metric value will be computed per unit. """ + if debug_plots: + global DEBUG + DEBUG = True if load_if_exists and waveform_extractor.is_extension(TemplateMetricsCalculator.extension_name): tmc = waveform_extractor.load_extension(TemplateMetricsCalculator.extension_name) else: @@ -287,13 +294,19 @@ def compute_template_metrics( "If multi-channel metrics are computed, sparsity must be None, " "so that each unit will correspond to 1 row of the output dataframe." 
) + default_kwargs = _default_function_kwargs.copy() + if metrics_kwargs is None: + metrics_kwargs = default_kwargs + else: + default_kwargs.update(metrics_kwargs) + metrics_kwargs = default_kwargs tmc.set_params( metric_names=metric_names, peak_sign=peak_sign, upsampling_factor=upsampling_factor, sparsity=sparsity, include_multi_channel_metrics=include_multi_channel_metrics, - functions_kwargs=functions_kwargs, + metrics_kwargs=metrics_kwargs, ) tmc.run() @@ -328,7 +341,7 @@ def get_trough_and_peak_idx(template): ######################################################################################### # Single-channel metrics -def get_peak_to_valley(template_single, trough_idx=None, peak_idx=None, **kwargs): +def get_peak_to_valley(template_single, sampling_frequency, trough_idx=None, peak_idx=None, **kwargs): """ Return the peak to valley duration in seconds of input waveforms. @@ -340,22 +353,19 @@ def get_peak_to_valley(template_single, trough_idx=None, peak_idx=None, **kwargs The index of the trough peak_idx: int, default: None The index of the peak - **kwargs: Required kwargs: - - sampling_frequency: the sampling frequency Returns ------- ptv: float The peak to valley duration in seconds """ - sampling_frequency = kwargs["sampling_frequency"] if trough_idx is None or peak_idx is None: trough_idx, peak_idx = get_trough_and_peak_idx(template_single) ptv = (peak_idx - trough_idx) / sampling_frequency return ptv -def get_peak_trough_ratio(template_single, trough_idx=None, peak_idx=None, **kwargs): +def get_peak_trough_ratio(template_single, sampling_frequency=None, trough_idx=None, peak_idx=None, **kwargs): """ Return the peak to trough ratio of input waveforms. @@ -367,8 +377,6 @@ def get_peak_trough_ratio(template_single, trough_idx=None, peak_idx=None, **kwa The index of the trough peak_idx: int, default: None The index of the peak - **kwargs: Required kwargs: - - sampling_frequency: the sampling frequency Returns ------- @@ -381,7 +389,7 @@ def get_peak_trough_ratio(template_single, trough_idx=None, peak_idx=None, **kwa return ptratio -def get_half_width(template_single, trough_idx=None, peak_idx=None, **kwargs): +def get_half_width(template_single, sampling_frequency, trough_idx=None, peak_idx=None, **kwargs): """ Return the half width of input waveforms in seconds. 
@@ -393,8 +401,6 @@ def get_half_width(template_single, trough_idx=None, peak_idx=None, **kwargs): The index of the trough peak_idx: int, default: None The index of the peak - **kwargs: Required kwargs: - - sampling_frequency: the sampling frequency Returns ------- @@ -403,7 +409,6 @@ def get_half_width(template_single, trough_idx=None, peak_idx=None, **kwargs): """ if trough_idx is None or peak_idx is None: trough_idx, peak_idx = get_trough_and_peak_idx(template_single) - sampling_frequency = kwargs["sampling_frequency"] if peak_idx == 0: return np.nan @@ -428,7 +433,7 @@ def get_half_width(template_single, trough_idx=None, peak_idx=None, **kwargs): return hw -def get_repolarization_slope(template_single, trough_idx=None, **kwargs): +def get_repolarization_slope(template_single, sampling_frequency, trough_idx=None, **kwargs): """ Return slope of repolarization period between trough and baseline @@ -445,12 +450,9 @@ def get_repolarization_slope(template_single, trough_idx=None, **kwargs): The 1D template waveform trough_idx: int, default: None The index of the trough - **kwargs: Required kwargs: - - sampling_frequency: the sampling frequency """ if trough_idx is None: trough_idx = get_trough_and_peak_idx(template_single) - sampling_frequency = kwargs["sampling_frequency"] times = np.arange(template_single.shape[0]) / sampling_frequency @@ -472,7 +474,7 @@ def get_repolarization_slope(template_single, trough_idx=None, **kwargs): return res.slope -def get_recovery_slope(template_single, peak_idx=None, **kwargs): +def get_recovery_slope(template_single, sampling_frequency, peak_idx=None, **kwargs): """ Return the recovery slope of input waveforms. After repolarization, the neuron hyperpolarizes untill it peaks. The recovery slope is the @@ -490,7 +492,6 @@ def get_recovery_slope(template_single, peak_idx=None, **kwargs): peak_idx: int, default: None The index of the peak **kwargs: Required kwargs: - - sampling_frequency: the sampling frequency - recovery_window_ms: the window in ms after the peak to compute the recovery_slope """ import scipy.stats @@ -499,7 +500,6 @@ def get_recovery_slope(template_single, peak_idx=None, **kwargs): recovery_window_ms = kwargs["recovery_window_ms"] if peak_idx is None: _, peak_idx = get_trough_and_peak_idx(template_single) - sampling_frequency = kwargs["sampling_frequency"] times = np.arange(template_single.shape[0]) / sampling_frequency @@ -512,7 +512,7 @@ def get_recovery_slope(template_single, peak_idx=None, **kwargs): return res.slope -def get_num_positive_peaks(template_single, **kwargs): +def get_num_positive_peaks(template_single, sampling_frequency, **kwargs): """ Count the number of positive peaks in the template. 
@@ -523,7 +523,6 @@ def get_num_positive_peaks(template_single, **kwargs): **kwargs: Required kwargs: - peak_relative_threshold: the relative threshold to detect positive and negative peaks - peak_width_ms: the width in samples to detect peaks - - sampling_frequency: the sampling frequency """ from scipy.signal import find_peaks @@ -532,14 +531,14 @@ def get_num_positive_peaks(template_single, **kwargs): peak_relative_threshold = kwargs["peak_relative_threshold"] peak_width_ms = kwargs["peak_width_ms"] max_value = np.max(np.abs(template_single)) - peak_width_samples = int(peak_width_ms / 1000 * kwargs["sampling_frequency"]) + peak_width_samples = int(peak_width_ms / 1000 * sampling_frequency) pos_peaks = find_peaks(template_single, height=peak_relative_threshold * max_value, width=peak_width_samples) return len(pos_peaks[0]) -def get_num_negative_peaks(template_single, **kwargs): +def get_num_negative_peaks(template_single, sampling_frequency, **kwargs): """ Count the number of negative peaks in the template. @@ -550,7 +549,6 @@ def get_num_negative_peaks(template_single, **kwargs): **kwargs: Required kwargs: - peak_relative_threshold: the relative threshold to detect positive and negative peaks - peak_width_ms: the width in samples to detect peaks - - sampling_frequency: the sampling frequency """ from scipy.signal import find_peaks @@ -559,7 +557,7 @@ def get_num_negative_peaks(template_single, **kwargs): peak_relative_threshold = kwargs["peak_relative_threshold"] peak_width_ms = kwargs["peak_width_ms"] max_value = np.max(np.abs(template_single)) - peak_width_samples = int(peak_width_ms / 1000 * kwargs["sampling_frequency"]) + peak_width_samples = int(peak_width_ms / 1000 * sampling_frequency) neg_peaks = find_peaks(-template_single, height=peak_relative_threshold * max_value, width=peak_width_samples) @@ -581,6 +579,20 @@ def get_num_negative_peaks(template_single, **kwargs): # Multi-channel metrics +def transform_same_x(template, channel_locations): + max_channel_x = channel_locations[np.argmax(np.ptp(template, axis=0)), 0] + same_x_mask = channel_locations[:, 0] == max_channel_x + channel_locations_same_x = channel_locations[same_x_mask] + template_same_x = template[:, same_x_mask] + return template_same_x, channel_locations_same_x + + +def sort_template_and_locations(template, channel_locations, depth_direction="y"): + direction_index = ["x", "y", "z"].index(depth_direction) + sort_indices = np.argsort(channel_locations[:, direction_index]) + return template[:, sort_indices], channel_locations[sort_indices, :] + + def fit_velocity(peak_times, channel_dist): # from scipy.stats import linregress # slope, intercept, _, _, _ = linregress(peak_times, channel_dist) @@ -595,7 +607,7 @@ def fit_velocity(peak_times, channel_dist): return slope, intercept, score -def get_velocity_above(template, channel_locations, **kwargs): +def get_velocity_above(template, channel_locations, sampling_frequency, **kwargs): """ Compute the velocity above the max channel of the template. 
@@ -608,56 +620,70 @@ def get_velocity_above(template, channel_locations, **kwargs): **kwargs: Required kwargs: - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - min_channels_for_velocity: the minimum number of channels above or below to compute velocity - - min_r2_for_velocity: the minimum r2 to accept the velocity fit - - sampling_frequency: the sampling frequency + - min_r2_velocity: the minimum r2 to accept the velocity fit """ assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" assert "min_channels_for_velocity" in kwargs, "min_channels_for_velocity must be given as kwarg" - assert "min_r2_for_velocity" in kwargs, "min_r2_for_velocity must be given as kwarg" + assert "min_r2_velocity" in kwargs, "min_r2_velocity must be given as kwarg" + assert "same_x" in kwargs, "same_x must be given as kwarg" depth_direction = kwargs["depth_direction"] min_channels_for_velocity = kwargs["min_channels_for_velocity"] - min_r2_for_velocity = kwargs["min_r2_for_velocity"] + min_r2_velocity = kwargs["min_r2_velocity"] + same_x = kwargs["same_x"] direction_index = ["x", "y", "z"].index(depth_direction) - sampling_frequency = kwargs["sampling_frequency"] + template, channel_locations = sort_template_and_locations(template, channel_locations, depth_direction) + + if same_x: + template, channel_locations = transform_same_x(template, channel_locations) # find location of max channel max_sample_idx, max_channel_idx = np.unravel_index(np.argmin(template), template.shape) + max_peak_time = max_sample_idx / sampling_frequency * 1000 max_channel_location = channel_locations[max_channel_idx] channels_above = channel_locations[:, direction_index] >= max_channel_location[direction_index] # we only consider samples forward in time with respect to the max channel - template_above = template[max_sample_idx:, channels_above] + # TODO: not sure + # template_above = template[max_sample_idx:, channels_above] + template_above = template[:, channels_above] channel_locations_above = channel_locations[channels_above] - peak_times_ms_above = np.argmin(template_above, 0) / sampling_frequency * 1000 + peak_times_ms_above = np.argmin(template_above, 0) / sampling_frequency * 1000 - max_peak_time distances_um_above = np.array([np.linalg.norm(cl - max_channel_location) for cl in channel_locations_above]) velocity_above, intercept, score = fit_velocity(peak_times_ms_above, distances_um_above) - # if DEBUG: - # fig, ax = plt.subplots() - # ax.plot(peak_times_ms_above, distances_um_above, "o") - # x = np.linspace(peak_times_ms_above.min(), peak_times_ms_above.max(), 20) - # ax.plot(x, intercept + x * velocity_above) - # ax.set_xlabel("Peak time (ms)") - # ax.set_ylabel("Distance from max channel (um)") - # ax.set_title(f"Velocity above: {velocity_above:.2f} um/ms") - - if np.sum(channels_above) < min_channels_for_velocity: - # if DEBUG: - # ax.set_title("NaN velocity - not enough channels") - return np.nan + global DEBUG + if DEBUG: + import matplotlib.pyplot as plt + + fig, axs = plt.subplots(ncols=2, figsize=(10, 7)) + offset = 1.2 * np.max(np.ptp(template, axis=0)) + ts = np.arange(template.shape[0]) / sampling_frequency * 1000 - max_peak_time + (channel_indices_above,) = np.nonzero(channels_above) + for i, single_template in enumerate(template.T): + color = "r" if i in channel_indices_above else "k" + axs[0].plot(ts, single_template + i * offset, color=color) + axs[0].axvline(0, color="g", ls="--") + axs[1].plot(peak_times_ms_above, distances_um_above, "o") + x = 
np.linspace(peak_times_ms_above.min(), peak_times_ms_above.max(), 20) + axs[1].plot(x, intercept + x * velocity_above) + axs[1].set_xlabel("Peak time (ms)") + axs[1].set_ylabel("Distance from max channel (um)") + fig.suptitle( + f"Velocity above: {velocity_above:.2f} um/ms - score {score:.2f} - channels: {np.sum(channels_above)}" + ) + plt.show() + + if np.sum(channels_above) < min_channels_for_velocity or score < min_r2_velocity: + velocity_above = np.nan - if score < min_r2_for_velocity: - # if DEBUG: - # ax.set_title(f"NaN velocity - R2 is too low: {score:.2f}") - return np.nan return velocity_above -def get_velocity_below(template, channel_locations, **kwargs): +def get_velocity_below(template, channel_locations, sampling_frequency, **kwargs): """ Compute the velocity below the max channel of the template. @@ -670,55 +696,70 @@ def get_velocity_below(template, channel_locations, **kwargs): **kwargs: Required kwargs: - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - min_channels_for_velocity: the minimum number of channels above or below to compute velocity - - min_r2_for_velocity: the minimum r2 to accept the velocity fit - - sampling_frequency: the sampling frequency + - min_r2_velocity: the minimum r2 to accept the velocity fit + - same_x: whether to transform the template and channel locations to have the same x coordinate """ assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" assert "min_channels_for_velocity" in kwargs, "min_channels_for_velocity must be given as kwarg" - assert "min_r2_for_velocity" in kwargs, "min_r2_for_velocity must be given as kwarg" - direction = kwargs["depth_direction"] + assert "min_r2_velocity" in kwargs, "min_r2_velocity must be given as kwarg" + assert "same_x" in kwargs, "same_x must be given as kwarg" + + depth_direction = kwargs["depth_direction"] min_channels_for_velocity = kwargs["min_channels_for_velocity"] - min_r2_for_velocity = kwargs["min_r2_for_velocity"] - direction_index = ["x", "y", "z"].index(direction) + min_r2_velocity = kwargs["min_r2_velocity"] + same_x = kwargs["same_x"] + + direction_index = ["x", "y", "z"].index(depth_direction) + template, channel_locations = sort_template_and_locations(template, channel_locations, depth_direction) + + if same_x: + template, channel_locations = transform_same_x(template, channel_locations) # find location of max channel max_sample_idx, max_channel_idx = np.unravel_index(np.argmin(template), template.shape) + max_peak_time = max_sample_idx / sampling_frequency * 1000 max_channel_location = channel_locations[max_channel_idx] - sampling_frequency = kwargs["sampling_frequency"] channels_below = channel_locations[:, direction_index] <= max_channel_location[direction_index] # we only consider samples forward in time with respect to the max channel - template_below = template[max_sample_idx:, channels_below] + # template_below = template[max_sample_idx:, channels_below] + template_below = template[:, channels_below] channel_locations_below = channel_locations[channels_below] - peak_times_ms_below = np.argmin(template_below, 0) / sampling_frequency * 1000 + peak_times_ms_below = np.argmin(template_below, 0) / sampling_frequency * 1000 - max_peak_time distances_um_below = np.array([np.linalg.norm(cl - max_channel_location) for cl in channel_locations_below]) velocity_below, intercept, score = fit_velocity(peak_times_ms_below, distances_um_below) - # if DEBUG: - # fig, ax = plt.subplots() - # ax.plot(peak_times_ms_below, distances_um_below, 
"o") - # x = np.linspace(peak_times_ms_below.min(), peak_times_ms_below.max(), 20) - # ax.plot(x, intercept + x * velocity_below) - # ax.set_xlabel("Peak time (ms)") - # ax.set_ylabel("Distance from max channel (um)") - # ax.set_title(f"Velocity below: {np.round(velocity_below, 3)} um/ms") - - if np.sum(channels_below) < min_channels_for_velocity: - # if DEBUG: - # ax.set_title("NaN velocity - not enough channels") - return np.nan + global DEBUG + if DEBUG: + import matplotlib.pyplot as plt + + fig, axs = plt.subplots(ncols=2, figsize=(10, 7)) + offset = 1.2 * np.max(np.ptp(template, axis=0)) + ts = np.arange(template.shape[0]) / sampling_frequency * 1000 - max_peak_time + (channel_indices_below,) = np.nonzero(channels_below) + for i, single_template in enumerate(template.T): + color = "r" if i in channel_indices_below else "k" + axs[0].plot(ts, single_template + i * offset, color=color) + axs[0].axvline(0, color="g", ls="--") + axs[1].plot(peak_times_ms_below, distances_um_below, "o") + x = np.linspace(peak_times_ms_below.min(), peak_times_ms_below.max(), 20) + axs[1].plot(x, intercept + x * velocity_below) + axs[1].set_xlabel("Peak time (ms)") + axs[1].set_ylabel("Distance from max channel (um)") + fig.suptitle( + f"Velocity below: {np.round(velocity_below, 3)} um/ms - score {score:.2f} - channels: {np.sum(channels_below)}" + ) + plt.show() - if score < min_r2_for_velocity: - # if DEBUG: - # ax.set_title(f"NaN velocity - R2 is too low: {np.round(score, 3)}") - return np.nan + if np.sum(channels_below) < min_channels_for_velocity or score < min_r2_velocity: + velocity_below = np.nan return velocity_below -def get_exp_decay(template, channel_locations, **kwargs): +def get_exp_decay(template, channel_locations, sampling_frequency=None, **kwargs): """ Compute the exponential decay of the template amplitude over distance. 
@@ -730,14 +771,18 @@ def get_exp_decay(template, channel_locations, **kwargs): The channel locations (num_channels, 2) **kwargs: Required kwargs: - exp_peak_function: the function to use to compute the peak amplitude for the exp decay ("ptp" or "min") + - min_r2_exp_decay: the minimum r2 to accept the exp decay fit """ from scipy.optimize import curve_fit + from sklearn.metrics import r2_score - def exp_decay(x, a, b, c): - return a * np.exp(-b * x) + c + def exp_decay(x, decay, amp0, offset): + return amp0 * np.exp(-decay * x) + offset assert "exp_peak_function" in kwargs, "exp_peak_function must be given as kwarg" exp_peak_function = kwargs["exp_peak_function"] + assert "min_r2_exp_decay" in kwargs, "min_r2_exp_decay must be given as kwarg" + min_r2_exp_decay = kwargs["min_r2_exp_decay"] # exp decay fit if exp_peak_function == "ptp": fun = np.ptp @@ -747,25 +792,49 @@ def exp_decay(x, a, b, c): max_channel_location = channel_locations[np.argmax(peak_amplitudes)] channel_distances = np.array([np.linalg.norm(cl - max_channel_location) for cl in channel_locations]) distances_sort_indices = np.argsort(channel_distances) - channel_distances_sorted = channel_distances[distances_sort_indices] - peak_amplitudes_sorted = peak_amplitudes[distances_sort_indices] + # np.float128 avoids overflow error + channel_distances_sorted = channel_distances[distances_sort_indices].astype(np.float128) + peak_amplitudes_sorted = peak_amplitudes[distances_sort_indices].astype(np.float128) try: - popt, _ = curve_fit(exp_decay, channel_distances_sorted, peak_amplitudes_sorted) - exp_decay_value = popt[1] - # if DEBUG: - # fig, ax = plt.subplots() - # ax.plot(channel_distances_sorted, peak_amplitudes_sorted, "o") - # x = np.arange(channel_distances_sorted.min(), channel_distances_sorted.max()) - # ax.plot(x, exp_decay(x, *popt)) - # ax.set_xlabel("Distance from max channel (um)") - # ax.set_ylabel("Peak amplitude") - # ax.set_title(f"Exp decay: {np.round(exp_decay_value, 3)}") + amp0 = peak_amplitudes_sorted[0] + offset0 = np.min(peak_amplitudes_sorted) + + popt, _ = curve_fit( + exp_decay, + channel_distances_sorted, + peak_amplitudes_sorted, + bounds=([1e-5, amp0 - 0.5 * amp0, 0], [2, amp0 + 0.5 * amp0, 2 * offset0]), + p0=[1e-3, peak_amplitudes_sorted[0], offset0], + ) + r2 = r2_score(peak_amplitudes_sorted, exp_decay(channel_distances_sorted, *popt)) + exp_decay_value = popt[0] + + if r2 < min_r2_exp_decay: + exp_decay_value = np.nan + + global DEBUG + if DEBUG: + import matplotlib.pyplot as plt + + fig, ax = plt.subplots() + ax.plot(channel_distances_sorted, peak_amplitudes_sorted, "o") + x = np.arange(channel_distances_sorted.min(), channel_distances_sorted.max()) + ax.plot(x, exp_decay(x, *popt)) + ax.set_xlabel("Distance from max channel (um)") + ax.set_ylabel("Peak amplitude") + ax.set_title( + f"Exp decay: {np.round(exp_decay_value, 3)} - Amp: {np.round(popt[1], 3)} - Offset: {np.round(popt[2], 3)} - " + f"R2: {np.round(r2, 4)}" + ) + fig.suptitle("Exp decay") + plt.show() except: exp_decay_value = np.nan + return exp_decay_value -def get_spread(template, channel_locations, **kwargs): +def get_spread(template, channel_locations, sampling_frequency, **kwargs): """ Compute the spread of the template amplitude over distance. 
@@ -783,23 +852,49 @@ def get_spread(template, channel_locations, **kwargs): depth_direction = kwargs["depth_direction"] assert "spread_threshold" in kwargs, "spread_threshold must be given as kwarg" spread_threshold = kwargs["spread_threshold"] + assert "spread_smooth_um" in kwargs, "spread_smooth_um must be given as kwarg" + spread_smooth_um = kwargs["spread_smooth_um"] + assert "same_x" in kwargs, "same_x must be given as kwarg" + same_x = kwargs["same_x"] direction_index = ["x", "y", "z"].index(depth_direction) + template, channel_locations = sort_template_and_locations(template, channel_locations, depth_direction) + + if same_x: + template, channel_locations = transform_same_x(template, channel_locations) MM = np.ptp(template, 0) MM = MM / np.max(MM) + channel_depths = channel_locations[:, direction_index] + + if spread_smooth_um is not None and spread_smooth_um > 0: + from scipy.ndimage import gaussian_filter1d + + spread_sigma = spread_smooth_um / np.median(np.diff(np.unique(channel_depths))) + MM = gaussian_filter1d(MM, spread_sigma) + channel_locations_above_theshold = channel_locations[MM > spread_threshold] channel_depth_above_theshold = channel_locations_above_theshold[:, direction_index] spread = np.ptp(channel_depth_above_theshold) - # if DEBUG: - # fig, ax = plt.subplots() - # channel_depths = channel_locations[:, direction_index] - # sort_indices = np.argsort(channel_depths) - # ax.plot(channel_depths[sort_indices], MM[sort_indices], "o-") - # ax.axhline(spread_threshold, ls="--", color="r") - # ax.set_xlabel("Depth (um)") - # ax.set_ylabel("Amplitude") - # ax.set_title(f"Spread: {np.round(spread, 3)} um") + global DEBUG + if DEBUG: + import matplotlib.pyplot as plt + + fig, axs = plt.subplots(ncols=2, figsize=(10, 7)) + axs[0].imshow( + template.T, + aspect="auto", + origin="lower", + extent=[0, template.shape[0] / sampling_frequency, channel_depths[0], channel_depths[1]], + ) + axs[1].plot(channel_depths, MM, "o-") + axs[1].axhline(spread_threshold, ls="--", color="r") + axs[1].set_xlabel("Depth (um)") + axs[1].set_ylabel("Amplitude") + axs[1].set_title(f"Spread: {np.round(spread, 3)} um") + fig.suptitle("Spread") + plt.show() + return spread From 986fe6f50fd33a81fd3bc8ff26e05db22964bf5d Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 28 Sep 2023 13:15:14 +0200 Subject: [PATCH 221/322] CellExplorer: fix tests and deprecation (#2048) * CellExplorer: fix tests and deprecation * Drop session_info_matfile_path from __init__ --- .../cellexplorersortingextractor.py | 21 ------------------- .../tests/test_cellexplorerextractor.py | 2 +- 2 files changed, 1 insertion(+), 22 deletions(-) diff --git a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index b40b998103..31241a4147 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -40,7 +40,6 @@ def __init__( sampling_frequency: float | None = None, session_info_file_path: str | Path | None = None, spikes_matfile_path: str | Path | None = None, - session_info_matfile_path: str | Path | None = None, ): try: from pymatreader import read_mat @@ -67,26 +66,6 @@ def __init__( ) file_path = spikes_matfile_path if file_path is None else file_path - if session_info_matfile_path is not None: - # Raise an error if the warning period has expired - deprecation_issued = datetime.datetime(2023, 4, 1) - deprecation_deadline = deprecation_issued + 
datetime.timedelta(days=180) - if datetime.datetime.now() > deprecation_deadline: - raise ValueError( - "The session_info_matfile_path argument is no longer supported in. Use session_info_file_path instead." - ) - - # Otherwise, issue a DeprecationWarning - else: - warnings.warn( - "The session_info_matfile_path argument is deprecated and will be removed in six months. " - "Use session_info_file_path instead.", - DeprecationWarning, - ) - session_info_file_path = ( - session_info_matfile_path if session_info_file_path is None else session_info_file_path - ) - self.spikes_cellinfo_path = Path(file_path) self.session_path = self.spikes_cellinfo_path.parent self.session_id = self.spikes_cellinfo_path.stem.split(".")[0] diff --git a/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py b/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py index 35de8a23e2..c4c8d0c993 100644 --- a/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py +++ b/src/spikeinterface/extractors/tests/test_cellexplorerextractor.py @@ -26,7 +26,7 @@ class CellExplorerSortingTest(SortingCommonTestSuite, unittest.TestCase): ( "cellexplorer/dataset_2/20170504_396um_0um_merge.spikes.cellinfo.mat", { - "session_info_matfile_path": local_folder + "session_info_file_path": local_folder / "cellexplorer/dataset_2/20170504_396um_0um_merge.sessionInfo.mat" }, ), From 719ffc9466f2f5f91ed14129fd514379a4c5962f Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 28 Sep 2023 13:15:39 +0200 Subject: [PATCH 222/322] minor corrections to matlab documentation (#2047) --- doc/how_to/load_matlab_data.rst | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index aaca718096..e12d83810a 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -30,7 +30,7 @@ Here, we present a MATLAB code that creates a random dataset and writes it to a Loading Data in SpikeInterface ------------------------------ -After executing the above MATLAB code, a binary file named `your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. +After executing the above MATLAB code, a binary file named :code:`your_data_as_a_binary.bin` will be created in your MATLAB directory. To load this file in Python, you'll need its full path. Use the following Python script to load the binary data into SpikeInterface: @@ -55,7 +55,7 @@ Use the following Python script to load the binary data into SpikeInterface: # Load data using SpikeInterface recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, - num_channels=num_channels, dtype=dtype) + num_channels=num_channels, dtype=dtype) # Confirm that the data was loaded correctly by comparing the data shapes and see they match the MATLAB data print(recording.get_num_frames(), recording.get_num_channels()) @@ -65,18 +65,18 @@ Follow the steps above to seamlessly import your MATLAB data into SpikeInterface Common Pitfalls & Tips ---------------------- -1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use `time_axis=1` in `si.read_binary()`. +1. **Data Shape**: Make sure your MATLAB data matrix's first dimension is samples/time and the second is channels. If your time is in the second dimension, use :code:`time_axis=1` in :code:`si.read_binary()`. 2. 
**File Path**: Always double-check the Python file path. 3. **Data Type Consistency**: Ensure data types between MATLAB and Python are consistent. MATLAB's `double` is equivalent to Numpy's `float64`. 4. **Sampling Frequency**: Set the appropriate sampling frequency in Hz for SpikeInterface. -5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's [Numpy for MATLAB Users](https://numpy.org/doc/stable/user/numpy-for-matlab-users.html) guide. +5. **Transition to Python**: Moving from MATLAB to Python can be challenging. For newcomers to Python, consider reviewing numpy's `Numpy for MATLAB Users `_ guide. Using gains and offsets for integer data ---------------------------------------- Raw data formats often store data as integer values for memory efficiency. To give these integers meaningful physical units, you can apply a gain and an offset. -In SpikeInterface, you can use the `gain_to_uV` and `offset_to_uV` parameters, since traces are handled in microvolts (uV). Both parameters can be integrated into the `read_binary` function. -If your data in MATLAB is stored as `int16`, and you know the gain and offset, you can use the following code to load the data: +In SpikeInterface, you can use the :code:`gain_to_uV` and :code:`offset_to_uV` parameters, since traces are handled in microvolts (uV). Both parameters can be integrated into the :code:`read_binary` function. +If your data in MATLAB is stored as :code:`int16`, and you know the gain and offset, you can use the following code to load the data: .. code-block:: python @@ -90,7 +90,8 @@ If your data in MATLAB is stored as `int16`, and you know the gain and offset, y num_channels=num_channels, dtype=dtype_int, gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) - recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) + recording.get_traces() # Return traces in original units [type: int] + recording.get_traces(return_scaled=True) # Return traces in micro volts (uV) [type: float] This will equip your recording object with capabilities to convert the data to float values in uV using the :code:`get_traces()` method with the :code:`return_scaled` parameter set to :code:`True`. 
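[Editorial note on the guide patched above] The scaling convention the guide describes can be sanity-checked directly: with ``return_scaled=True``, :code:`get_traces()` should match the raw integer traces cast to float and transformed as ``raw * gain_to_uV + offset_to_uV``. The snippet below is a minimal sketch, not part of the patched guide; the sampling frequency, channel count, and gain/offset values are illustrative placeholders, and it assumes a recording created with :code:`si.read_binary` exactly as shown in the guide.

.. code-block:: python

    import numpy as np
    import spikeinterface as si

    # Illustrative acquisition parameters; replace with the values of your own system.
    gain_to_uV = 0.195
    offset_to_uV = 0.0

    # Same call pattern as in the guide; file name matches the MATLAB example above.
    recording = si.read_binary("your_data_as_a_binary.bin", sampling_frequency=30000,
                               num_channels=384, dtype="int16",
                               gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV)

    raw = recording.get_traces(start_frame=0, end_frame=100)                         # int16, original units
    scaled = recording.get_traces(start_frame=0, end_frame=100, return_scaled=True)  # float32, microvolts

    # return_scaled applies a linear transform: scaled = raw * gain_to_uV + offset_to_uV
    assert np.allclose(scaled, raw.astype("float32") * gain_to_uV + offset_to_uV)

A check like this is a quick way to confirm that the gain and offset supplied to :code:`read_binary` were picked up correctly before running any downstream processing.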
From a85b4a8d666311325e74feaf05e47656048355ea Mon Sep 17 00:00:00 2001 From: Windows Home Date: Thu, 28 Sep 2023 09:39:22 -0500 Subject: [PATCH 223/322] Simplify label assignment logic and add test.json files to tests directory --- .../curation/sortingview_curation.py | 19 ++--- .../sv-sorting-curation-false-positive.json | 19 +++++ .../tests/sv-sorting-curation-int.json | 39 ++++++++++ .../tests/sv-sorting-curation-str.json | 39 ++++++++++ .../tests/test_sortingview_curation.py | 71 +++---------------- 5 files changed, 114 insertions(+), 73 deletions(-) create mode 100644 src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json create mode 100644 src/spikeinterface/curation/tests/sv-sorting-curation-int.json create mode 100644 src/spikeinterface/curation/tests/sv-sorting-curation-str.json diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index f83ff3352b..7a573c38c4 100644 --- a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -77,7 +77,7 @@ def apply_sortingview_curation( # in this case, the CurationSorting takes care of finding a new unused int curation_sorting.merge(merge_group, new_unit_id=None) new_unit_id = curation_sorting.max_used_id # merged unit id - labels_dict[str(new_unit_id)] = labels_to_inherit + labels_dict[str(new_unit_id)] = labels_to_inherit # STEP 2: gather and apply sortingview curation labels # In sortingview, a unit is not required to have all labels. @@ -92,19 +92,12 @@ def apply_sortingview_curation( } # Populate the properties dictionary - for u_i, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): - labels_unit = set() - + for unit_index, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): + unit_id_str = str(unit_id) # Check for exact match first - if str(unit_id) in labels_dict: - labels_unit.update(labels_dict[str(unit_id)]) - # If no exact match, check if unit_label is a substring of unit_id (for string unit ID merged unit) - else: - for unit_label, labels in labels_dict.items(): - if isinstance(unit_id, str) and unit_label in unit_id: - labels_unit.update(labels) - for label in labels_unit: - properties[label][u_i] = True + if unit_id_str in labels_dict: + for label in labels_dict[unit_id_str]: + properties[label][unit_index] = True for prop_name, prop_values in properties.items(): curation_sorting.current_sorting.set_property(prop_name, prop_values) diff --git a/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json b/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json new file mode 100644 index 0000000000..5c29328363 --- /dev/null +++ b/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json @@ -0,0 +1,19 @@ +{ + "labelsByUnit": { + "1": [ + "accept" + ], + "2": [ + "artifact" + ], + "12": [ + "artifact" + ] + }, + "mergeGroups": [ + [ + 2, + 12 + ] + ] +} \ No newline at end of file diff --git a/src/spikeinterface/curation/tests/sv-sorting-curation-int.json b/src/spikeinterface/curation/tests/sv-sorting-curation-int.json new file mode 100644 index 0000000000..486a51a583 --- /dev/null +++ b/src/spikeinterface/curation/tests/sv-sorting-curation-int.json @@ -0,0 +1,39 @@ +{ + "labelsByUnit": { + "1": [ + "mua" + ], + "2": [ + "mua" + ], + "3": [ + "reject" + ], + "4": [ + "noise" + ], + "5": [ + "accept" + ], + "6": [ + "accept" + ], + "7": [ + "accept" + ] + }, + "mergeGroups": [ + [ + 1, + 2 + ], + [ + 3, + 4 + ], + [ + 5, 
+ 6 + ] + ] +} \ No newline at end of file diff --git a/src/spikeinterface/curation/tests/sv-sorting-curation-str.json b/src/spikeinterface/curation/tests/sv-sorting-curation-str.json new file mode 100644 index 0000000000..b2ab1d5849 --- /dev/null +++ b/src/spikeinterface/curation/tests/sv-sorting-curation-str.json @@ -0,0 +1,39 @@ +{ + "labelsByUnit": { + "a": [ + "mua" + ], + "b": [ + "mua" + ], + "c": [ + "reject" + ], + "d": [ + "noise" + ], + "e": [ + "accept" + ], + "f": [ + "accept" + ], + "g": [ + "accept" + ] + }, + "mergeGroups": [ + [ + "a", + "b" + ], + [ + "c", + "d" + ], + [ + "e", + "f" + ] + ] +} \ No newline at end of file diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 71912d7793..1579c9f03b 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -138,8 +138,6 @@ def test_json_curation(): assert len(sorting_curated_json_mua.unit_ids) == 6 assert len(sorting_curated_json_mua1.unit_ids) == 5 - print("Test for json curation passed!\n") - def test_false_positive_curation(): """ @@ -157,15 +155,8 @@ def test_false_positive_curation(): sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) print("Sorting: {}".format(sorting.get_unit_ids())) - # Test curation JSON: - test_json = {"labelsByUnit": {"1": ["accept"], "2": ["artifact"], "12": ["artifact"]}, "mergeGroups": [[2, 12]]} - - json_path = "test_data.json" - with open(json_path, "w") as f: - json.dump(test_json, f, indent=4) - - # Apply curation - sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) + json_file = parent_folder / "sv-sorting-curation-false-positive.json" + sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) print("Curated:", sorting_curated_json.get_unit_ids()) # Assertions @@ -173,8 +164,6 @@ def test_false_positive_curation(): assert not sorting_curated_json.get_unit_property(unit_id=10, key="accept") assert 21 in sorting_curated_json.unit_ids - print("False positive test for integer unit IDs passed!\n") - def test_label_inheritance_int(): """ @@ -190,26 +179,8 @@ def test_label_inheritance_int(): sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) - # Create a curation JSON with labels and merge groups - curation_dict = { - "labelsByUnit": { - "1": ["mua"], - "2": ["mua"], - "3": ["reject"], - "4": ["noise"], - "5": ["accept"], - "6": ["accept"], - "7": ["accept"], - }, - "mergeGroups": [[1, 2], [3, 4], [5, 6]], - } - - json_path = "test_curation_int.json" - with open(json_path, "w") as f: - json.dump(curation_dict, f, indent=4) - - # Apply curation - sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_path) + json_file = parent_folder / "sv-sorting-curation-int.json" + sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_file) # Assertions for merged units print(f"Merge only: {sorting_merge.get_unit_ids()}") @@ -229,12 +200,12 @@ def test_label_inheritance_int(): assert sorting_merge.get_unit_property(unit_id=10, key="accept") # Assertions for exclude_labels - sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_path, exclude_labels=["noise"]) + sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_file, exclude_labels=["noise"]) print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") assert 9 
not in sorting_exclude_noise.get_unit_ids() # Assertions for include_labels - sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_path, include_labels=["accept"]) + sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_file, include_labels=["accept"]) print(f"Include accept: {sorting_include_accept.get_unit_ids()}") assert 8 not in sorting_include_accept.get_unit_ids() assert 9 not in sorting_include_accept.get_unit_ids() @@ -254,30 +225,10 @@ def test_label_inheritance_str(): sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) print(f"Sorting: {sorting.get_unit_ids()}") - # Create a curation JSON with labels and merge groups - curation_dict = { - "labelsByUnit": { - "a": ["mua"], - "b": ["mua"], - "c": ["reject"], - "d": ["noise"], - "e": ["accept"], - "f": ["accept"], - "g": ["accept"], - }, - "mergeGroups": [["a", "b"], ["c", "d"], ["e", "f"]], - } - - json_path = "test_curation_str.json" - with open(json_path, "w") as f: - json.dump(curation_dict, f, indent=4) - - # Check label inheritance for merged units - merged_id_1 = "a-b" - merged_id_2 = "c-d" - merged_id_3 = "e-f" + # Apply curation - sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_path, verbose=True) + json_file = parent_folder / "sv-sorting-curation-str.json" + sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) # Assertions for merged units print(f"Merge only: {sorting_merge.get_unit_ids()}") @@ -297,12 +248,12 @@ def test_label_inheritance_str(): assert sorting_merge.get_unit_property(unit_id="e-f", key="accept") # Assertions for exclude_labels - sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_path, exclude_labels=["noise"]) + sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_file, exclude_labels=["noise"]) print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") assert "c-d" not in sorting_exclude_noise.get_unit_ids() # Assertions for include_labels - sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_path, include_labels=["accept"]) + sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_file, include_labels=["accept"]) print(f"Include accept: {sorting_include_accept.get_unit_ids()}") assert "a-b" not in sorting_include_accept.get_unit_ids() assert "c-d" not in sorting_include_accept.get_unit_ids() From 54d40eb2a0cc4468100fd8a058cb8a6b8354fd67 Mon Sep 17 00:00:00 2001 From: Windows Home Date: Thu, 28 Sep 2023 09:52:29 -0500 Subject: [PATCH 224/322] Comment out print statements --- .../tests/test_sortingview_curation.py | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 1579c9f03b..a620cb8db1 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -91,7 +91,7 @@ def test_sha1_curation(): # https://figurl.org/f?v=gs://figurl/spikesortingview-10&d=sha1://bd53f6b707f8121cadc901562a89b67aec81cc81&label=SpikeInterface%20-%20Sorting%20Summary&s={%22sortingCuration%22:%22sha1://1182ba19671fcc7d3f8e0501b0f8c07fb9736c22%22} sha1_uri = "sha1://1182ba19671fcc7d3f8e0501b0f8c07fb9736c22" sorting_curated_sha1 = apply_sortingview_curation(sorting, uri_or_json=sha1_uri, verbose=True) - print(f"From SHA: {sorting_curated_sha1}") + # 
print(f"From SHA: {sorting_curated_sha1}") assert len(sorting_curated_sha1.unit_ids) == 9 assert "#8-#9" in sorting_curated_sha1.unit_ids @@ -118,7 +118,7 @@ def test_json_curation(): # from curation.json json_file = parent_folder / "sv-sorting-curation.json" - print(f"Sorting: {sorting.get_unit_ids()}") + # print(f"Sorting: {sorting.get_unit_ids()}") sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) assert len(sorting_curated_json.unit_ids) == 9 @@ -153,11 +153,11 @@ def test_false_positive_curation(): labels = np.random.randint(1, num_units + 1, size=num_spikes) sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) - print("Sorting: {}".format(sorting.get_unit_ids())) + # print("Sorting: {}".format(sorting.get_unit_ids())) json_file = parent_folder / "sv-sorting-curation-false-positive.json" sorting_curated_json = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) - print("Curated:", sorting_curated_json.get_unit_ids()) + # print("Curated:", sorting_curated_json.get_unit_ids()) # Assertions assert sorting_curated_json.get_unit_property(unit_id=1, key="accept") @@ -183,7 +183,7 @@ def test_label_inheritance_int(): sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_file) # Assertions for merged units - print(f"Merge only: {sorting_merge.get_unit_ids()}") + # print(f"Merge only: {sorting_merge.get_unit_ids()}") assert sorting_merge.get_unit_property(unit_id=8, key="mua") # 8 = merged unit of 1 and 2 assert not sorting_merge.get_unit_property(unit_id=8, key="reject") assert not sorting_merge.get_unit_property(unit_id=8, key="noise") @@ -201,12 +201,12 @@ def test_label_inheritance_int(): # Assertions for exclude_labels sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_file, exclude_labels=["noise"]) - print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") + # print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") assert 9 not in sorting_exclude_noise.get_unit_ids() # Assertions for include_labels sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_file, include_labels=["accept"]) - print(f"Include accept: {sorting_include_accept.get_unit_ids()}") + # print(f"Include accept: {sorting_include_accept.get_unit_ids()}") assert 8 not in sorting_include_accept.get_unit_ids() assert 9 not in sorting_include_accept.get_unit_ids() assert 10 in sorting_include_accept.get_unit_ids() @@ -224,14 +224,14 @@ def test_label_inheritance_str(): labels = np.random.choice(["a", "b", "c", "d", "e", "f", "g"], size=num_spikes) sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) - print(f"Sorting: {sorting.get_unit_ids()}") + # print(f"Sorting: {sorting.get_unit_ids()}") # Apply curation json_file = parent_folder / "sv-sorting-curation-str.json" sorting_merge = apply_sortingview_curation(sorting, uri_or_json=json_file, verbose=True) # Assertions for merged units - print(f"Merge only: {sorting_merge.get_unit_ids()}") + # print(f"Merge only: {sorting_merge.get_unit_ids()}") assert sorting_merge.get_unit_property(unit_id="a-b", key="mua") assert not sorting_merge.get_unit_property(unit_id="a-b", key="reject") assert not sorting_merge.get_unit_property(unit_id="a-b", key="noise") @@ -249,17 +249,16 @@ def test_label_inheritance_str(): # Assertions for exclude_labels sorting_exclude_noise = apply_sortingview_curation(sorting, uri_or_json=json_file, exclude_labels=["noise"]) - print(f"Exclude noise: 
{sorting_exclude_noise.get_unit_ids()}") + # print(f"Exclude noise: {sorting_exclude_noise.get_unit_ids()}") assert "c-d" not in sorting_exclude_noise.get_unit_ids() # Assertions for include_labels sorting_include_accept = apply_sortingview_curation(sorting, uri_or_json=json_file, include_labels=["accept"]) - print(f"Include accept: {sorting_include_accept.get_unit_ids()}") + # print(f"Include accept: {sorting_include_accept.get_unit_ids()}") assert "a-b" not in sorting_include_accept.get_unit_ids() assert "c-d" not in sorting_include_accept.get_unit_ids() assert "e-f" in sorting_include_accept.get_unit_ids() - if __name__ == "__main__": # generate_sortingview_curation_dataset() test_sha1_curation() From f1b7bfe668ac8ff0581f252241edfb004577551d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 28 Sep 2023 14:53:07 +0000 Subject: [PATCH 225/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../curation/tests/sv-sorting-curation-false-positive.json | 2 +- src/spikeinterface/curation/tests/sv-sorting-curation-int.json | 2 +- src/spikeinterface/curation/tests/sv-sorting-curation-str.json | 2 +- src/spikeinterface/curation/tests/test_sortingview_curation.py | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json b/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json index 5c29328363..48881388bb 100644 --- a/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json +++ b/src/spikeinterface/curation/tests/sv-sorting-curation-false-positive.json @@ -16,4 +16,4 @@ 12 ] ] -} \ No newline at end of file +} diff --git a/src/spikeinterface/curation/tests/sv-sorting-curation-int.json b/src/spikeinterface/curation/tests/sv-sorting-curation-int.json index 486a51a583..2047c514ce 100644 --- a/src/spikeinterface/curation/tests/sv-sorting-curation-int.json +++ b/src/spikeinterface/curation/tests/sv-sorting-curation-int.json @@ -36,4 +36,4 @@ 6 ] ] -} \ No newline at end of file +} diff --git a/src/spikeinterface/curation/tests/sv-sorting-curation-str.json b/src/spikeinterface/curation/tests/sv-sorting-curation-str.json index b2ab1d5849..2585b5cc50 100644 --- a/src/spikeinterface/curation/tests/sv-sorting-curation-str.json +++ b/src/spikeinterface/curation/tests/sv-sorting-curation-str.json @@ -36,4 +36,4 @@ "f" ] ] -} \ No newline at end of file +} diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index a620cb8db1..22085f2f77 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -259,6 +259,7 @@ def test_label_inheritance_str(): assert "c-d" not in sorting_include_accept.get_unit_ids() assert "e-f" in sorting_include_accept.get_unit_ids() + if __name__ == "__main__": # generate_sortingview_curation_dataset() test_sha1_curation() From e0bcb28fc019e7ecde6df3ecdeb504e3c719fccc Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 29 Sep 2023 07:22:13 +0200 Subject: [PATCH 226/322] move import in --- src/spikeinterface/extractors/cbin_ibl.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/spikeinterface/extractors/cbin_ibl.py b/src/spikeinterface/extractors/cbin_ibl.py index 3dde998ca1..bd56208ebe 100644 --- 
a/src/spikeinterface/extractors/cbin_ibl.py +++ b/src/spikeinterface/extractors/cbin_ibl.py @@ -6,13 +6,6 @@ from spikeinterface.extractors.neuropixels_utils import get_neuropixels_sample_shifts from spikeinterface.core.core_tools import define_function_from_class -try: - import mtscomp - - HAVE_MTSCOMP = True -except: - HAVE_MTSCOMP = False - class CompressedBinaryIblExtractor(BaseRecording): """Load IBL data as an extractor object. @@ -42,7 +35,6 @@ class CompressedBinaryIblExtractor(BaseRecording): """ extractor_name = "CompressedBinaryIbl" - installed = HAVE_MTSCOMP mode = "folder" installation_mesg = "To use the CompressedBinaryIblExtractor, install mtscomp: \n\n pip install mtscomp\n\n" name = "cbin_ibl" @@ -51,7 +43,10 @@ def __init__(self, folder_path, load_sync_channel=False, stream_name="ap"): # this work only for future neo from neo.rawio.spikeglxrawio import read_meta_file, extract_stream_info - assert HAVE_MTSCOMP + try: + import mtscomp + except: + raise ImportError(self.installation_mesg) folder_path = Path(folder_path) # check bands From 6fd74496e368be2031ae74ee0c64173142788adb Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 29 Sep 2023 12:13:00 +0200 Subject: [PATCH 227/322] oups --- src/spikeinterface/comparison/groundtruthstudy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index d43727cb44..3a12aeeb70 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -180,7 +180,7 @@ def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True if sorting_exists: # delete older sorting + log before running sorters - shutil.rmtree(sorting_exists) + shutil.rmtree(sorter_folder) log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" if log_file.exists(): log_file.unlink() From 8c633aceb84ff8e19e98949e9a9e366da3277053 Mon Sep 17 00:00:00 2001 From: Matthias H Hennig Date: Fri, 29 Sep 2023 11:54:02 +0100 Subject: [PATCH 228/322] Pip install into working directory for containers Apptainer fails to pip install into the system directory (not writable by default, no space when writable), and the --user flag ensures packages are installed in a writable location. Note not tested with docker. 
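For reference, a sketch of the command pattern used after this change (the exact package
spec depends on which branch of run_sorter_container() is taken, e.g. dev install vs a
pinned release; si_version and container_client are the variables already used in that
function):

    # --user makes pip target a per-user, writable site-packages inside the container
    cmd = f"pip install --user --upgrade --no-input spikeinterface[full]=={si_version}"
    res_output = container_client.run_command(cmd)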
--- src/spikeinterface/sorters/runsorter.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index 6e6ccc0358..f6501ef40f 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -514,19 +514,19 @@ def run_sorter_container( res_output = container_client.run_command(cmd) cmd = f"cp -r {si_dev_path_unix} {si_source_folder}" res_output = container_client.run_command(cmd) - cmd = f"pip install {si_source_folder}/spikeinterface[full]" + cmd = f"pip install --user {si_source_folder}/spikeinterface[full]" else: si_source = "remote repository" - cmd = "pip install --upgrade --no-input git+https://github.com/SpikeInterface/spikeinterface.git#egg=spikeinterface[full]" + cmd = "pip install --user --upgrade --no-input git+https://github.com/SpikeInterface/spikeinterface.git#egg=spikeinterface[full]" if verbose: print(f"Installing dev spikeinterface from {si_source}") res_output = container_client.run_command(cmd) - cmd = "pip install --upgrade --no-input https://github.com/NeuralEnsemble/python-neo/archive/master.zip" + cmd = "pip install --user --upgrade --no-input https://github.com/NeuralEnsemble/python-neo/archive/master.zip" res_output = container_client.run_command(cmd) else: if verbose: print(f"Installing spikeinterface=={si_version} in {container_image}") - cmd = f"pip install --upgrade --no-input spikeinterface[full]=={si_version}" + cmd = f"pip install --user --upgrade --no-input spikeinterface[full]=={si_version}" res_output = container_client.run_command(cmd) else: # TODO version checking @@ -540,7 +540,7 @@ def run_sorter_container( if extra_requirements: if verbose: print(f"Installing extra requirements: {extra_requirements}") - cmd = f"pip install --upgrade --no-input {' '.join(extra_requirements)}" + cmd = f"pip install --user --upgrade --no-input {' '.join(extra_requirements)}" res_output = container_client.run_command(cmd) # run sorter on folder From 4f2a50d7d1e0414bdf3bf2bdc3b9d35b12a900e3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 29 Sep 2023 11:18:26 +0000 Subject: [PATCH 229/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/extractors/mdaextractors.py | 2 +- .../benchmark/benchmark_motion_estimation.py | 6 ++---- .../benchmark/benchmark_motion_interpolation.py | 8 ++++++-- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index 98378551f5..b863e338fa 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -220,7 +220,7 @@ def write_sorting(sorting, save_path, write_primary_channels=False): times = sorting.get_unit_spike_train(unit_id=unit_id) times_list.append(times) # unit id may not be numeric - if unit_id.dtype.kind in 'biufc': + if unit_id.dtype.kind in "biufc": labels_list.append(np.ones(times.shape) * unit_id) else: labels_list.append(np.ones(times.shape) * unit_id_i) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index c505676c05..abf40b2da6 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ 
b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -584,13 +584,13 @@ def plot_motions_several_benchmarks(benchmarks): _simpleaxis(ax) -def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): +def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) for count, benchmark in enumerate(benchmarks): color = colors[count] if colors is not None else None - + if detailed: bottom = 0 i = 0 @@ -606,8 +606,6 @@ def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=No else: total_run_time = np.sum([value for key, value in benchmark.run_times.items()]) ax.bar([count], [total_run_time], color=color, edgecolor="black") - - # ax.legend() ax.set_ylabel("speed (s)") diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py index 8e5afb2e8e..b28b29f17c 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py @@ -193,11 +193,15 @@ def run_sorters(self, skip_already_done=True): recording = self.recordings[case["recording"]] output_folder = self.folder / f"tmp_sortings_{label}" if output_folder.exists() and skip_already_done: - print('already done') + print("already done") sorting = read_sorter_folder(output_folder) else: sorting = run_sorter( - sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder + sorter_name, + recording, + output_folder, + **sorter_params, + delete_output_folder=self.delete_output_folder, ) self.sortings[label] = sorting From c1cd889beacca66f43262f95e18033100f98d59d Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 29 Sep 2023 13:19:35 +0200 Subject: [PATCH 230/322] Add 'column_range' and simplify dimension handling --- .../postprocessing/template_metrics.py | 76 +++++++++++-------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index 090dae4567..774ebab4a9 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ b/src/spikeinterface/postprocessing/template_metrics.py @@ -207,7 +207,7 @@ def get_extension_function(): min_r2_exp_decay=0.5, spread_threshold=0.2, spread_smooth_um=20, - same_x=False, + column_range=None, ) @@ -265,7 +265,13 @@ def compute_template_metrics( * min_channels_for_velocity: the minimum number of channels above or below to compute velocity, default: 5 * min_r2_velocity: the minimum r2 to accept the velocity fit, default: 0.7 * exp_peak_function: the function to use to compute the peak amplitude for the exp decay, default: "ptp" + * min_r2_exp_decay: the minimum r2 to accept the exp decay fit, default: 0.5 * spread_threshold: the threshold to compute the spread, default: 0.2 + * spread_smooth_um: the smoothing in um to compute the spread, default: 20 + * column_range: the range in um in the horizontal direction to consider channels for velocity, default: None + - If None, all channels all channels are considered + - If 0 or 1, only the "column" that includes the max channel is considered + - If > 1, only channels within range (+/-) um from the max channel horizontal position are used Returns ------- @@ -278,6 +284,7 @@ def compute_template_metrics( ----- If any multi-channel 
metric is in the metric_names or include_multi_channel_metrics is True, sparsity must be None, so that one metric value will be computed per unit. + For multi-channel metrocs, 3D channel locations are not supported. By default, the depth direction is "y". """ if debug_plots: global DEBUG @@ -294,6 +301,9 @@ def compute_template_metrics( "If multi-channel metrics are computed, sparsity must be None, " "so that each unit will correspond to 1 row of the output dataframe." ) + assert ( + waveform_extractor.get_channel_locations().shape[1] == 2 + ), "If multi-channel metrics are computed, channel locations must be 2D." default_kwargs = _default_function_kwargs.copy() if metrics_kwargs is None: metrics_kwargs = default_kwargs @@ -579,17 +589,22 @@ def get_num_negative_peaks(template_single, sampling_frequency, **kwargs): # Multi-channel metrics -def transform_same_x(template, channel_locations): - max_channel_x = channel_locations[np.argmax(np.ptp(template, axis=0)), 0] - same_x_mask = channel_locations[:, 0] == max_channel_x - channel_locations_same_x = channel_locations[same_x_mask] - template_same_x = template[:, same_x_mask] - return template_same_x, channel_locations_same_x +def transform_column_range(template, channel_locations, column_range, depth_direction="y"): + column_dim = 0 if depth_direction == "y" else 1 + if column_range is None: + template_column_range = template + channel_locations_column_range = channel_locations + else: + max_channel_x = channel_locations[np.argmax(np.ptp(template, axis=0)), 0] + column_mask = np.abs(channel_locations[:, column_dim] - max_channel_x) <= column_range + template_column_range = template[:, column_mask] + channel_locations_column_range = channel_locations[column_mask] + return template_column_range, channel_locations_column_range def sort_template_and_locations(template, channel_locations, depth_direction="y"): - direction_index = ["x", "y", "z"].index(depth_direction) - sort_indices = np.argsort(channel_locations[:, direction_index]) + depth_dim = 1 if depth_direction == "y" else 0 + sort_indices = np.argsort(channel_locations[:, depth_dim]) return template[:, sort_indices], channel_locations[sort_indices, :] @@ -621,29 +636,28 @@ def get_velocity_above(template, channel_locations, sampling_frequency, **kwargs - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - min_channels_for_velocity: the minimum number of channels above or below to compute velocity - min_r2_velocity: the minimum r2 to accept the velocity fit + - column_range: the range in um in the x-direction to consider channels for velocity """ assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" assert "min_channels_for_velocity" in kwargs, "min_channels_for_velocity must be given as kwarg" assert "min_r2_velocity" in kwargs, "min_r2_velocity must be given as kwarg" - assert "same_x" in kwargs, "same_x must be given as kwarg" + assert "column_range" in kwargs, "column_range must be given as kwarg" depth_direction = kwargs["depth_direction"] min_channels_for_velocity = kwargs["min_channels_for_velocity"] min_r2_velocity = kwargs["min_r2_velocity"] - same_x = kwargs["same_x"] + column_range = kwargs["column_range"] - direction_index = ["x", "y", "z"].index(depth_direction) + depth_dim = 1 if depth_direction == "y" else 0 + template, channel_locations = transform_column_range(template, channel_locations, column_range, depth_direction) template, channel_locations = sort_template_and_locations(template, channel_locations, 
depth_direction) - if same_x: - template, channel_locations = transform_same_x(template, channel_locations) - # find location of max channel max_sample_idx, max_channel_idx = np.unravel_index(np.argmin(template), template.shape) max_peak_time = max_sample_idx / sampling_frequency * 1000 max_channel_location = channel_locations[max_channel_idx] - channels_above = channel_locations[:, direction_index] >= max_channel_location[direction_index] + channels_above = channel_locations[:, depth_dim] >= max_channel_location[depth_dim] # we only consider samples forward in time with respect to the max channel # TODO: not sure @@ -697,30 +711,28 @@ def get_velocity_below(template, channel_locations, sampling_frequency, **kwargs - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - min_channels_for_velocity: the minimum number of channels above or below to compute velocity - min_r2_velocity: the minimum r2 to accept the velocity fit - - same_x: whether to transform the template and channel locations to have the same x coordinate + - column_range: the range in um in the x-direction to consider channels for velocity """ assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" assert "min_channels_for_velocity" in kwargs, "min_channels_for_velocity must be given as kwarg" assert "min_r2_velocity" in kwargs, "min_r2_velocity must be given as kwarg" - assert "same_x" in kwargs, "same_x must be given as kwarg" + assert "column_range" in kwargs, "column_range must be given as kwarg" depth_direction = kwargs["depth_direction"] min_channels_for_velocity = kwargs["min_channels_for_velocity"] min_r2_velocity = kwargs["min_r2_velocity"] - same_x = kwargs["same_x"] + column_range = kwargs["column_range"] - direction_index = ["x", "y", "z"].index(depth_direction) + depth_dim = 1 if depth_direction == "y" else 0 + template, channel_locations = transform_column_range(template, channel_locations, column_range) template, channel_locations = sort_template_and_locations(template, channel_locations, depth_direction) - if same_x: - template, channel_locations = transform_same_x(template, channel_locations) - # find location of max channel max_sample_idx, max_channel_idx = np.unravel_index(np.argmin(template), template.shape) max_peak_time = max_sample_idx / sampling_frequency * 1000 max_channel_location = channel_locations[max_channel_idx] - channels_below = channel_locations[:, direction_index] <= max_channel_location[direction_index] + channels_below = channel_locations[:, depth_dim] <= max_channel_location[depth_dim] # we only consider samples forward in time with respect to the max channel # template_below = template[max_sample_idx:, channels_below] @@ -847,6 +859,7 @@ def get_spread(template, channel_locations, sampling_frequency, **kwargs): **kwargs: Required kwargs: - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - spread_threshold: the threshold to compute the spread + - column_range: the range in um in the x-direction to consider channels for velocity """ assert "depth_direction" in kwargs, "depth_direction must be given as kwarg" depth_direction = kwargs["depth_direction"] @@ -854,17 +867,16 @@ def get_spread(template, channel_locations, sampling_frequency, **kwargs): spread_threshold = kwargs["spread_threshold"] assert "spread_smooth_um" in kwargs, "spread_smooth_um must be given as kwarg" spread_smooth_um = kwargs["spread_smooth_um"] - assert "same_x" in kwargs, "same_x must be given as kwarg" - same_x = 
kwargs["same_x"] + assert "column_range" in kwargs, "column_range must be given as kwarg" + column_range = kwargs["column_range"] - direction_index = ["x", "y", "z"].index(depth_direction) + depth_dim = 1 if depth_direction == "y" else 0 + template, channel_locations = transform_column_range(template, channel_locations, column_range) template, channel_locations = sort_template_and_locations(template, channel_locations, depth_direction) - if same_x: - template, channel_locations = transform_same_x(template, channel_locations) MM = np.ptp(template, 0) MM = MM / np.max(MM) - channel_depths = channel_locations[:, direction_index] + channel_depths = channel_locations[:, depth_dim] if spread_smooth_um is not None and spread_smooth_um > 0: from scipy.ndimage import gaussian_filter1d @@ -873,7 +885,7 @@ def get_spread(template, channel_locations, sampling_frequency, **kwargs): MM = gaussian_filter1d(MM, spread_sigma) channel_locations_above_theshold = channel_locations[MM > spread_threshold] - channel_depth_above_theshold = channel_locations_above_theshold[:, direction_index] + channel_depth_above_theshold = channel_locations_above_theshold[:, depth_dim] spread = np.ptp(channel_depth_above_theshold) global DEBUG @@ -885,7 +897,7 @@ def get_spread(template, channel_locations, sampling_frequency, **kwargs): template.T, aspect="auto", origin="lower", - extent=[0, template.shape[0] / sampling_frequency, channel_depths[0], channel_depths[1]], + extent=[0, template.shape[0] / sampling_frequency, channel_depths[0], channel_depths[-1]], ) axs[1].plot(channel_depths, MM, "o-") axs[1].axhline(spread_threshold, ls="--", color="r") From c8be1a0def93d4a639370a146c5d3244234049c0 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 29 Sep 2023 14:17:37 +0200 Subject: [PATCH 231/322] Fix firing range when bin size is to small (#2054) * Fix firing range when bin size is to small * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- src/spikeinterface/qualitymetrics/misc_metrics.py | 9 +++++++++ .../qualitymetrics/tests/test_metrics_functions.py | 8 ++++++-- .../benchmark/benchmark_motion_estimation.py | 6 ++---- .../benchmark/benchmark_motion_interpolation.py | 8 ++++++-- 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/qualitymetrics/misc_metrics.py b/src/spikeinterface/qualitymetrics/misc_metrics.py index e9726a16da..d3f875959e 100644 --- a/src/spikeinterface/qualitymetrics/misc_metrics.py +++ b/src/spikeinterface/qualitymetrics/misc_metrics.py @@ -602,6 +602,15 @@ def compute_firing_ranges(waveform_extractor, bin_size_s=5, percentiles=(5, 95), if unit_ids is None: unit_ids = sorting.unit_ids + if all( + [ + waveform_extractor.get_num_samples(segment_index) < bin_size_samples + for segment_index in range(waveform_extractor.get_num_segments()) + ] + ): + warnings.warn(f"Bin size of {bin_size_s}s is larger than each segment duration. 
Firing ranges are set to NaN.") + return {unit_id: np.nan for unit_id in unit_ids} + # for each segment, we compute the firing rate histogram and we concatenate them firing_rate_histograms = {unit_id: np.array([], dtype=float) for unit_id in sorting.unit_ids} for segment_index in range(waveform_extractor.get_num_segments()): diff --git a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py index 2d63a06b17..8a32c4cee8 100644 --- a/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py +++ b/src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py @@ -220,6 +220,10 @@ def test_calculate_firing_range(waveform_extractor_simple): firing_ranges = compute_firing_ranges(we) print(firing_ranges) + with pytest.warns(UserWarning) as w: + firing_ranges_nan = compute_firing_ranges(we, bin_size_s=we.get_total_duration() + 1) + assert np.all([np.isnan(f) for f in firing_ranges_nan.values()]) + def test_calculate_amplitude_cutoff(waveform_extractor_simple): we = waveform_extractor_simple @@ -378,7 +382,7 @@ def test_calculate_drift_metrics(waveform_extractor_simple): if __name__ == "__main__": sim_data = _simulated_data() we = _waveform_extractor_simple() - we_violations = _waveform_extractor_violations(sim_data) + # we_violations = _waveform_extractor_violations(sim_data) # test_calculate_amplitude_cutoff(we) # test_calculate_presence_ratio(we) # test_calculate_amplitude_median(we) @@ -387,4 +391,4 @@ def test_calculate_drift_metrics(waveform_extractor_simple): # test_calculate_drift_metrics(we) # test_synchrony_metrics(we) test_calculate_firing_range(we) - test_calculate_amplitude_cv_metrics(we) + # test_calculate_amplitude_cv_metrics(we) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py index c505676c05..abf40b2da6 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_estimation.py @@ -584,13 +584,13 @@ def plot_motions_several_benchmarks(benchmarks): _simpleaxis(ax) -def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): +def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=None): if ax is None: fig, ax = plt.subplots(figsize=(5, 5)) for count, benchmark in enumerate(benchmarks): color = colors[count] if colors is not None else None - + if detailed: bottom = 0 i = 0 @@ -606,8 +606,6 @@ def plot_speed_several_benchmarks(benchmarks, detailed=True, ax=None, colors=No else: total_run_time = np.sum([value for key, value in benchmark.run_times.items()]) ax.bar([count], [total_run_time], color=color, edgecolor="black") - - # ax.legend() ax.set_ylabel("speed (s)") diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py index 8e5afb2e8e..b28b29f17c 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_motion_interpolation.py @@ -193,11 +193,15 @@ def run_sorters(self, skip_already_done=True): recording = self.recordings[case["recording"]] output_folder = self.folder / f"tmp_sortings_{label}" if output_folder.exists() and skip_already_done: - print('already done') + print("already done") sorting = 
read_sorter_folder(output_folder) else: sorting = run_sorter( - sorter_name, recording, output_folder, **sorter_params, delete_output_folder=self.delete_output_folder + sorter_name, + recording, + output_folder, + **sorter_params, + delete_output_folder=self.delete_output_folder, ) self.sortings[label] = sorting From 06089b8c0ed74c37c89d0d6ed2684e4c57668322 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Fri, 29 Sep 2023 15:07:28 +0200 Subject: [PATCH 232/322] Patch for sharedmem --- .../sorters/internal/spyking_circus2.py | 3 +-- .../clustering/clustering_tools.py | 5 +++-- .../clustering/random_projections.py | 15 ++++++++++----- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 710c4f76f4..a0a4d0823c 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -118,8 +118,6 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): if clustering_folder.exists(): shutil.rmtree(clustering_folder) - sorting = sorting.save(folder=clustering_folder) - ## We get the templates our of such a clustering waveforms_params = params["waveforms"].copy() waveforms_params.update(job_kwargs) @@ -131,6 +129,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): mode = "memory" waveforms_folder = None else: + sorting = sorting.save(folder=clustering_folder) mode = "folder" waveforms_folder = sorter_output_folder / "waveforms" diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 28a1a63065..1a8332ad6d 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -574,6 +574,8 @@ def remove_duplicates_via_matching( if tmp_folder is None: tmp_folder = get_global_tmp_folder() + tmp_folder.mkdir(parents=True, exist_ok=True) + tmp_filename = tmp_folder / "tmp.raw" f = open(tmp_filename, "wb") @@ -583,8 +585,8 @@ def remove_duplicates_via_matching( f.close() recording = BinaryRecordingExtractor(tmp_filename, num_channels=num_chans, sampling_frequency=fs, dtype="float32") - recording.annotate(is_filtered=True) recording = recording.set_probe(waveform_extractor.recording.get_probe()) + recording.annotate(is_filtered=True) margin = 2 * max(waveform_extractor.nbefore, waveform_extractor.nafter) half_marging = margin // 2 @@ -608,7 +610,6 @@ def remove_duplicates_via_matching( t_stop = padding + (i + 1) * duration sub_recording = recording.frame_slice(t_start - half_marging, t_stop + half_marging) - method_kwargs.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 864548e7d4..1f97bf5201 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -181,17 +181,20 @@ def sigmoid(x, L, x0, k, b): else: tmp_folder = Path(params["tmp_folder"]) + tmp_folder.mkdir(parents=True, exist_ok=True) + + sorting_folder = tmp_folder / "sorting" + unit_ids = np.arange(len(np.unique(spikes["unit_index"]))) + sorting = 
NumpySorting(spikes, fs, unit_ids=unit_ids) + if params["shared_memory"]: waveform_folder = None mode = "memory" else: waveform_folder = tmp_folder / "waveforms" mode = "folder" + sorting = sorting.save(folder=sorting_folder) - sorting_folder = tmp_folder / "sorting" - unit_ids = np.arange(len(np.unique(spikes["unit_index"]))) - sorting = NumpySorting(spikes, fs, unit_ids=unit_ids) - sorting = sorting.save(folder=sorting_folder) we = extract_waveforms( recording, sorting, @@ -219,12 +222,14 @@ def sigmoid(x, L, x0, k, b): we, noise_levels, peak_labels, job_kwargs=cleaning_matching_params, **cleaning_params ) + del we, sorting + if params["tmp_folder"] is None: shutil.rmtree(tmp_folder) else: if not params["shared_memory"]: shutil.rmtree(tmp_folder / "waveforms") - shutil.rmtree(tmp_folder / "sorting") + shutil.rmtree(tmp_folder / "sorting") if verbose: print("We kept %d non-duplicated clusters..." % len(labels)) From fa725fcd24c26ca3a55605a051c3527fb23cc35b Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 29 Sep 2023 16:27:53 -0400 Subject: [PATCH 233/322] add keyword arguments --- doc/how_to/load_matlab_data.rst | 4 +- doc/modules/curation.rst | 36 +++++----- doc/modules/exporters.rst | 19 +++-- doc/modules/extractors.rst | 35 +++++++--- doc/modules/motion_correction.rst | 44 ++++++------ doc/modules/postprocessing.rst | 10 +-- doc/modules/preprocessing.rst | 70 ++++++++++--------- doc/modules/qualitymetrics.rst | 8 +-- doc/modules/qualitymetrics/amplitude_cv.rst | 2 +- .../qualitymetrics/amplitude_median.rst | 2 +- doc/modules/qualitymetrics/d_prime.rst | 2 +- doc/modules/qualitymetrics/drift.rst | 6 +- doc/modules/qualitymetrics/firing_range.rst | 2 +- doc/modules/qualitymetrics/firing_rate.rst | 2 +- .../qualitymetrics/isolation_distance.rst | 10 +++ doc/modules/qualitymetrics/l_ratio.rst | 11 +++ doc/modules/qualitymetrics/presence_ratio.rst | 2 +- .../qualitymetrics/silhouette_score.rst | 10 +++ .../qualitymetrics/sliding_rp_violations.rst | 2 +- doc/modules/qualitymetrics/snr.rst | 3 +- doc/modules/qualitymetrics/synchrony.rst | 2 +- doc/modules/sorters.rst | 42 +++++------ doc/modules/sortingcomponents.rst | 23 +++--- doc/modules/widgets.rst | 10 +-- 24 files changed, 203 insertions(+), 154 deletions(-) diff --git a/doc/how_to/load_matlab_data.rst b/doc/how_to/load_matlab_data.rst index e12d83810a..54a66c0890 100644 --- a/doc/how_to/load_matlab_data.rst +++ b/doc/how_to/load_matlab_data.rst @@ -54,7 +54,7 @@ Use the following Python script to load the binary data into SpikeInterface: dtype = "float64" # MATLAB's double corresponds to Python's float64 # Load data using SpikeInterface - recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, + recording = si.read_binary(file_paths=file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype) # Confirm that the data was loaded correctly by comparing the data shapes and see they match the MATLAB data @@ -86,7 +86,7 @@ If your data in MATLAB is stored as :code:`int16`, and you know the gain and off gain_to_uV = 0.195 # Adjust according to your MATLAB dataset offset_to_uV = 0 # Adjust according to your MATLAB dataset - recording = si.read_binary(file_path, sampling_frequency=sampling_frequency, + recording = si.read_binary(file_paths=file_path, sampling_frequency=sampling_frequency, num_channels=num_channels, dtype=dtype_int, gain_to_uV=gain_to_uV, offset_to_uV=offset_to_uV) diff --git a/doc/modules/curation.rst b/doc/modules/curation.rst index 
6101b81552..23e9e20d96 100644
--- a/doc/modules/curation.rst
+++ b/doc/modules/curation.rst
@@ -24,21 +24,21 @@ The merging and splitting operations are handled by the :py:class:`~spikeinterfa
 
     from spikeinterface.curation import CurationSorting
 
-    sorting = run_sorter('kilosort2', recording)
+    sorting = run_sorter(sorter_name='kilosort2', recording=recording)
 
-    cs = CurationSorting(sorting)
+    cs = CurationSorting(parent_sorting=sorting)
 
     # make a first merge
-    cs.merge(['#1', '#5', '#15'])
+    cs.merge(units_to_merge=['#1', '#5', '#15'])
 
     # make a second merge
-    cs.merge(['#11', '#21'])
+    cs.merge(units_to_merge=['#11', '#21'])
 
     # make a split
     split_index = ... # some criteria on spikes
-    cs.split('#20', split_index)
+    cs.split(split_unit_id='#20', indices_list=split_index)
 
-    # here the final clean sorting
+    # here is the final clean sorting
     clean_sorting = cs.sorting
 
 
@@ -60,12 +60,12 @@ merges. Therefore, it has many parameters and options.
 
     from spikeinterface.curation import MergeUnitsSorting, get_potential_auto_merge
 
-    sorting = run_sorter('kilosort', recording)
+    sorting = run_sorter(sorter_name='kilosort', recording=recording)
 
-    we = extract_waveforms(recording, sorting, folder='wf_folder')
+    we = extract_waveforms(recording=recording, sorting=sorting, folder='wf_folder')
 
     # merges is a list of lists, with unit_ids to be merged.
-    merges = get_potential_auto_merge(we, minimum_spikes=1000, maximum_distance_um=150.,
+    merges = get_potential_auto_merge(waveform_extractor=we, minimum_spikes=1000, maximum_distance_um=150.,
                                       peak_sign="neg", bin_ms=0.25, window_ms=100.,
                                       corr_diff_thresh=0.16, template_diff_thresh=0.25,
                                       censored_period_ms=0., refractory_period_ms=1.0,
@@ -73,7 +73,7 @@
                                       firing_contamination_balance=1.5)
 
     # here we apply the merges
-    clean_sorting = MergeUnitsSorting(sorting, merges)
+    clean_sorting = MergeUnitsSorting(parent_sorting=sorting, units_to_merge=merges)
 
 
 Manual curation with sorting view
@@ -98,24 +98,24 @@ The manual curation (including merges and labels) can be applied to a SpikeInter
     from spikeinterface.widgets import plot_sorting_summary
 
     # run a sorter and export waveforms
-    sorting = run_sorter('kilosort2', recording)
-    we = extract_waveforms(recording, sorting, folder='wf_folder')
+    sorting = run_sorter(sorter_name='kilosort2', recording=recording)
+    we = extract_waveforms(recording=recording, sorting=sorting, folder='wf_folder')
 
     # some postprocessing is required
-    _ = compute_spike_amplitudes(we)
-    _ = compute_unit_locations(we)
-    _ = compute_template_similarity(we)
-    _ = compute_correlograms(we)
+    _ = compute_spike_amplitudes(waveform_extractor=we)
+    _ = compute_unit_locations(waveform_extractor=we)
+    _ = compute_template_similarity(waveform_extractor=we)
+    _ = compute_correlograms(waveform_extractor=we)
 
     # This loads the data to the cloud for web-based plotting and sharing
-    plot_sorting_summary(we, curation=True, backend='sortingview')
+    plot_sorting_summary(waveform_extractor=we, curation=True, backend='sortingview')
 
     # we open the printed link URL in a browser
     # - make manual merges and labeling
     # - from the curation box, click on "Save as snapshot (sha1://)"
 
     # copy the uri
     sha_uri = "sha1://59feb326204cf61356f1a2eb31f04d8e0177c4f1"
-    clean_sorting = apply_sortingview_curation(sorting, uri_or_json=sha_uri)
+    clean_sorting = apply_sortingview_curation(sorting=sorting, uri_or_json=sha_uri)
 
 Note that you can also "Export as JSON" and pass the json file as :code:`uri_or_json` parameter.
diff --git a/doc/modules/exporters.rst b/doc/modules/exporters.rst index fa637f898b..1d23f9ad6f 100644 --- a/doc/modules/exporters.rst +++ b/doc/modules/exporters.rst @@ -28,15 +28,14 @@ The input of the :py:func:`~spikeinterface.exporters.export_to_phy` is a :code:` from spikeinterface.exporters import export_to_phy # the waveforms are sparse so it is faster to export to phy - folder = 'waveforms' - we = extract_waveforms(recording, sorting, folder, sparse=True) + we = extract_waveforms(recording=recording, sorting=sorting, folder='waveforms', sparse=True) # some computations are done before to control all options - compute_spike_amplitudes(we) - compute_principal_components(we, n_components=3, mode='by_channel_global') + compute_spike_amplitudes(waveform_extractor = we) + compute_principal_components(waveform_extractor=we, n_components=3, mode='by_channel_global') # the export process is fast because everything is pre-computed - export_to_phy(we, output_folder='path/to/phy_folder') + export_to_phy(wavefor_extractor=we, output_folder='path/to/phy_folder') @@ -72,12 +71,12 @@ with many units! # the waveforms are sparse for more interpretable figures - we = extract_waveforms(recording, sorting, folder='path/to/wf', sparse=True) + we = extract_waveforms(recording=recording, sorting=sorting, folder='path/to/wf', sparse=True) # some computations are done before to control all options - compute_spike_amplitudes(we) - compute_correlograms(we) - compute_quality_metrics(we, metric_names=['snr', 'isi_violation', 'presence_ratio']) + compute_spike_amplitudes(waveform_extractor=we) + compute_correlograms(waveform_extractor=we) + compute_quality_metrics(waveform_extractor=we, metric_names=['snr', 'isi_violation', 'presence_ratio']) # the export process - export_report(we, output_folder='path/to/spikeinterface-report-folder') + export_report(waveform_extractor=we, output_folder='path/to/spikeinterface-report-folder') diff --git a/doc/modules/extractors.rst b/doc/modules/extractors.rst index 5aed24ca41..1eeca9a325 100644 --- a/doc/modules/extractors.rst +++ b/doc/modules/extractors.rst @@ -6,18 +6,19 @@ Overview The :py:mod:`~spikeinterface.extractors` module allows you to load :py:class:`~spikeinterface.core.BaseRecording`, :py:class:`~spikeinterface.core.BaseSorting`, and :py:class:`~spikeinterface.core.BaseEvent` objects from -a large variety of acquisition systems and spike sorting outputs. +a large variety of acquisition systems and spike sorting outputs. Most of the :code:`Recording` classes are implemented by wrapping the `NEO rawio implementation `_. Most of the :code:`Sorting` classes are instead directly implemented in SpikeInterface. - Although SpikeInterface is object-oriented (class-based), each object can also be loaded with a convenient :code:`read_XXXXX()` function. +.. code-block:: python + import spikeinterface.extractors as se Read one Recording @@ -27,32 +28,44 @@ Every format can be read with a simple function: .. code-block:: python - recording_oe = read_openephys("open-ephys-folder") + recording_oe = read_openephys(folder_path="open-ephys-folder") - recording_spikeglx = read_spikeglx("spikeglx-folder") + recording_spikeglx = read_spikeglx(folder_path="spikeglx-folder") - recording_blackrock = read_blackrock("blackrock-folder") + recording_blackrock = read_blackrock(folder_path="blackrock-folder") - recording_mearec = read_mearec("mearec_file.h5") + recording_mearec = read_mearec(file_path="mearec_file.h5") Importantly, some formats directly handle the probe information: .. 
code-block:: python - recording_spikeglx = read_spikeglx("spikeglx-folder") + recording_spikeglx = read_spikeglx(folder_path="spikeglx-folder") print(recording_spikeglx.get_probe()) - recording_mearec = read_mearec("mearec_file.h5") + recording_mearec = read_mearec(file_path="mearec_file.h5") print(recording_mearec.get_probe()) +Although most recordings are loaded with the :py:mod:`~spikeinterface.extractors` +a few file formats are loaded from the :py:mod:`~spikeinterface.core` module + +.. code-block:: python + + import spikeinterface as si + + recording_binary = si.read_binary(file_path='binary.bin') + + recording_zarr = si.read_zarr(file_path='zarr_file.zarr') + + Read one Sorting ---------------- .. code-block:: python - sorting_KS = read_kilosort("kilosort-folder") + sorting_KS = read_kilosort(folder_path="kilosort-folder") Read one Event @@ -60,7 +73,7 @@ Read one Event .. code-block:: python - events_OE = read_openephys_event("open-ephys-folder") + events_OE = read_openephys_event(folder_path="open-ephys-folder") For a comprehensive list of compatible technologies, see :ref:`compatible_formats`. @@ -77,7 +90,7 @@ The actual reading will be done on demand using the :py:meth:`~spikeinterface.co .. code-block:: python # opening a 40GB SpikeGLX dataset is fast - recording_spikeglx = read_spikeglx("spikeglx-folder") + recording_spikeglx = read_spikeglx(folder_path="spikeglx-folder") # this really does load the full 40GB into memory : not recommended!!!!! traces = recording_spikeglx.get_traces(start_frame=None, end_frame=None, return_scaled=False) diff --git a/doc/modules/motion_correction.rst b/doc/modules/motion_correction.rst index afedc4f982..96ecc1fcec 100644 --- a/doc/modules/motion_correction.rst +++ b/doc/modules/motion_correction.rst @@ -77,12 +77,12 @@ We currently have 3 presets: .. code-block:: python # read and preprocess - rec = read_spikeglx('/my/Neuropixel/recording') - rec = bandpass_filter(rec) - rec = common_reference(rec) + rec = read_spikeglx(folder_path='/my/Neuropixel/recording') + rec = bandpass_filter(recording=rec) + rec = common_reference(recording=rec) # then correction is one line of code - rec_corrected = correct_motion(rec, preset="nonrigid_accurate") + rec_corrected = correct_motion(recording=rec, preset="nonrigid_accurate") The process is quite long due the two first steps (activity profile + motion inference) But the return :code:`rec_corrected` is a lazy recording object that will interpolate traces on the @@ -94,17 +94,17 @@ If you want to user other presets, this is as easy as: .. code-block:: python # mimic kilosort motion - rec_corrected = correct_motion(rec, preset="kilosort_like") + rec_corrected = correct_motion(recording=rec, preset="kilosort_like") # super but less accurate and rigid - rec_corrected = correct_motion(rec, preset="rigid_fast") + rec_corrected = correct_motion(recording=rec, preset="rigid_fast") Optionally any parameter from the preset can be overwritten: .. code-block:: python - rec_corrected = correct_motion(rec, preset="nonrigid_accurate", + rec_corrected = correct_motion(recording=rec, preset="nonrigid_accurate", detect_kwargs=dict( detect_threshold=10.), estimate_motion_kwargs=dic( @@ -123,7 +123,7 @@ and checking. The folder will contain the motion vector itself of course but als .. 
code-block:: python motion_folder = '/somewhere/to/save/the/motion' - rec_corrected = correct_motion(rec, preset="nonrigid_accurate", folder=motion_folder) + rec_corrected = correct_motion(recording=rec, preset="nonrigid_accurate", folder=motion_folder) # and then motion_info = load_motion_info(motion_folder) @@ -156,14 +156,16 @@ The high-level :py:func:`~spikeinterface.preprocessing.correct_motion()` is inte job_kwargs = dict(chunk_duration="1s", n_jobs=20, progress_bar=True) # Step 1 : activity profile - peaks = detect_peaks(rec, method="locally_exclusive", detect_threshold=8.0, **job_kwargs) + peaks = detect_peaks(recording=rec, method="locally_exclusive", detect_threshold=8.0, **job_kwargs) # (optional) sub-select some peaks to speed up the localization - peaks = select_peaks(peaks, ...) - peak_locations = localize_peaks(rec, peaks, method="monopolar_triangulation",radius_um=75.0, + peaks = select_peaks(peaks=peaks, ...) + peak_locations = localize_peaks(recording=rec, peaks=peaks, method="monopolar_triangulation",radius_um=75.0, max_distance_um=150.0, **job_kwargs) # Step 2: motion inference - motion, temporal_bins, spatial_bins = estimate_motion(rec, peaks, peak_locations, + motion, temporal_bins, spatial_bins = estimate_motion(recording=rec, + peaks=peaks, + peak_locations=peak_locations, method="decentralized", direction="y", bin_duration_s=2.0, @@ -173,7 +175,9 @@ The high-level :py:func:`~spikeinterface.preprocessing.correct_motion()` is inte # Step 3: motion interpolation # this step is lazy - rec_corrected = interpolate_motion(rec, motion, temporal_bins, spatial_bins, + rec_corrected = interpolate_motion(recording=rec, motion=motion, + temporal_bins=temporal_bins, + spatial_bins=spatial_bins, border_mode="remove_channels", spatial_interpolation_method="kriging", sigma_um=30.) @@ -196,20 +200,20 @@ different preprocessing chains: one for motion correction and one for spike sort .. code-block:: python - raw_rec = read_spikeglx(...) + raw_rec = read_spikeglx(folder_path='/spikeglx_folder') # preprocessing 1 : bandpass (this is smoother) + cmr - rec1 = si.bandpass_filter(raw_rec, freq_min=300., freq_max=5000.) - rec1 = si.common_reference(rec1, reference='global', operator='median') + rec1 = si.bandpass_filter(recording=raw_rec, freq_min=300., freq_max=5000.) + rec1 = si.common_reference(recording=rec1, reference='global', operator='median') # here the corrected recording is done on the preprocessing 1 # rec_corrected1 will not be used for sorting! motion_folder = '/my/folder' - rec_corrected1 = correct_motion(rec1, preset="nonrigid_accurate", folder=motion_folder) + rec_corrected1 = correct_motion(recording=rec1, preset="nonrigid_accurate", folder=motion_folder) # preprocessing 2 : highpass + cmr - rec2 = si.highpass_filter(raw_rec, freq_min=300.) - rec2 = si.common_reference(rec2, reference='global', operator='median') + rec2 = si.highpass_filter(recording=raw_rec, freq_min=300.) 
+ rec2 = si.common_reference(recording=rec2, reference='global', operator='median') # we use another preprocessing for the final interpolation motion_info = load_motion_info(motion_folder) @@ -220,7 +224,7 @@ different preprocessing chains: one for motion correction and one for spike sort spatial_bins=motion_info['spatial_bins'], **motion_info['parameters']['interpolate_motion_kwargs']) - sorting = run_sorter("montainsort5", rec_corrected2) + sorting = run_sorter(sorter_name="montainsort5", recording=rec_corrected2) References diff --git a/doc/modules/postprocessing.rst b/doc/modules/postprocessing.rst index a560f4d5c9..112c6e367d 100644 --- a/doc/modules/postprocessing.rst +++ b/doc/modules/postprocessing.rst @@ -14,9 +14,9 @@ WaveformExtractor extensions There are several postprocessing tools available, and all of them are implemented as a :py:class:`~spikeinterface.core.BaseWaveformExtractorExtension`. All computations on top -of a WaveformExtractor will be saved along side the WaveformExtractor itself (sub folder, zarr path or sub dict). +of a :code:`WaveformExtractor` will be saved along side the :code:`WaveformExtractor` itself (sub folder, zarr path or sub dict). This workflow is convenient for retrieval of time-consuming computations (such as pca or spike amplitudes) when reloading a -WaveformExtractor. +:code:`WaveformExtractor`. :py:class:`~spikeinterface.core.BaseWaveformExtractorExtension` objects are tightly connected to the parent :code:`WaveformExtractor` object, so that operations done on the :code:`WaveformExtractor`, such as saving, @@ -80,9 +80,9 @@ This extension computes the principal components of the waveforms. There are sev * "by_channel_local" (default): fits one PCA model for each by_channel * "by_channel_global": fits the same PCA model to all channels (also termed temporal PCA) -* "concatenated": contatenates all channels and fits a PCA model on the concatenated data +* "concatenated": concatenates all channels and fits a PCA model on the concatenated data -If the input :code:`WaveformExtractor` is sparse, the sparsity is used when computing PCA. +If the input :code:`WaveformExtractor` is sparse, the sparsity is used when computing the PCA. For dense waveforms, sparsity can also be passed as an argument. For more information, see :py:func:`~spikeinterface.postprocessing.compute_principal_components` @@ -127,7 +127,7 @@ with center of mass (:code:`method="center_of_mass"` - fast, but less accurate), For more information, see :py:func:`~spikeinterface.postprocessing.compute_spike_locations` -unit locations +unit_locations ^^^^^^^^^^^^^^ diff --git a/doc/modules/preprocessing.rst b/doc/modules/preprocessing.rst index 7c1f33f298..67f1e52011 100644 --- a/doc/modules/preprocessing.rst +++ b/doc/modules/preprocessing.rst @@ -22,8 +22,8 @@ In this code example, we build a preprocessing chain with two steps: import spikeinterface.preprocessing import bandpass_filter, common_reference # recording is a RecordingExtractor object - recording_f = bandpass_filter(recording, freq_min=300, freq_max=6000) - recording_cmr = common_reference(recording_f, operator="median") + recording_f = bandpass_filter(recording=recording, freq_min=300, freq_max=6000) + recording_cmr = common_reference(recording=recording_f, operator="median") These two preprocessors will not compute anything at instantiation, but the computation will be "on-demand" ("on-the-fly") when getting traces. @@ -38,7 +38,7 @@ save the object: .. 
code-block:: python
 
     # here the spykingcircus2 sorter engine directly uses the lazy "recording_cmr" object
-    sorting = run_sorter(recording_cmr, 'spykingcircus2')
+    sorting = run_sorter(recording=recording_cmr, sorter_name='spykingcircus2')
 
 Most of the external sorters, however, will need a binary file as input, so we can optionally save the
 processed recording with the efficient SpikeInterface :code:`save()` function:
@@ -64,12 +64,13 @@ dtype (unless specified otherwise):
 
 .. code-block:: python
 
+    import spikeinterface.extractors as se
     # spikeGLX is int16
-    rec_int16 = read_spikeglx("my_folder")
+    rec_int16 = se.read_spikeglx(folder_path="my_folder")
     # by default the int16 is kept
-    rec_f = bandpass_filter(rec_int16, freq_min=300, freq_max=6000)
+    rec_f = bandpass_filter(recording=rec_int16, freq_min=300, freq_max=6000)
     # we can force a float32 casting
-    rec_f2 = bandpass_filter(rec_int16, freq_min=300, freq_max=6000, dtype='float32')
+    rec_f2 = bandpass_filter(recording=rec_int16, freq_min=300, freq_max=6000, dtype='float32')
 
 Some scaling pre-processors, such as :code:`whiten()` or :code:`zscore()`, will force the output to :code:`float32`.
 
@@ -83,6 +84,8 @@ The full list of preprocessing functions can be found here: :ref:`api_preprocess
 
 Here is a full list of possible preprocessing steps, grouped by type of processing:
 
+For all examples :code:`rec` is a :code:`RecordingExtractor`.
+
 filter() / bandpass_filter() / notch_filter() / highpass_filter()
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -98,7 +101,7 @@ Important aspects of filtering functions:
 
 .. code-block:: python
 
-    rec_f = bandpass_filter(rec, freq_min=300, freq_max=6000)
+    rec_f = bandpass_filter(recording=rec, freq_min=300, freq_max=6000)
 
 * :py:func:`~spikeinterface.preprocessing.filter()`
 
@@ -119,7 +122,7 @@ There are various options when combining :code:`operator` and :code:`reference`
 
 .. code-block:: python
 
-    rec_cmr = common_reference(rec, operator="median", reference="global")
+    rec_cmr = common_reference(recording=rec, operator="median", reference="global")
 
 * :py:func:`~spikeinterface.preprocessing.common_reference()`
 
@@ -144,8 +147,8 @@ difference on artifact removal.
 
 .. code-block:: python
 
-    rec_shift = phase_shift(rec)
-    rec_cmr = common_reference(rec_shift, operator="median", reference="global")
+    rec_shift = phase_shift(recording=rec)
+    rec_cmr = common_reference(recording=rec_shift, operator="median", reference="global")
 
 
 
@@ -168,7 +171,7 @@ centered with unitary variance on each channel.
 
 .. code-block:: python
 
-    rec_normed = zscore(rec)
+    rec_normed = zscore(recording=rec)
 
 * :py:func:`~spikeinterface.preprocessing.normalize_by_quantile()`
 * :py:func:`~spikeinterface.preprocessing.scale()`
 
@@ -186,7 +189,7 @@ The whitened traces are then the dot product between the traces and the :code:`W
 
 .. code-block:: python
 
-    rec_w = whiten(rec)
+    rec_w = whiten(recording=rec)
 
 * :py:func:`~spikeinterface.preprocessing.whiten()`
 
@@ -199,7 +202,7 @@ The :code:`blank_staturation()` function is similar, but it automatically estima
 
 .. code-block:: python
 
-    rec_w = clip(rec, a_min=-250., a_max=260)
+    rec_w = clip(recording=rec, a_min=-250., a_max=260)
 
 * :py:func:`~spikeinterface.preprocessing.clip()`
 * :py:func:`~spikeinterface.preprocessing.blank_staturation()`
 
@@ -234,11 +237,11 @@ interpolated with the :code:`interpolate_bad_channels()` function (channels labe
 
 .. 
code-block:: python # detect - bad_channel_ids, channel_labels = detect_bad_channels(rec) + bad_channel_ids, channel_labels = detect_bad_channels(recording=rec) # Case 1 : remove then - rec_clean = recording.remove_channels(bad_channel_ids) + rec_clean = recording.remove_channels(remove_channel_ids=bad_channel_ids) # Case 2 : interpolate then - rec_clean = interpolate_bad_channels(rec, bad_channel_ids) + rec_clean = interpolate_bad_channels(recording=rec, bad_channel_ids=bad_channel_ids) * :py:func:`~spikeinterface.preprocessing.detect_bad_channels()` @@ -257,13 +260,13 @@ remove_artifacts() Given an external list of trigger times, :code:`remove_artifacts()` function can remove artifacts with several strategies: -* replace with zeros (blank) -* make a linear or cubic interpolation -* remove the median or average template (with optional time jitter and amplitude scaling correction) +* replace with zeros (blank) :code:`'zeros'` +* make a linear (:code:`'linear'`) or cubic (:code:`'cubic'`) interpolation +* remove the median (:code:`'median'`) or average (:code:`'avereage'`) template (with optional time jitter and amplitude scaling correction) .. code-block:: python - rec_clean = remove_artifacts(rec, list_triggers) + rec_clean = remove_artifacts(recording=rec, list_triggers=[100, 200, 300], mode='zeros') * :py:func:`~spikeinterface.preprocessing.remove_artifacts()` @@ -276,7 +279,7 @@ Similarly to :code:`numpy.astype()`, the :code:`astype()` casts the traces to th .. code-block:: python - rec_int16 = astype(rec_float, "int16") + rec_int16 = astype(recording=rec_float, dtype="int16") For recordings whose traces are unsigned (e.g. Maxwell Biosystems), the :code:`unsigned_to_signed()` function makes them @@ -286,7 +289,7 @@ is subtracted, and the traces are finally cast to :code:`int16`: .. code-block:: python - rec_int16 = unsigned_to_signed(rec_uint16) + rec_int16 = unsigned_to_signed(recording=rec_uint16) * :py:func:`~spikeinterface.preprocessing.astype()` * :py:func:`~spikeinterface.preprocessing.unsigned_to_signed()` @@ -300,7 +303,7 @@ required. .. code-block:: python - rec_with_more_channels = zero_channel_pad(rec, 128) + rec_with_more_channels = zero_channel_pad(parent_recording=rec, num_channels=128) * :py:func:`~spikeinterface.preprocessing.zero_channel_pad()` @@ -331,7 +334,7 @@ How to implement "IBL destriping" or "SpikeGLX CatGT" in SpikeInterface SpikeGLX has a built-in function called `CatGT `_ to apply some preprocessing on the traces to remove noise and artifacts. IBL also has a standardized pipeline for preprocessed traces a bit similar to CatGT which is called "destriping" [IBL_spikesorting]_. -In these both cases, the traces are entiely read, processed and written back to a file. +In both these cases, the traces are entirely read, processed and written back to a file. SpikeInterface can reproduce similar results without the need to write back to a file by building a *lazy* preprocessing chain. Optionally, the result can still be written to a binary (or a zarr) file. @@ -341,12 +344,12 @@ Here is a recipe to mimic the **IBL destriping**: .. 
code-block:: python - rec = read_spikeglx('my_spikeglx_folder') - rec = highpass_filter(rec, n_channel_pad=60) - rec = phase_shift(rec) - bad_channel_ids = detect_bad_channels(rec) - rec = interpolate_bad_channels(rec, bad_channel_ids) - rec = highpass_spatial_filter(rec) + rec = read_spikeglx(folder_path='my_spikeglx_folder') + rec = highpass_filter(recording=rec, n_channel_pad=60) + rec = phase_shift(recording=rec) + bad_channel_ids = detect_bad_channels(recording=rec) + rec = interpolate_bad_channels(recording=rec, bad_channel_ids=bad_channel_ids) + rec = highpass_spatial_filter(recording=rec) # optional rec.save(folder='clean_traces', n_jobs=10, chunk_duration='1s', progres_bar=True) @@ -356,9 +359,9 @@ Here is a recipe to mimic the **SpikeGLX CatGT**: .. code-block:: python - rec = read_spikeglx('my_spikeglx_folder') - rec = phase_shift(rec) - rec = common_reference(rec, operator="median", reference="global") + rec = read_spikeglx(folder_path='my_spikeglx_folder') + rec = phase_shift(recording=rec) + rec = common_reference(recording=rec, operator="median", reference="global") # optional rec.save(folder='clean_traces', n_jobs=10, chunk_duration='1s', progres_bar=True) @@ -369,7 +372,6 @@ Of course, these pipelines can be enhanced and customized using other available - Preprocessing on Snippets ------------------------- diff --git a/doc/modules/qualitymetrics.rst b/doc/modules/qualitymetrics.rst index 447d83db52..ec1788350f 100644 --- a/doc/modules/qualitymetrics.rst +++ b/doc/modules/qualitymetrics.rst @@ -47,16 +47,16 @@ This code snippet shows how to compute quality metrics (with or without principa .. code-block:: python - we = si.load_waveforms(...) # start from a waveform extractor + we = si.load_waveforms(folder='waveforms') # start from a waveform extractor # without PC - metrics = compute_quality_metrics(we, metric_names=['snr']) + metrics = compute_quality_metrics(waveform_extractor=we, metric_names=['snr']) assert 'snr' in metrics.columns # with PCs from spikeinterface.postprocessing import compute_principal_components - pca = compute_principal_components(we, n_components=5, mode='by_channel_local') - metrics = compute_quality_metrics(we) + pca = compute_principal_components(waveform_extractor=we, n_components=5, mode='by_channel_local') + metrics = compute_quality_metrics(waveform_extractor=we) assert 'isolation_distance' in metrics.columns For more information about quality metrics, check out this excellent diff --git a/doc/modules/qualitymetrics/amplitude_cv.rst b/doc/modules/qualitymetrics/amplitude_cv.rst index 13117b607c..81d3b4f12d 100644 --- a/doc/modules/qualitymetrics/amplitude_cv.rst +++ b/doc/modules/qualitymetrics/amplitude_cv.rst @@ -37,7 +37,7 @@ Example code # Make recording, sorting and wvf_extractor object for your data. # It is required to run `compute_spike_amplitudes(wvf_extractor)` or # `compute_amplitude_scalings(wvf_extractor)` (if missing, values will be NaN) - amplitude_cv_median, amplitude_cv_range = sqm.compute_amplitude_cv_metrics(wvf_extractor) + amplitude_cv_median, amplitude_cv_range = sqm.compute_amplitude_cv_metrics(waveform_extractor=wvf_extractor) # amplitude_cv_median and amplitude_cv_range are dicts containing the unit ids as keys, # and their amplitude_cv metrics as values. 
diff --git a/doc/modules/qualitymetrics/amplitude_median.rst b/doc/modules/qualitymetrics/amplitude_median.rst index 3ac52560e8..c77a57b033 100644 --- a/doc/modules/qualitymetrics/amplitude_median.rst +++ b/doc/modules/qualitymetrics/amplitude_median.rst @@ -24,7 +24,7 @@ Example code # It is also recommended to run `compute_spike_amplitudes(wvf_extractor)` # in order to use amplitude values from all spikes. - amplitude_medians = sqm.compute_amplitude_medians(wvf_extractor) + amplitude_medians = sqm.compute_amplitude_medians(waveform_extractor=wvf_extractor) # amplitude_medians is a dict containing the unit IDs as keys, # and their estimated amplitude medians as values. diff --git a/doc/modules/qualitymetrics/d_prime.rst b/doc/modules/qualitymetrics/d_prime.rst index e3bd61c580..9b540be743 100644 --- a/doc/modules/qualitymetrics/d_prime.rst +++ b/doc/modules/qualitymetrics/d_prime.rst @@ -34,7 +34,7 @@ Example code import spikeinterface.qualitymetrics as sqm - d_prime = sqm.lda_metrics(all_pcs, all_labels, 0) + d_prime = sqm.lda_metrics(all_pcs=all_pcs, all_labels=all_labels, this_unit_id=0) Reference diff --git a/doc/modules/qualitymetrics/drift.rst b/doc/modules/qualitymetrics/drift.rst index ae52f7f883..dad2aafe7c 100644 --- a/doc/modules/qualitymetrics/drift.rst +++ b/doc/modules/qualitymetrics/drift.rst @@ -43,10 +43,10 @@ Example code import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - # It is required to run `compute_spike_locations(wvf_extractor)` + # It is required to run `compute_spike_locations(wvf_extractor) first` # (if missing, values will be NaN) - drift_ptps, drift_stds, drift_mads = sqm.compute_drift_metrics(wvf_extractor, peak_sign="neg") - # drift_ptps, drift_stds, and drift_mads are dict containing the units' ID as keys, + drift_ptps, drift_stds, drift_mads = sqm.compute_drift_metrics(waveform_extractor=wvf_extractor, peak_sign="neg") + # drift_ptps, drift_stds, and drift_mads are each a dict containing the unit IDs as keys, # and their metrics as values. diff --git a/doc/modules/qualitymetrics/firing_range.rst b/doc/modules/qualitymetrics/firing_range.rst index 925539e9c6..1cbd903c7a 100644 --- a/doc/modules/qualitymetrics/firing_range.rst +++ b/doc/modules/qualitymetrics/firing_range.rst @@ -24,7 +24,7 @@ Example code import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - firing_range = sqm.compute_firing_ranges(wvf_extractor) + firing_range = sqm.compute_firing_ranges(waveform_extractor=wvf_extractor) # firing_range is a dict containing the unit IDs as keys, # and their firing firing_range as values (in Hz). diff --git a/doc/modules/qualitymetrics/firing_rate.rst b/doc/modules/qualitymetrics/firing_rate.rst index c0e15d7c2e..ef8cb3d8f4 100644 --- a/doc/modules/qualitymetrics/firing_rate.rst +++ b/doc/modules/qualitymetrics/firing_rate.rst @@ -40,7 +40,7 @@ With SpikeInterface: import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - firing_rate = sqm.compute_firing_rates(wvf_extractor) + firing_rate = sqm.compute_firing_rates(waveform_extractor=wvf_extractor) # firing_rate is a dict containing the unit IDs as keys, # and their firing rates across segments as values (in Hz). 
diff --git a/doc/modules/qualitymetrics/isolation_distance.rst b/doc/modules/qualitymetrics/isolation_distance.rst index 640a5a8b5a..6ba0d0b1ec 100644 --- a/doc/modules/qualitymetrics/isolation_distance.rst +++ b/doc/modules/qualitymetrics/isolation_distance.rst @@ -23,6 +23,16 @@ Expectation and use Isolation distance can be interpreted as a measure of distance from the cluster to the nearest other cluster. A well isolated unit should have a large isolation distance. +Example code +------------ + +.. code-block:: python + + import spikeinterface.qualitymetrics as sqm + + iso_distance, _ = sqm.isolation_distance(all_pcs=all_pcs, all_labels=all_labels, this_unit_id=0) + + References ---------- diff --git a/doc/modules/qualitymetrics/l_ratio.rst b/doc/modules/qualitymetrics/l_ratio.rst index b37913ba58..ae31ab40a4 100644 --- a/doc/modules/qualitymetrics/l_ratio.rst +++ b/doc/modules/qualitymetrics/l_ratio.rst @@ -37,6 +37,17 @@ Since this metric identifies unit separation, a high value indicates a highly co A well separated unit should have a low L-ratio ([Schmitzer-Torbert]_ et al.). + +Example code +------------ + +.. code-block:: python + + import spikeinterface.qualitymetrics as sqm + + _, l_ratio = sqm.isolation_distance(all_pcs=all_pcs, all_labels=all_labels, this_unit_id=0) + + References ---------- diff --git a/doc/modules/qualitymetrics/presence_ratio.rst b/doc/modules/qualitymetrics/presence_ratio.rst index 5a420c8ccf..ad0766d37c 100644 --- a/doc/modules/qualitymetrics/presence_ratio.rst +++ b/doc/modules/qualitymetrics/presence_ratio.rst @@ -27,7 +27,7 @@ Example code # Make recording, sorting and wvf_extractor object for your data. - presence_ratio = sqm.compute_presence_ratios(wvf_extractor) + presence_ratio = sqm.compute_presence_ratios(waveform_extractor=wvf_extractor) # presence_ratio is a dict containing the unit IDs as keys # and their presence ratio (between 0 and 1) as values. diff --git a/doc/modules/qualitymetrics/silhouette_score.rst b/doc/modules/qualitymetrics/silhouette_score.rst index b924cdbf73..7da01e0476 100644 --- a/doc/modules/qualitymetrics/silhouette_score.rst +++ b/doc/modules/qualitymetrics/silhouette_score.rst @@ -50,6 +50,16 @@ To reduce complexity the default implementation in SpikeInterface is to use the This can be changes by switching the silhouette method to either 'full' (the Rousseeuw implementation) or ('simplified', 'full') for both methods when entering the qm_params parameter. +Example code +------------ + +.. code-block:: python + + import spikeinterface.qualitymetrics as sqm + + simple_sil_score = sqm.simplified_silhouette_score(all_pcs=all_pcs, all_labels=all_labels, this_unit_id=0) + + References ---------- diff --git a/doc/modules/qualitymetrics/sliding_rp_violations.rst b/doc/modules/qualitymetrics/sliding_rp_violations.rst index de68c3a92f..fd53d7da3b 100644 --- a/doc/modules/qualitymetrics/sliding_rp_violations.rst +++ b/doc/modules/qualitymetrics/sliding_rp_violations.rst @@ -31,7 +31,7 @@ With SpikeInterface: # Make recording, sorting and wvf_extractor object for your data. 
- contamination = sqm.compute_sliding_rp_violations(wvf_extractor, bin_size_ms=0.25) + contamination = sqm.compute_sliding_rp_violations(waveform_extractor=wvf_extractor, bin_size_ms=0.25) References ---------- diff --git a/doc/modules/qualitymetrics/snr.rst b/doc/modules/qualitymetrics/snr.rst index b88d3291be..7f27a5078a 100644 --- a/doc/modules/qualitymetrics/snr.rst +++ b/doc/modules/qualitymetrics/snr.rst @@ -44,8 +44,7 @@ With SpikeInterface: import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - - SNRs = sqm.compute_snrs(wvf_extractor) + SNRs = sqm.compute_snrs(waveform_extractor=wvf_extractor) # SNRs is a dict containing the unit IDs as keys and their SNRs as values. Links to original implementations diff --git a/doc/modules/qualitymetrics/synchrony.rst b/doc/modules/qualitymetrics/synchrony.rst index 0750940199..d1a3c70a97 100644 --- a/doc/modules/qualitymetrics/synchrony.rst +++ b/doc/modules/qualitymetrics/synchrony.rst @@ -29,7 +29,7 @@ Example code import spikeinterface.qualitymetrics as sqm # Make recording, sorting and wvf_extractor object for your data. - synchrony = sqm.compute_synchrony_metrics(wvf_extractor, synchrony_sizes=(2, 4, 8)) + synchrony = sqm.compute_synchrony_metrics(waveform_extractor=wvf_extractor, synchrony_sizes=(2, 4, 8)) # synchrony is a tuple of dicts with the synchrony metrics for each unit diff --git a/doc/modules/sorters.rst b/doc/modules/sorters.rst index f3c8e7b733..5040b01ec2 100644 --- a/doc/modules/sorters.rst +++ b/doc/modules/sorters.rst @@ -49,15 +49,15 @@ to easily run spike sorters: from spikeinterface.sorters import run_sorter # run Tridesclous - sorting_TDC = run_sorter("tridesclous", recording, output_folder="/folder_TDC") + sorting_TDC = run_sorter(sorter_name="tridesclous", recording=recording, output_folder="/folder_TDC") # run Kilosort2.5 - sorting_KS2_5 = run_sorter("kilosort2_5", recording, output_folder="/folder_KS2.5") + sorting_KS2_5 = run_sorter(sorter_name="kilosort2_5", recording=recording, output_folder="/folder_KS2.5") # run IronClust - sorting_IC = run_sorter("ironclust", recording, output_folder="/folder_IC") + sorting_IC = run_sorter(sorter_name="ironclust", recording=recording, output_folder="/folder_IC") # run pyKilosort - sorting_pyKS = run_sorter("pykilosort", recording, output_folder="/folder_pyKS") + sorting_pyKS = run_sorter(sorter_name="pykilosort", recording=recording, output_folder="/folder_pyKS") # run SpykingCircus - sorting_SC = run_sorter("spykingcircus", recording, output_folder="/folder_SC") + sorting_SC = run_sorter(sorter_name="spykingcircus", recording=recording, output_folder="/folder_SC") Then the output, which is a :py:class:`~spikeinterface.core.BaseSorting` object, can be easily @@ -81,10 +81,10 @@ Spike-sorter-specific parameters can be controlled directly from the .. code-block:: python - sorting_TDC = run_sorter('tridesclous', recording, output_folder="/folder_TDC", + sorting_TDC = run_sorter(sorter_name='tridesclous', recording=recording, output_folder="/folder_TDC", detect_threshold=8.) - sorting_KS2_5 = run_sorter("kilosort2_5", recording, output_folder="/folder_KS2.5" + sorting_KS2_5 = run_sorter(sorter_name="kilosort2_5", recording=recording, output_folder="/folder_KS2.5" do_correction=False, preclust_threshold=6, freq_min=200.) 
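When overriding sorter-specific parameters as in the example above, it helps to first list what a given sorter actually exposes. The helper names in this sketch (``get_default_sorter_params`` and ``get_sorter_params_description``) are taken from the sorters module as an assumption and do not appear in the diff itself:

.. code-block:: python

    # Sketch: inspect a sorter's parameters before overriding them.
    from spikeinterface.sorters import get_default_sorter_params, get_sorter_params_description

    default_params = get_default_sorter_params("tridesclous")
    descriptions = get_sorter_params_description("tridesclous")
    for name, value in default_params.items():
        # print each parameter with its default value and a short description
        print(name, value, "-", descriptions.get(name, ""))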
@@ -185,7 +185,7 @@ The following code creates a test recording and runs a containerized spike sorte ) test_recording = test_recording.save(folder="test-docker-folder") - sorting = ss.run_sorter('kilosort3', + sorting = ss.run_sorter(sorter_name='kilosort3', recording=test_recording, output_folder="kilosort3", singularity_image=True) @@ -201,7 +201,7 @@ To run in Docker instead of Singularity, use ``docker_image=True``. .. code-block:: python - sorting = run_sorter('kilosort3', recording=test_recording, + sorting = run_sorter(sorter_name='kilosort3', recording=test_recording, output_folder="/tmp/kilosort3", docker_image=True) To use a specific image, set either ``docker_image`` or ``singularity_image`` to a string, @@ -209,7 +209,7 @@ e.g. ``singularity_image="spikeinterface/kilosort3-compiled-base:0.1.0"``. .. code-block:: python - sorting = run_sorter("kilosort3", + sorting = run_sorter(sorter_name="kilosort3", recording=test_recording, output_folder="kilosort3", singularity_image="spikeinterface/kilosort3-compiled-base:0.1.0") @@ -271,7 +271,7 @@ And use the custom image whith the :code:`run_sorter` function: .. code-block:: python - sorting = run_sorter("kilosort3", + sorting = run_sorter(sorter_name="kilosort3", recording=recording, docker_image="my-user/ks3-with-spikeinterface-test:0.1.0") @@ -302,7 +302,7 @@ an :code:`engine` that supports parallel processing (such as :code:`joblib` or : ] # run in loop - sortings = run_sorter_jobs(job_list, engine='loop') + sortings = run_sorter_jobs(job_list=job_list, engine='loop') @@ -314,11 +314,11 @@ an :code:`engine` that supports parallel processing (such as :code:`joblib` or : .. code-block:: python - run_sorter_jobs(job_list, engine='loop') + run_sorter_jobs(job_list=job_list, engine='loop') - run_sorter_jobs(job_list, engine='joblib', engine_kwargs={'n_jobs': 2}) + run_sorter_jobs(job_list=job_list, engine='joblib', engine_kwargs={'n_jobs': 2}) - run_sorter_jobs(job_list, engine='slurm', engine_kwargs={'cpus_per_task': 10, 'mem', '5G'}) + run_sorter_jobs(job_list=job_list, engine='slurm', engine_kwargs={'cpus_per_task': 10, 'mem': '5G'}) Spike sorting by group @@ -374,7 +374,7 @@ In this example, we create a 16-channel recording with 4 tetrodes: # here the result is a dict of a sorting object sortings = {} for group, sub_recording in recordings.items(): - sorting = run_sorter('kilosort2', recording, output_folder=f"folder_KS2_group{group}") + sorting = run_sorter(sorter_name='kilosort2', recording=recording, output_folder=f"folder_KS2_group{group}") sortings[group] = sorting **Option 2 : Automatic splitting** @@ -382,7 +382,7 @@ In this example, we create a 16-channel recording with 4 tetrodes: .. 
code-block:: python # here the result is one sorting that aggregates all sub sorting objects - aggregate_sorting = run_sorter_by_property('kilosort2', recording_4_tetrodes, + aggregate_sorting = run_sorter_by_property(sorter_name='kilosort2', recording=recording_4_tetrodes, grouping_property='group', working_folder='working_path') @@ -421,7 +421,7 @@ do not handle multi-segment, and in that case we will use the # multirecording has 4 segments of 10s each # run tridesclous in multi-segment mode - multisorting = si.run_sorter('tridesclous', multirecording) + multisorting = si.run_sorter(sorter_name='tridesclous', recording=multirecording) print(multisorting) # Case 2: the sorter DOES NOT handle multi-segment objects @@ -433,7 +433,7 @@ do not handle multi-segment, and in that case we will use the # multirecording has 1 segment of 40s each # run mountainsort4 in mono-segment mode - multisorting = si.run_sorter('mountainsort4', multirecording) + multisorting = si.run_sorter(sorter_name='mountainsort4', recording=multirecording) See also the :ref:`multi_seg` section. @@ -507,7 +507,7 @@ message will appear indicating how to install the given sorter, .. code:: python - recording = run_sorter('ironclust', recording) + recording = run_sorter(sorter_name='ironclust', recording=recording) throws the error, @@ -540,7 +540,7 @@ From the user's perspective, they behave exactly like the external sorters: .. code-block:: python - sorting = run_sorter("spykingcircus2", recording, "/tmp/folder") + sorting = run_sorter(sorter_name="spykingcircus2", recording=recording, output_folder="/tmp/folder") Contributing diff --git a/doc/modules/sortingcomponents.rst b/doc/modules/sortingcomponents.rst index 422eaea890..f3371f7e7b 100644 --- a/doc/modules/sortingcomponents.rst +++ b/doc/modules/sortingcomponents.rst @@ -47,7 +47,8 @@ follows: job_kwargs = dict(chunk_duration='1s', n_jobs=8, progress_bar=True) peaks = detect_peaks( - recording, method='by_channel', + recording=recording, + method='by_channel', peak_sign='neg', detect_threshold=5, exclude_sweep_ms=0.2, @@ -94,7 +95,7 @@ follows: job_kwargs = dict(chunk_duration='1s', n_jobs=8, progress_bar=True) - peak_locations = localize_peaks(recording, peaks, method='center_of_mass', + peak_locations = localize_peaks(recording=recording, peaks=peaks, method='center_of_mass', radius_um=70., ms_before=0.3, ms_after=0.6, **job_kwargs) @@ -122,7 +123,7 @@ For instance, the 'monopolar_triangulation' method will have: .. note:: - By convention in SpikeInterface, when a probe is described in 2d + By convention in SpikeInterface, when a probe is described in 3d * **'x'** is the width of the probe * **'y'** is the depth * **'z'** is orthogonal to the probe plane @@ -144,11 +145,11 @@ can be *hidden* by this process. from spikeinterface.sortingcomponents.peak_detection import detect_peaks - many_peaks = detect_peaks(...) + many_peaks = detect_peaks(...) # as in above example from spikeinterface.sortingcomponents.peak_selection import select_peaks - some_peaks = select_peaks(many_peaks, method='uniform', n_peaks=10000) + some_peaks = select_peaks(peaks=many_peaks, method='uniform', n_peaks=10000) Implemented methods are the following: @@ -183,15 +184,15 @@ Here is an example with non-rigid motion estimation: .. code-block:: python from spikeinterface.sortingcomponents.peak_detection import detect_peaks - peaks = detect_peaks(recording, ...) + peaks = detect_peaks(recording=ecording, ...) 
# as in above example from spikeinterface.sortingcomponents.peak_localization import localize_peaks - peak_locations = localize_peaks(recording, peaks, ...) + peak_locations = localize_peaks(recording=recording, peaks=peaks, ...) # as above from spikeinterface.sortingcomponents.motion_estimation import estimate_motion motion, temporal_bins, spatial_bins, - extra_check = estimate_motion(recording, peaks, peak_locations=peak_locations, + extra_check = estimate_motion(recording=recording, peaks=peaks, peak_locations=peak_locations, direction='y', bin_duration_s=10., bin_um=10., margin_um=0., method='decentralized_registration', rigid=False, win_shape='gaussian', win_step_um=50., win_sigma_um=150., @@ -217,7 +218,7 @@ Here is a short example that depends on the output of "Motion interpolation": from spikeinterface.sortingcomponents.motion_interpolation import InterpolateMotionRecording - recording_corrected = InterpolateMotionRecording(recording_with_drift, motion, temporal_bins, spatial_bins + recording_corrected = InterpolateMotionRecording(recording=recording_with_drift, motion=motion, temporal_bins=temporal_bins, spatial_bins=spatial_bins spatial_interpolation_method='kriging, border_mode='remove_channels') @@ -255,10 +256,10 @@ Different methods may need different inputs (for instance some of them require p .. code-block:: python from spikeinterface.sortingcomponents.peak_detection import detect_peaks - peaks = detect_peaks(recording, ...) + peaks = detect_peaks(recording, ...) # as in above example from spikeinterface.sortingcomponents.clustering import find_cluster_from_peaks - labels, peak_labels = find_cluster_from_peaks(recording, peaks, method="sliding_hdbscan") + labels, peak_labels = find_cluster_from_peaks(recording=recording, peaks=peaks, method="sliding_hdbscan") * **labels** : contains all possible labels diff --git a/doc/modules/widgets.rst b/doc/modules/widgets.rst index 8565e94fce..f37b2a5a6f 100644 --- a/doc/modules/widgets.rst +++ b/doc/modules/widgets.rst @@ -148,7 +148,7 @@ The :code:`plot_*(..., backend="matplotlib")` functions come with the following .. code-block:: python # matplotlib backend - w = plot_traces(recording, backend="matplotlib") + w = plot_traces(recording=recording, backend="matplotlib") **Output:** @@ -173,7 +173,7 @@ Each function has the following additional arguments: # ipywidgets backend also supports multiple "layers" for plot_traces rec_dict = dict(filt=recording, cmr=common_reference(recording)) - w = sw.plot_traces(rec_dict, backend="ipywidgets") + w = sw.plot_traces(recording=rec_dict, backend="ipywidgets") **Output:** @@ -196,8 +196,8 @@ The functions have the following additional arguments: .. code-block:: python # sortingview backend - w_ts = sw.plot_traces(recording, backend="ipywidgets") - w_ss = sw.plot_sorting_summary(recording, backend="sortingview") + w_ts = sw.plot_traces(recording=recording, backend="ipywidgets") + w_ss = sw.plot_sorting_summary(recording=recording, backend="sortingview") **Output:** @@ -249,7 +249,7 @@ The :code:`ephyviewer` backend is currently only available for the :py:func:`~sp .. code-block:: python - plot_traces(recording, backend="ephyviewer", mode="line", show_channel_ids=True) + plot_traces(recording=recording, backend="ephyviewer", mode="line", show_channel_ids=True) .. 
image:: ../images/plot_traces_ephyviewer.png From 5140a0423f8c33e3ba6906d48169508585e19807 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 29 Sep 2023 20:32:49 +0000 Subject: [PATCH 234/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- doc/modules/extractors.rst | 2 +- doc/modules/motion_correction.rst | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/modules/extractors.rst b/doc/modules/extractors.rst index 1eeca9a325..ccc5d2a311 100644 --- a/doc/modules/extractors.rst +++ b/doc/modules/extractors.rst @@ -6,7 +6,7 @@ Overview The :py:mod:`~spikeinterface.extractors` module allows you to load :py:class:`~spikeinterface.core.BaseRecording`, :py:class:`~spikeinterface.core.BaseSorting`, and :py:class:`~spikeinterface.core.BaseEvent` objects from -a large variety of acquisition systems and spike sorting outputs. +a large variety of acquisition systems and spike sorting outputs. Most of the :code:`Recording` classes are implemented by wrapping the `NEO rawio implementation `_. diff --git a/doc/modules/motion_correction.rst b/doc/modules/motion_correction.rst index 96ecc1fcec..e009e06236 100644 --- a/doc/modules/motion_correction.rst +++ b/doc/modules/motion_correction.rst @@ -163,8 +163,8 @@ The high-level :py:func:`~spikeinterface.preprocessing.correct_motion()` is inte max_distance_um=150.0, **job_kwargs) # Step 2: motion inference - motion, temporal_bins, spatial_bins = estimate_motion(recording=rec, - peaks=peaks, + motion, temporal_bins, spatial_bins = estimate_motion(recording=rec, + peaks=peaks, peak_locations=peak_locations, method="decentralized", direction="y", @@ -175,8 +175,8 @@ The high-level :py:func:`~spikeinterface.preprocessing.correct_motion()` is inte # Step 3: motion interpolation # this step is lazy - rec_corrected = interpolate_motion(recording=rec, motion=motion, - temporal_bins=temporal_bins, + rec_corrected = interpolate_motion(recording=rec, motion=motion, + temporal_bins=temporal_bins, spatial_bins=spatial_bins, border_mode="remove_channels", spatial_interpolation_method="kriging", From 714645c4fcf359612d2ba31ca4f79fbfd42165c4 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Fri, 29 Sep 2023 16:40:37 -0400 Subject: [PATCH 235/322] fix -> dict --- doc/modules/motion_correction.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/motion_correction.rst b/doc/modules/motion_correction.rst index 96ecc1fcec..8cffeebcf3 100644 --- a/doc/modules/motion_correction.rst +++ b/doc/modules/motion_correction.rst @@ -107,7 +107,7 @@ Optionally any parameter from the preset can be overwritten: rec_corrected = correct_motion(recording=rec, preset="nonrigid_accurate", detect_kwargs=dict( detect_threshold=10.), - estimate_motion_kwargs=dic( + estimate_motion_kwargs=dict( histogram_depth_smooth_um=8., time_horizon_s=120., ), From ac84b25530b04e30c80eba7c474be61279a7dd1f Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Sun, 1 Oct 2023 15:11:30 +0200 Subject: [PATCH 236/322] Fix docstrings --- .../postprocessing/template_metrics.py | 67 ++++++++++++++----- 1 file changed, 50 insertions(+), 17 deletions(-) diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index 774ebab4a9..82f55483b4 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ 
b/src/spikeinterface/postprocessing/template_metrics.py @@ -4,12 +4,13 @@ 22/04/2020 """ import numpy as np +import warnings +from typing import Optional from copy import deepcopy -from ..core import WaveformExtractor +from ..core import WaveformExtractor, ChannelSparsity from ..core.template_tools import get_template_extremum_channel from ..core.waveform_extractor import BaseWaveformExtractorExtension -import warnings global DEBUG @@ -211,16 +212,17 @@ def get_extension_function(): ) +# TODO: add typing def compute_template_metrics( waveform_extractor, - load_if_exists=False, - metric_names=None, - peak_sign="neg", - upsampling_factor=10, - sparsity=None, - include_multi_channel_metrics=False, - metrics_kwargs=None, - debug_plots=False, + load_if_exists: bool = False, + metric_names: Optional[list[str]] = None, + peak_sign: Optional[str] = "neg", + upsampling_factor: int = 10, + sparsity: Optional[ChannelSparsity] = None, + include_multi_channel_metrics: bool = False, + metrics_kwargs: dict = None, + debug_plots: bool = False, ): """ Compute template metrics including: @@ -247,13 +249,13 @@ def compute_template_metrics( metric_names : list, optional List of metrics to compute (see si.postprocessing.get_template_metric_names()), by default None peak_sign : {"neg", "pos"}, default: "neg" - The peak sign + Whether to use the positive ("pos") or negative ("neg") peaks to estimate extremum channels. upsampling_factor : int, default: 10 The upsampling factor to upsample the templates - sparsity: dict or None, default: None - Default is sparsity=None and template metric is computed on extremum channel only. - If given, the dictionary should contain a unit ids as keys and a channel id or a list of channel ids as values. - For more generating a sparsity dict, see the postprocessing.compute_sparsity() function. + sparsity: ChannelSparsity or None, default: None + If None, template metrics are computed on the extremum channel only. + If sparsity is given, template metrics are computed on all sparse channels of each unit. + For more on generating a ChannelSparsity, see the `~spikeinterface.compute_sparsity()` function. include_multi_channel_metrics: bool, default: False Whether to compute multi-channel metrics metrics_kwargs: dict @@ -261,7 +263,7 @@ def compute_template_metrics( * recovery_window_ms: the window in ms after the peak to compute the recovery_slope, default: 0.7 * peak_relative_threshold: the relative threshold to detect positive and negative peaks, default: 0.2 * peak_width_ms: the width in samples to detect peaks, default: 0.2 - * depth_direction: the direction to compute velocity above and below, default: "y" + * depth_direction: the direction to compute velocity above and below, default: "y" (see notes) * min_channels_for_velocity: the minimum number of channels above or below to compute velocity, default: 5 * min_r2_velocity: the minimum r2 to accept the velocity fit, default: 0.7 * exp_peak_function: the function to use to compute the peak amplitude for the exp decay, default: "ptp" @@ -284,7 +286,7 @@ def compute_template_metrics( ----- If any multi-channel metric is in the metric_names or include_multi_channel_metrics is True, sparsity must be None, so that one metric value will be computed per unit. - For multi-channel metrocs, 3D channel locations are not supported. By default, the depth direction is "y". + For multi-channel metrics, 3D channel locations are not supported. By default, the depth direction is "y". 
""" if debug_plots: global DEBUG @@ -359,6 +361,8 @@ def get_peak_to_valley(template_single, sampling_frequency, trough_idx=None, pea ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template trough_idx: int, default: None The index of the trough peak_idx: int, default: None @@ -383,6 +387,8 @@ def get_peak_trough_ratio(template_single, sampling_frequency=None, trough_idx=N ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template trough_idx: int, default: None The index of the trough peak_idx: int, default: None @@ -407,6 +413,8 @@ def get_half_width(template_single, sampling_frequency, trough_idx=None, peak_id ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template trough_idx: int, default: None The index of the trough peak_idx: int, default: None @@ -458,6 +466,8 @@ def get_repolarization_slope(template_single, sampling_frequency, trough_idx=Non ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template trough_idx: int, default: None The index of the trough """ @@ -499,6 +509,8 @@ def get_recovery_slope(template_single, sampling_frequency, peak_idx=None, **kwa ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template peak_idx: int, default: None The index of the peak **kwargs: Required kwargs: @@ -530,6 +542,8 @@ def get_num_positive_peaks(template_single, sampling_frequency, **kwargs): ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template **kwargs: Required kwargs: - peak_relative_threshold: the relative threshold to detect positive and negative peaks - peak_width_ms: the width in samples to detect peaks @@ -556,6 +570,8 @@ def get_num_negative_peaks(template_single, sampling_frequency, **kwargs): ---------- template_single: numpy.ndarray The 1D template waveform + sampling_frequency : float + The sampling frequency of the template **kwargs: Required kwargs: - peak_relative_threshold: the relative threshold to detect positive and negative peaks - peak_width_ms: the width in samples to detect peaks @@ -590,6 +606,9 @@ def get_num_negative_peaks(template_single, sampling_frequency, **kwargs): def transform_column_range(template, channel_locations, column_range, depth_direction="y"): + """ + Transform template anch channel locations based on column range. + """ column_dim = 0 if depth_direction == "y" else 1 if column_range is None: template_column_range = template @@ -603,12 +622,18 @@ def transform_column_range(template, channel_locations, column_range, depth_dire def sort_template_and_locations(template, channel_locations, depth_direction="y"): + """ + Sort template and locations. + """ depth_dim = 1 if depth_direction == "y" else 0 sort_indices = np.argsort(channel_locations[:, depth_dim]) return template[:, sort_indices], channel_locations[sort_indices, :] def fit_velocity(peak_times, channel_dist): + """ + Fit velocity from peak times and channel distances using ribust Theilsen estimator. 
+ """ # from scipy.stats import linregress # slope, intercept, _, _, _ = linregress(peak_times, channel_dist) @@ -632,6 +657,8 @@ def get_velocity_above(template, channel_locations, sampling_frequency, **kwargs The template waveform (num_samples, num_channels) channel_locations: numpy.ndarray The channel locations (num_channels, 2) + sampling_frequency : float + The sampling frequency of the template **kwargs: Required kwargs: - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - min_channels_for_velocity: the minimum number of channels above or below to compute velocity @@ -707,6 +734,8 @@ def get_velocity_below(template, channel_locations, sampling_frequency, **kwargs The template waveform (num_samples, num_channels) channel_locations: numpy.ndarray The channel locations (num_channels, 2) + sampling_frequency : float + The sampling frequency of the template **kwargs: Required kwargs: - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - min_channels_for_velocity: the minimum number of channels above or below to compute velocity @@ -781,6 +810,8 @@ def get_exp_decay(template, channel_locations, sampling_frequency=None, **kwargs The template waveform (num_samples, num_channels) channel_locations: numpy.ndarray The channel locations (num_channels, 2) + sampling_frequency : float + The sampling frequency of the template **kwargs: Required kwargs: - exp_peak_function: the function to use to compute the peak amplitude for the exp decay ("ptp" or "min") - min_r2_exp_decay: the minimum r2 to accept the exp decay fit @@ -856,6 +887,8 @@ def get_spread(template, channel_locations, sampling_frequency, **kwargs): The template waveform (num_samples, num_channels) channel_locations: numpy.ndarray The channel locations (num_channels, 2) + sampling_frequency : float + The sampling frequency of the template **kwargs: Required kwargs: - depth_direction: the direction to compute velocity above and below ("x", "y", or "z") - spread_threshold: the threshold to compute the spread From f76e9d895a321eceb8dd6e01f0e3fe769867ec16 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 10:14:50 +0200 Subject: [PATCH 237/322] Update src/spikeinterface/curation/sortingview_curation.py --- src/spikeinterface/curation/sortingview_curation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/spikeinterface/curation/sortingview_curation.py b/src/spikeinterface/curation/sortingview_curation.py index 7a573c38c4..626ea79eb9 100644 --- a/src/spikeinterface/curation/sortingview_curation.py +++ b/src/spikeinterface/curation/sortingview_curation.py @@ -94,7 +94,6 @@ def apply_sortingview_curation( # Populate the properties dictionary for unit_index, unit_id in enumerate(curation_sorting.current_sorting.unit_ids): unit_id_str = str(unit_id) - # Check for exact match first if unit_id_str in labels_dict: for label in labels_dict[unit_id_str]: properties[label][unit_index] = True From 4e3140f58cec52b42563b02a5bfb2d0fdda498c3 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 10:19:09 +0200 Subject: [PATCH 238/322] Remove comment --- src/spikeinterface/postprocessing/template_metrics.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/spikeinterface/postprocessing/template_metrics.py b/src/spikeinterface/postprocessing/template_metrics.py index 82f55483b4..3f47c505ad 100644 --- a/src/spikeinterface/postprocessing/template_metrics.py +++ b/src/spikeinterface/postprocessing/template_metrics.py @@ -41,7 +41,7 
@@ class TemplateMetricsCalculator(BaseWaveformExtractorExtension): extension_name = "template_metrics" min_channels_for_multi_channel_warning = 10 - def __init__(self, waveform_extractor): + def __init__(self, waveform_extractor: WaveformExtractor): BaseWaveformExtractorExtension.__init__(self, waveform_extractor) def _set_params( @@ -212,7 +212,6 @@ def get_extension_function(): ) -# TODO: add typing def compute_template_metrics( waveform_extractor, load_if_exists: bool = False, From c20ffdadb908d601e546323b113e994445546891 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 10:23:47 +0200 Subject: [PATCH 239/322] Tiny rewrite in tests --- src/spikeinterface/curation/tests/test_sortingview_curation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/curation/tests/test_sortingview_curation.py b/src/spikeinterface/curation/tests/test_sortingview_curation.py index 22085f2f77..ce6c7dd5a6 100644 --- a/src/spikeinterface/curation/tests/test_sortingview_curation.py +++ b/src/spikeinterface/curation/tests/test_sortingview_curation.py @@ -174,8 +174,9 @@ def test_label_inheritance_int(): duration = 20.0 num_timepoints = int(sampling_frequency * duration) num_spikes = 1000 + num_units = 7 times = np.int_(np.sort(np.random.uniform(0, num_timepoints, num_spikes))) - labels = np.random.randint(1, 8, size=num_spikes) # 7 units: 1 to 7 + labels = np.random.randint(1, 1 + num_units, size=num_spikes) # 7 units: 1 to 7 sorting = se.NumpySorting.from_times_labels(times, labels, sampling_frequency) From bbc81676fbcec04cb7ce9d6f93da60ed1afb0df5 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Mon, 2 Oct 2023 11:00:38 +0200 Subject: [PATCH 240/322] Minor fixes for SC2 and study --- src/spikeinterface/comparison/groundtruthstudy.py | 2 +- .../sortingcomponents/clustering/clustering_tools.py | 2 +- src/spikeinterface/sortingcomponents/matching/circus.py | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index d43727cb44..df0b5296c0 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -180,7 +180,7 @@ def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True if sorting_exists: # delete older sorting + log before running sorters - shutil.rmtree(sorting_exists) + shutil.rmtree(sorting_folder) log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" if log_file.exists(): log_file.unlink() diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 1a8332ad6d..891c355448 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -661,7 +661,7 @@ def remove_duplicates_via_matching( labels = np.unique(new_labels) labels = labels[labels >= 0] - del recording, sub_recording + del recording, sub_recording, method_kwargs os.remove(tmp_filename) return labels, new_labels diff --git a/src/spikeinterface/sortingcomponents/matching/circus.py b/src/spikeinterface/sortingcomponents/matching/circus.py index 358691cd25..ea36b75847 100644 --- a/src/spikeinterface/sortingcomponents/matching/circus.py +++ b/src/spikeinterface/sortingcomponents/matching/circus.py @@ -592,6 +592,7 @@ def _prepare_templates(cls, d): d["spatial"] = np.moveaxis(d["spatial"], 
[0, 1, 2], [1, 0, 2]) d["temporal"] = np.moveaxis(d["temporal"], [0, 1, 2], [1, 2, 0]) d["singular"] = d["singular"].T[:, :, np.newaxis] + return d @classmethod From 6ceee13abe776ceec65dd6239f5f97fbca1096a4 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Mon, 2 Oct 2023 05:08:25 -0400 Subject: [PATCH 241/322] Alessio fixes Co-authored-by: Alessio Buccino --- doc/modules/exporters.rst | 2 +- doc/modules/extractors.rst | 10 ---------- doc/modules/sortingcomponents.rst | 2 +- 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/doc/modules/exporters.rst b/doc/modules/exporters.rst index 1d23f9ad6f..155050ddb0 100644 --- a/doc/modules/exporters.rst +++ b/doc/modules/exporters.rst @@ -31,7 +31,7 @@ The input of the :py:func:`~spikeinterface.exporters.export_to_phy` is a :code:` we = extract_waveforms(recording=recording, sorting=sorting, folder='waveforms', sparse=True) # some computations are done before to control all options - compute_spike_amplitudes(waveform_extractor = we) + compute_spike_amplitudes(waveform_extractor=we) compute_principal_components(waveform_extractor=we, n_components=3, mode='by_channel_global') # the export process is fast because everything is pre-computed diff --git a/doc/modules/extractors.rst b/doc/modules/extractors.rst index ccc5d2a311..2d0e047672 100644 --- a/doc/modules/extractors.rst +++ b/doc/modules/extractors.rst @@ -48,16 +48,6 @@ Importantly, some formats directly handle the probe information: print(recording_mearec.get_probe()) -Although most recordings are loaded with the :py:mod:`~spikeinterface.extractors` -a few file formats are loaded from the :py:mod:`~spikeinterface.core` module - -.. code-block:: python - - import spikeinterface as si - - recording_binary = si.read_binary(file_path='binary.bin') - - recording_zarr = si.read_zarr(file_path='zarr_file.zarr') Read one Sorting diff --git a/doc/modules/sortingcomponents.rst b/doc/modules/sortingcomponents.rst index f3371f7e7b..1e58972497 100644 --- a/doc/modules/sortingcomponents.rst +++ b/doc/modules/sortingcomponents.rst @@ -184,7 +184,7 @@ Here is an example with non-rigid motion estimation: .. code-block:: python from spikeinterface.sortingcomponents.peak_detection import detect_peaks - peaks = detect_peaks(recording=ecording, ...) # as in above example + peaks = detect_peaks(recording=recording, ...) # as in above example from spikeinterface.sortingcomponents.peak_localization import localize_peaks peak_locations = localize_peaks(recording=recording, peaks=peaks, ...) 
# as above From 5cefdacc3674162155b5eaa3a612b5cc2ca79675 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 13:48:22 +0200 Subject: [PATCH 242/322] Fixes to MDASortingExtractor --- src/spikeinterface/extractors/mdaextractors.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/extractors/mdaextractors.py b/src/spikeinterface/extractors/mdaextractors.py index b863e338fa..1eb0182318 100644 --- a/src/spikeinterface/extractors/mdaextractors.py +++ b/src/spikeinterface/extractors/mdaextractors.py @@ -216,14 +216,14 @@ def write_sorting(sorting, save_path, write_primary_channels=False): times_list = [] labels_list = [] primary_channels_list = [] - for unit_id_i, unit_id in enumerate(unit_ids): + for unit_index, unit_id in enumerate(unit_ids): times = sorting.get_unit_spike_train(unit_id=unit_id) times_list.append(times) # unit id may not be numeric - if unit_id.dtype.kind in "biufc": - labels_list.append(np.ones(times.shape) * unit_id) + if unit_id.dtype.kind in "iu": + labels_list.append(np.ones(times.shape, dtype=unit_id.dtype) * unit_id) else: - labels_list.append(np.ones(times.shape) * unit_id_i) + labels_list.append(np.ones(times.shape, dtype=int) * unit_index) if write_primary_channels: if "max_channel" in sorting.get_unit_property_names(unit_id): primary_channels_list.append([sorting.get_unit_property(unit_id, "max_channel")] * times.shape[0]) From c06df711a3dfb0f08d6eb8718147210be0c144c6 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Mon, 2 Oct 2023 08:36:42 -0400 Subject: [PATCH 243/322] add pypi docs and dev docs --- README.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 55f33d04b1..883dcdb944 100644 --- a/README.md +++ b/README.md @@ -59,15 +59,17 @@ With SpikeInterface, users can: - post-process sorted datasets. - compare and benchmark spike sorting outputs. - compute quality metrics to validate and curate spike sorting outputs. -- visualize recordings and spike sorting outputs in several ways (matplotlib, sortingview, in jupyter) -- export report and export to phy -- offer a powerful Qt-based viewer in separate package [spikeinterface-gui](https://github.com/SpikeInterface/spikeinterface-gui) -- have some powerful sorting components to build your own sorter. +- visualize recordings and spike sorting outputs in several ways (matplotlib, sortingview, jupyter, ephyviewer) +- export a report and/or export to phy +- offer a powerful Qt-based viewer in a separate package [spikeinterface-gui](https://github.com/SpikeInterface/spikeinterface-gui) +- have powerful sorting components to build your own sorter. ## Documentation -Detailed documentation for spikeinterface can be found [here](https://spikeinterface.readthedocs.io/en/latest). +Detailed documentation of the latest PyPI release of SpikeInterface can be found [here](https://spikeinterface.readthedocs.io/en/0.98.2). + +Detailed documentation of the development version of SpikeInterface can be found [here](https://spikeinterface.readthedocs.io/en/latest). Several tutorials to get started can be found in [spiketutorials](https://github.com/SpikeInterface/spiketutorials). @@ -77,9 +79,9 @@ and sorting components. You can also have a look at the [spikeinterface-gui](https://github.com/SpikeInterface/spikeinterface-gui). 
-## How to install spikeinteface +## How to install spikeinterface -You can install the new `spikeinterface` version with pip: +You can install the latest version of `spikeinterface` version with pip: ```bash pip install spikeinterface[full] @@ -94,7 +96,7 @@ To install all interactive widget backends, you can use: ``` -To get the latest updates, you can install `spikeinterface` from sources: +To get the latest updates, you can install `spikeinterface` from source: ```bash git clone https://github.com/SpikeInterface/spikeinterface.git From cf65301c82c48e72e10a77f6a7f891453b69e409 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 15:24:30 +0200 Subject: [PATCH 244/322] Check main_ids are ints or strings --- src/spikeinterface/core/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 8b4f094c20..86692fa69c 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -47,6 +47,7 @@ def __init__(self, main_ids: Sequence) -> None: # 'main_ids' will either be channel_ids or units_ids # They is used for properties self._main_ids = np.array(main_ids) + assert self._main_ids.dtype.kind in "uiSU", "Main IDs can only be integers (signed/unsigned) or strings" # dict at object level self._annotations = {} From 8343d3a70a6bb3cf56f3013abc77c8e534059150 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 15:57:54 +0200 Subject: [PATCH 245/322] Fix NpySnippets --- src/spikeinterface/core/base.py | 3 ++- src/spikeinterface/core/baserecordingsnippets.py | 4 ++-- src/spikeinterface/core/basesnippets.py | 2 -- src/spikeinterface/core/npysnippetsextractor.py | 5 ++++- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 86692fa69c..f1a51c99d1 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -47,7 +47,8 @@ def __init__(self, main_ids: Sequence) -> None: # 'main_ids' will either be channel_ids or units_ids # They is used for properties self._main_ids = np.array(main_ids) - assert self._main_ids.dtype.kind in "uiSU", "Main IDs can only be integers (signed/unsigned) or strings" + if len(self._main_ids) > 0: + assert self._main_ids.dtype.kind in "uiSU", "Main IDs can only be integers (signed/unsigned) or strings" # dict at object level self._annotations = {} diff --git a/src/spikeinterface/core/baserecordingsnippets.py b/src/spikeinterface/core/baserecordingsnippets.py index affde8a75e..d411f38d2a 100644 --- a/src/spikeinterface/core/baserecordingsnippets.py +++ b/src/spikeinterface/core/baserecordingsnippets.py @@ -1,4 +1,4 @@ -from typing import List +from __future__ import annotations from pathlib import Path import numpy as np @@ -19,7 +19,7 @@ class BaseRecordingSnippets(BaseExtractor): has_default_locations = False - def __init__(self, sampling_frequency: float, channel_ids: List, dtype): + def __init__(self, sampling_frequency: float, channel_ids: list[str, int], dtype: np.dtype): BaseExtractor.__init__(self, channel_ids) self._sampling_frequency = sampling_frequency self._dtype = np.dtype(dtype) diff --git a/src/spikeinterface/core/basesnippets.py b/src/spikeinterface/core/basesnippets.py index f35bc2b266..b4e3c11f55 100644 --- a/src/spikeinterface/core/basesnippets.py +++ b/src/spikeinterface/core/basesnippets.py @@ -1,10 +1,8 @@ from typing import List, Union -from pathlib import Path from .base import BaseSegment from .baserecordingsnippets import 
BaseRecordingSnippets import numpy as np from warnings import warn -from probeinterface import Probe, ProbeGroup, write_probeinterface, read_probeinterface, select_axes # snippets segments? diff --git a/src/spikeinterface/core/npysnippetsextractor.py b/src/spikeinterface/core/npysnippetsextractor.py index 80979ce6c9..69c48356e5 100644 --- a/src/spikeinterface/core/npysnippetsextractor.py +++ b/src/spikeinterface/core/npysnippetsextractor.py @@ -27,6 +27,9 @@ def __init__( num_segments = len(file_paths) data = np.load(file_paths[0], mmap_mode="r") + if channel_ids is None: + channel_ids = np.arange(data["snippet"].shape[2]) + BaseSnippets.__init__( self, sampling_frequency, @@ -84,7 +87,7 @@ def write_snippets(snippets, file_paths, dtype=None): arr = np.empty(n, dtype=snippets_t, order="F") arr["frame"] = snippets.get_frames(segment_index=i) arr["snippet"] = snippets.get_snippets(segment_index=i).astype(dtype, copy=False) - + file_paths[i].parent.mkdir(parents=True, exist_ok=True) np.save(file_paths[i], arr) From 89d1f827c445702a61eda864c9972401567a9b67 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 2 Oct 2023 16:26:25 +0200 Subject: [PATCH 246/322] Force CellExplorer unit ids as int --- src/spikeinterface/core/base.py | 4 +++- src/spikeinterface/extractors/cellexplorersortingextractor.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index f1a51c99d1..1116aeb507 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -48,7 +48,9 @@ def __init__(self, main_ids: Sequence) -> None: # They is used for properties self._main_ids = np.array(main_ids) if len(self._main_ids) > 0: - assert self._main_ids.dtype.kind in "uiSU", "Main IDs can only be integers (signed/unsigned) or strings" + assert ( + self._main_ids.dtype.kind in "uiSU" + ), f"Main IDs can only be integers (signed/unsigned) or strings, not {self._main_ids.dtype}" # dict at object level self._annotations = {} diff --git a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index 31241a4147..f72670fbcd 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -118,7 +118,7 @@ def __init__( spike_times = spikes_data["times"] # CellExplorer reports spike times in units seconds; SpikeExtractors uses time units of sampling frames - unit_ids = unit_ids[:].tolist() + unit_ids = unit_ids[:].astype(int).tolist() spiketrains_dict = {unit_id: spike_times[index] for index, unit_id in enumerate(unit_ids)} for unit_id in unit_ids: spiketrains_dict[unit_id] = (sampling_frequency * spiketrains_dict[unit_id]).round().astype(np.int64) From d75f0588707da10a61e926e337334739a0b9a20b Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Tue, 3 Oct 2023 10:58:15 +0200 Subject: [PATCH 247/322] Update src/spikeinterface/extractors/cellexplorersortingextractor.py Co-authored-by: Heberto Mayorquin --- src/spikeinterface/extractors/cellexplorersortingextractor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index f72670fbcd..0096a40a79 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -119,6 +119,7 @@ def __init__( # CellExplorer reports spike times in 
units seconds; SpikeExtractors uses time units of sampling frames unit_ids = unit_ids[:].astype(int).tolist() + unit_ids = [str(unit_id) for unit_id in unit_ids] spiketrains_dict = {unit_id: spike_times[index] for index, unit_id in enumerate(unit_ids)} for unit_id in unit_ids: spiketrains_dict[unit_id] = (sampling_frequency * spiketrains_dict[unit_id]).round().astype(np.int64) From 1939b936e94d30c8437633f89c49fd006ca71a80 Mon Sep 17 00:00:00 2001 From: Sebastien Date: Wed, 4 Oct 2023 10:19:11 +0200 Subject: [PATCH 248/322] Diff for SC2 --- src/spikeinterface/sorters/internal/spyking_circus2.py | 7 ++++--- .../sortingcomponents/clustering/clustering_tools.py | 7 +++++-- .../sortingcomponents/clustering/random_projections.py | 2 +- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index a0a4d0823c..db06287f6c 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -6,7 +6,7 @@ from spikeinterface.core import NumpySorting, load_extractor, BaseRecording, get_noise_levels, extract_waveforms from spikeinterface.core.job_tools import fix_job_kwargs -from spikeinterface.preprocessing import bandpass_filter, common_reference, zscore +from spikeinterface.preprocessing import common_reference, zscore, whiten, highpass_filter try: import hdbscan @@ -22,7 +22,7 @@ class Spykingcircus2Sorter(ComponentsBasedSorter): _default_params = { "general": {"ms_before": 2, "ms_after": 2, "radius_um": 100}, "waveforms": {"max_spikes_per_unit": 200, "overwrite": True, "sparse": True, "method": "ptp", "threshold": 1}, - "filtering": {"dtype": "float32"}, + "filtering": {"freq_min": 150, "dtype": "float32"}, "detection": {"peak_sign": "neg", "detect_threshold": 5}, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, "localization": {}, @@ -60,11 +60,12 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## First, we are filtering the data filtering_params = params["filtering"].copy() if params["apply_preprocessing"]: - recording_f = bandpass_filter(recording, **filtering_params) + recording_f = highpass_filter(recording, **filtering_params) recording_f = common_reference(recording_f) else: recording_f = recording + #recording_f = whiten(recording_f, dtype="float32") recording_f = zscore(recording_f, dtype="float32") ## Then, we are detecting peaks with a locally_exclusive method diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 891c355448..6dba4b7f0f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -598,14 +598,17 @@ def remove_duplicates_via_matching( "waveform_extractor": waveform_extractor, "noise_levels": noise_levels, "amplitudes": [0.95, 1.05], - "omp_min_sps": 0.1, + "omp_min_sps": 0.05, } ) + spikes_per_units, counts = np.unique(waveform_extractor.sorting.to_spike_vector()['unit_index'], return_counts=True) + indices = np.argsort(counts) + ignore_ids = [] similar_templates = [[], []] - for i in range(nb_templates): + for i in np.arange(nb_templates)[indices]: t_start = padding + i * duration t_stop = padding + (i + 1) * duration diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py 
b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 1f97bf5201..d7ceef2561 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -33,7 +33,7 @@ class RandomProjectionClustering: "min_cluster_size": 20, "allow_single_cluster": True, "core_dist_n_jobs": os.cpu_count(), - "cluster_selection_method": "leaf", + "cluster_selection_method": "leaf" }, "cleaning_kwargs": {}, "waveforms": {"ms_before": 2, "ms_after": 2, "max_spikes_per_unit": 100}, From a46994f5ea58e4359ef0a514bae9cd96dc2bf5f8 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 4 Oct 2023 13:43:48 +0200 Subject: [PATCH 249/322] waveform extactor reload --- src/spikeinterface/core/waveform_extractor.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 2710ff1338..6d9e5d41e3 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -175,7 +175,13 @@ def load_from_folder( rec_attributes = None if sorting is None: - sorting = load_extractor(folder / "sorting.json", base_folder=folder) + if (folder / "sorting.json").exists(): + sorting = load_extractor(folder / "sorting.json", base_folder=folder) + elif (folder / "sorting.pickle").exists(): + sorting = load_extractor(folder / "sorting.pickle") + else: + raise FileNotFoundError("load_waveforms() impossible to find the sorting object (json or pickle)") + # the sparsity is the sparsity of the saved/cached waveforms arrays sparsity_file = folder / "sparsity.json" From 7d9c0753fb3c59577dd244d3c9bce1d6272015e6 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 4 Oct 2023 14:11:54 +0200 Subject: [PATCH 250/322] WIP --- src/spikeinterface/preprocessing/remove_artifacts.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 7e84822c61..8e72b96c6d 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -1,4 +1,5 @@ import numpy as np +import scipy from spikeinterface.core.core_tools import define_function_from_class From 87a9dc964d59530267ed5be8b297a08b35427b75 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 4 Oct 2023 15:36:07 +0200 Subject: [PATCH 251/322] yep --- src/spikeinterface/core/generate.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 06a5ec96ec..9d656db977 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1405,6 +1405,7 @@ def generate_ground_truth_recording( assert sorting.sampling_frequency == sampling_frequency num_spikes = sorting.to_spike_vector().size + if probe is None: probe = generate_linear_probe(num_elec=num_channels) probe.set_device_channel_indices(np.arange(num_channels)) From 0c97fc46adfb8c19683285ab77338ea9e103ac25 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 13:37:30 +0000 Subject: [PATCH 252/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/generate.py | 1 - src/spikeinterface/core/waveform_extractor.py | 1 - 2 files changed, 2 deletions(-) diff --git 
a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 9d656db977..06a5ec96ec 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1405,7 +1405,6 @@ def generate_ground_truth_recording( assert sorting.sampling_frequency == sampling_frequency num_spikes = sorting.to_spike_vector().size - if probe is None: probe = generate_linear_probe(num_elec=num_channels) probe.set_device_channel_indices(np.arange(num_channels)) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6d9e5d41e3..576a0a1a58 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -182,7 +182,6 @@ def load_from_folder( else: raise FileNotFoundError("load_waveforms() impossible to find the sorting object (json or pickle)") - # the sparsity is the sparsity of the saved/cached waveforms arrays sparsity_file = folder / "sparsity.json" if sparsity_file.is_file(): From 86b2271df55b671b49cd5b58601df94ab0dd2109 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 4 Oct 2023 16:03:51 +0200 Subject: [PATCH 253/322] Change some default parameters for better user experience. --- src/spikeinterface/core/waveform_extractor.py | 8 ++++---- src/spikeinterface/postprocessing/correlograms.py | 4 ++-- src/spikeinterface/postprocessing/unit_localization.py | 2 +- src/spikeinterface/sorters/runsorter.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index 6d9e5d41e3..1c6002226f 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1458,13 +1458,13 @@ def extract_waveforms( folder=None, mode="folder", precompute_template=("average",), - ms_before=3.0, - ms_after=4.0, + ms_before=1.0, + ms_after=2.0, max_spikes_per_unit=500, overwrite=False, return_scaled=True, dtype=None, - sparse=False, + sparse=True, sparsity=None, num_spikes_for_sparsity=100, allow_unfiltered=False, @@ -1508,7 +1508,7 @@ def extract_waveforms( If True and recording has gain_to_uV/offset_to_uV properties, waveforms are converted to uV. dtype: dtype or None Dtype of the output waveforms. If None, the recording dtype is maintained. - sparse: bool (default False) + sparse: bool (default True) If True, before extracting all waveforms the `precompute_sparsity()` function is run using a few spikes to get an estimate of dense templates to create a ChannelSparsity object. Then, the waveforms will be sparse at extraction time, which saves a lot of memory. diff --git a/src/spikeinterface/postprocessing/correlograms.py b/src/spikeinterface/postprocessing/correlograms.py index 6cd5238abd..6e693635eb 100644 --- a/src/spikeinterface/postprocessing/correlograms.py +++ b/src/spikeinterface/postprocessing/correlograms.py @@ -137,8 +137,8 @@ def compute_crosscorrelogram_from_spiketrain(spike_times1, spike_times2, window_ def compute_correlograms( waveform_or_sorting_extractor, load_if_exists=False, - window_ms: float = 100.0, - bin_ms: float = 5.0, + window_ms: float = 50.0, + bin_ms: float = 1.0, method: str = "auto", ): """Compute auto and cross correlograms. 
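Because this commit changes several default values, code that relied on the previous behaviour can keep it by passing the old values explicitly. A sketch, assuming an existing ``recording`` and ``sorting`` pair (placeholders); the pinned values are the ones removed by this diff:

.. code-block:: python

    # Sketch: pin the pre-change defaults explicitly (values taken from the removed lines above).
    from spikeinterface import extract_waveforms
    from spikeinterface.postprocessing import compute_correlograms

    we = extract_waveforms(recording=recording, sorting=sorting, folder="waveforms",
                           ms_before=3.0, ms_after=4.0, sparse=False)
    correlograms, bins = compute_correlograms(we, window_ms=100.0, bin_ms=5.0)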
diff --git a/src/spikeinterface/postprocessing/unit_localization.py b/src/spikeinterface/postprocessing/unit_localization.py index d2739f69dd..48ceb34a4e 100644 --- a/src/spikeinterface/postprocessing/unit_localization.py +++ b/src/spikeinterface/postprocessing/unit_localization.py @@ -96,7 +96,7 @@ def get_extension_function(): def compute_unit_locations( - waveform_extractor, load_if_exists=False, method="center_of_mass", outputs="numpy", **method_kwargs + waveform_extractor, load_if_exists=False, method="monopolar_triangulation", outputs="numpy", **method_kwargs ): """ Localize units in 2D or 3D with several methods given the template. diff --git a/src/spikeinterface/sorters/runsorter.py b/src/spikeinterface/sorters/runsorter.py index 9bacd8e2c9..a49a605a75 100644 --- a/src/spikeinterface/sorters/runsorter.py +++ b/src/spikeinterface/sorters/runsorter.py @@ -91,7 +91,7 @@ def run_sorter( sorter_name: str, recording: BaseRecording, output_folder: Optional[str] = None, - remove_existing_folder: bool = True, + remove_existing_folder: bool = False, delete_output_folder: bool = False, verbose: bool = False, raise_error: bool = True, From e97005aa5e94328cee3d97097b98d6a7289ee437 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 4 Oct 2023 16:21:54 +0200 Subject: [PATCH 254/322] Patch for scipy --- src/spikeinterface/preprocessing/remove_artifacts.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 8e72b96c6d..1746b23941 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -1,5 +1,4 @@ import numpy as np -import scipy from spikeinterface.core.core_tools import define_function_from_class @@ -108,8 +107,6 @@ def __init__( time_jitter=0, waveforms_kwargs={"allow_unfiltered": True, "mode": "memory"}, ): - import scipy.interpolate - available_modes = ("zeros", "linear", "cubic", "average", "median") num_seg = recording.get_num_segments() @@ -237,7 +234,6 @@ def __init__( time_pad, sparsity, ): - import scipy.interpolate BasePreprocessorSegment.__init__(self, parent_recording_segment) @@ -255,6 +251,8 @@ def __init__( self.sparsity = sparsity def get_traces(self, start_frame, end_frame, channel_indices): + + if self.mode in ["average", "median"]: traces = self.parent_recording_segment.get_traces(start_frame, end_frame, slice(None)) else: @@ -286,6 +284,7 @@ def get_traces(self, start_frame, end_frame, channel_indices): elif trig + pad[1] >= end_frame - start_frame: traces[trig - pad[0] :, :] = 0 elif self.mode in ["linear", "cubic"]: + import scipy.interpolate for trig in triggers: if pad is None: pre_data_end_idx = trig - 1 From 2a5e37c83054999514ccacd45b3c81d1865bc196 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 14:23:26 +0000 Subject: [PATCH 255/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/preprocessing/remove_artifacts.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/spikeinterface/preprocessing/remove_artifacts.py b/src/spikeinterface/preprocessing/remove_artifacts.py index 1746b23941..1eafa48a0b 100644 --- a/src/spikeinterface/preprocessing/remove_artifacts.py +++ b/src/spikeinterface/preprocessing/remove_artifacts.py @@ -234,7 +234,6 @@ def __init__( time_pad, sparsity, ): - 
BasePreprocessorSegment.__init__(self, parent_recording_segment) self.triggers = np.asarray(triggers, dtype="int64") @@ -251,8 +250,6 @@ def __init__( self.sparsity = sparsity def get_traces(self, start_frame, end_frame, channel_indices): - - if self.mode in ["average", "median"]: traces = self.parent_recording_segment.get_traces(start_frame, end_frame, slice(None)) else: @@ -285,6 +282,7 @@ def get_traces(self, start_frame, end_frame, channel_indices): traces[trig - pad[0] :, :] = 0 elif self.mode in ["linear", "cubic"]: import scipy.interpolate + for trig in triggers: if pad is None: pre_data_end_idx = trig - 1 From d9803d43e9598810337d11d2e68414261dbc3b81 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 4 Oct 2023 17:07:05 +0200 Subject: [PATCH 256/322] oups --- src/spikeinterface/core/waveform_extractor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index d83b3d66f1..eb027faf81 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1726,6 +1726,7 @@ def precompute_sparsity( max_spikes_per_unit=num_spikes_for_sparsity, return_scaled=False, allow_unfiltered=allow_unfiltered, + sparse=False, **job_kwargs, ) local_sparsity = compute_sparsity(local_we, **sparse_kwargs) From 590cd6ba2440569469859a0e08ce321a5320e27d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 4 Oct 2023 21:04:26 +0200 Subject: [PATCH 257/322] small fix --- src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py index d25f1ea97b..364fc298c6 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py @@ -43,6 +43,8 @@ def plot(self): self._do_plot() def _do_plot(self): + from matplotlib import pyplot as plt + fig = self.figure for ax in fig.axes: From 204c8e90fd44d56e4b5eb6b0b7e92f09ea18db91 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Wed, 4 Oct 2023 21:08:17 +0200 Subject: [PATCH 258/322] fix waveform extactor with empty sorting and sparse --- src/spikeinterface/core/sparsity.py | 6 +++++- src/spikeinterface/core/tests/test_waveform_extractor.py | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/sparsity.py b/src/spikeinterface/core/sparsity.py index 8c5c62d568..896e3800d7 100644 --- a/src/spikeinterface/core/sparsity.py +++ b/src/spikeinterface/core/sparsity.py @@ -102,7 +102,11 @@ def __init__(self, mask, unit_ids, channel_ids): self.num_channels = self.channel_ids.size self.num_units = self.unit_ids.size - self.max_num_active_channels = self.mask.sum(axis=1).max() + if self.mask.shape[0]: + self.max_num_active_channels = self.mask.sum(axis=1).max() + else: + # empty sorting without units + self.max_num_active_channels = 0 def __repr__(self): density = np.mean(self.mask) diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 2bbf5e9b0f..00244f600b 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -556,4 +556,5 @@ def test_non_json_object(): # test_portability() # test_recordingless() # test_compute_sparsity() - test_non_json_object() + # test_non_json_object() + 
test_empty_sorting() From 4cd3747786728e2942bef43b5c9d5ecba8d102fb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 06:25:31 +0000 Subject: [PATCH 259/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/spyking_circus2.py | 2 +- .../sortingcomponents/clustering/clustering_tools.py | 2 +- .../sortingcomponents/clustering/random_projections.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index db06287f6c..6cf925e852 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -65,7 +65,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): else: recording_f = recording - #recording_f = whiten(recording_f, dtype="float32") + # recording_f = whiten(recording_f, dtype="float32") recording_f = zscore(recording_f, dtype="float32") ## Then, we are detecting peaks with a locally_exclusive method diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6dba4b7f0f..72cfd71791 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -602,7 +602,7 @@ def remove_duplicates_via_matching( } ) - spikes_per_units, counts = np.unique(waveform_extractor.sorting.to_spike_vector()['unit_index'], return_counts=True) + spikes_per_units, counts = np.unique(waveform_extractor.sorting.to_spike_vector()["unit_index"], return_counts=True) indices = np.argsort(counts) ignore_ids = [] diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index d7ceef2561..1f97bf5201 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -33,7 +33,7 @@ class RandomProjectionClustering: "min_cluster_size": 20, "allow_single_cluster": True, "core_dist_n_jobs": os.cpu_count(), - "cluster_selection_method": "leaf" + "cluster_selection_method": "leaf", }, "cleaning_kwargs": {}, "waveforms": {"ms_before": 2, "ms_after": 2, "max_spikes_per_unit": 100}, From 22c0eb426507be87790cbcd68427e3d3764721ee Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 5 Oct 2023 08:29:18 +0200 Subject: [PATCH 260/322] Fix bug while reloading --- .../sortingcomponents/clustering/clustering_tools.py | 2 +- .../sortingcomponents/clustering/random_projections.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 6dba4b7f0f..d94345f56b 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -664,7 +664,7 @@ def remove_duplicates_via_matching( labels = np.unique(new_labels) labels = labels[labels >= 0] - del recording, sub_recording, method_kwargs + del recording, sub_recording, method_kwargs, waveform_extractor os.remove(tmp_filename) return labels, new_labels diff --git 
a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index d7ceef2561..4d1dd1f9d5 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -223,6 +223,8 @@ def sigmoid(x, L, x0, k, b): ) del we, sorting + import gc + gc.collect() if params["tmp_folder"] is None: shutil.rmtree(tmp_folder) From f69d7e3dbd013c52564b79c1f6ce5c87a3f67af0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 06:30:11 +0000 Subject: [PATCH 261/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/random_projections.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 7cb882409d..620346a875 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -224,6 +224,7 @@ def sigmoid(x, L, x0, k, b): del we, sorting import gc + gc.collect() if params["tmp_folder"] is None: From 403890ce83b065a76bcc1542a562d1a73e6e04be Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 5 Oct 2023 09:01:02 +0200 Subject: [PATCH 262/322] Found it! --- .../clustering/clustering_tools.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index ce29c47113..734ceff1a3 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -593,7 +593,9 @@ def remove_duplicates_via_matching( chunk_size = duration + 3 * margin - method_kwargs.update( + local_params = method_kwargs.copy() + + local_params.update( { "waveform_extractor": waveform_extractor, "noise_levels": noise_levels, @@ -613,12 +615,12 @@ def remove_duplicates_via_matching( t_stop = padding + (i + 1) * duration sub_recording = recording.frame_slice(t_start - half_marging, t_stop + half_marging) - method_kwargs.update({"ignored_ids": ignore_ids + [i]}) + local_params.update({"ignored_ids": ignore_ids + [i]}) spikes, computed = find_spikes_from_templates( - sub_recording, method=method, method_kwargs=method_kwargs, extra_outputs=True, **job_kwargs + sub_recording, method=method, method_kwargs=local_params, extra_outputs=True, **job_kwargs ) if method == "circus-omp-svd": - method_kwargs.update( + local_params.update( { "overlaps": computed["overlaps"], "templates": computed["templates"], @@ -632,7 +634,7 @@ def remove_duplicates_via_matching( } ) elif method == "circus-omp": - method_kwargs.update( + local_params.update( { "overlaps": computed["overlaps"], "templates": computed["templates"], @@ -664,7 +666,7 @@ def remove_duplicates_via_matching( labels = np.unique(new_labels) labels = labels[labels >= 0] - del recording, sub_recording, method_kwargs, waveform_extractor + del recording, sub_recording, local_params, waveform_extractor os.remove(tmp_filename) return labels, new_labels From 6951e856c0794e78108be180d6f16e0fde6af6e2 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 5 Oct 2023 09:54:27 +0200 Subject: [PATCH 
263/322] WIP --- .../sortingcomponents/clustering/clustering_tools.py | 2 +- .../sortingcomponents/clustering/random_projections.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py index 734ceff1a3..b4938717f8 100644 --- a/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/clustering_tools.py @@ -610,7 +610,7 @@ def remove_duplicates_via_matching( ignore_ids = [] similar_templates = [[], []] - for i in np.arange(nb_templates)[indices]: + for i in indices: t_start = padding + i * duration t_stop = padding + (i + 1) * duration diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 620346a875..1f97bf5201 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -223,9 +223,6 @@ def sigmoid(x, L, x0, k, b): ) del we, sorting - import gc - - gc.collect() if params["tmp_folder"] is None: shutil.rmtree(tmp_folder) From fdebd12b09654796a177f4ab91b8e614409f5ac7 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Thu, 5 Oct 2023 10:43:20 +0200 Subject: [PATCH 264/322] Sparse waveforms were not handled --- src/spikeinterface/sorters/internal/spyking_circus2.py | 4 ++-- .../sortingcomponents/clustering/random_projections.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 6cf925e852..0c3b9f95d1 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -99,10 +99,10 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We launch a clustering (using hdbscan) relying on positions and features extracted on ## the fly from the snippets clustering_params = params["clustering"].copy() - clustering_params["waveforms_kwargs"] = params["waveforms"] + clustering_params["waveforms"] = params["waveforms"].copy() for k in ["ms_before", "ms_after"]: - clustering_params["waveforms_kwargs"][k] = params["general"][k] + clustering_params["waveforms"][k] = params["general"][k] clustering_params.update(dict(shared_memory=params["shared_memory"])) clustering_params["job_kwargs"] = job_kwargs diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index 1f97bf5201..ffb868f682 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -199,9 +199,8 @@ def sigmoid(x, L, x0, k, b): recording, sorting, waveform_folder, - ms_before=params["ms_before"], - ms_after=params["ms_after"], **params["job_kwargs"], + **params['waveforms'], return_scaled=False, mode=mode, ) From b6f9235a7cf9c2ad106ec0e4cb6be365a243d2af Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 08:44:20 +0000 Subject: [PATCH 265/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sortingcomponents/clustering/random_projections.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index ffb868f682..a81458d7a8 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -200,7 +200,7 @@ def sigmoid(x, L, x0, k, b): sorting, waveform_folder, **params["job_kwargs"], - **params['waveforms'], + **params["waveforms"], return_scaled=False, mode=mode, ) From 16ed53022da36e3918101aff3bb01009eda9b983 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 11:34:00 +0200 Subject: [PATCH 266/322] wip --- .../sorters/internal/tridesclous2.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 909515842c..46a7aa843f 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -21,13 +21,13 @@ class Tridesclous2Sorter(ComponentsBasedSorter): "apply_preprocessing": True, "waveforms" : {"ms_before": 0.5, "ms_after": 1.5, }, "filtering": {"freq_min": 300, "freq_max": 8000.0}, - "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 0.8, "radius_um": 150.}, - "hdbscan_kwargs": { - "min_cluster_size": 25, - "allow_single_cluster": True, - "core_dist_n_jobs": -1, - "cluster_selection_method": "leaf", - }, + "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 1.5, "radius_um": 150.}, + #~ "hdbscan_kwargs": { + #~ "min_cluster_size": 25, + #~ "allow_single_cluster": True, + #~ "core_dist_n_jobs": -1, + #~ "cluster_selection_method": "leaf", + #~ }, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, "svd": {"n_components": 6}, "clustering": { From f4f3fb4199a59add1882b26e0925e08c00d1fed3 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 11:34:41 +0200 Subject: [PATCH 267/322] wip --- .../sortingcomponents/clustering/merge.py | 87 ++++++++++++++++--- .../sortingcomponents/clustering/split.py | 13 ++- 2 files changed, 83 insertions(+), 17 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index e2049d70bf..1dd9f9fc37 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -80,9 +80,45 @@ def merge_clusters( method_kwargs=method_kwargs, **job_kwargs, ) + + + DEBUG = False + if DEBUG: + import matplotlib.pyplot as plt + fig, ax = plt.subplots() + ax.matshow(pair_values) + + pair_values[~pair_mask] = 20 + + import hdbscan + fig, ax = plt.subplots() + clusterer = hdbscan.HDBSCAN(metric='precomputed', min_cluster_size=2, allow_single_cluster=True) + clusterer.fit(pair_values) + print(clusterer.labels_) + clusterer.single_linkage_tree_.plot(cmap='viridis', colorbar=True) + #~ fig, ax = plt.subplots() + #~ clusterer.minimum_spanning_tree_.plot(edge_cmap='viridis', + #~ edge_alpha=0.6, + #~ node_size=80, + #~ edge_linewidth=2) + + graph = clusterer.single_linkage_tree_.to_networkx() + + import scipy.cluster + fig, ax = plt.subplots() + scipy.cluster.hierarchy.dendrogram(clusterer.single_linkage_tree_.to_numpy(), ax=ax) + + import networkx as nx + fig = plt.figure() + nx.draw_networkx(graph) + plt.show() - # merges = agglomerate_pairs(labels_set, pair_mask, pair_values, 
connection_mode="partial") - merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full") + plt.show() + + + + merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="partial") + # merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full") group_shifts = resolve_final_shifts(labels_set, merges, pair_mask, pair_shift) @@ -187,7 +223,7 @@ def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full" else: raise ValueError - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -196,7 +232,7 @@ def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full" nx.draw_networkx(sub_graph) plt.show() - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -348,6 +384,7 @@ def merge( criteria="diptest", threshold_diptest=0.5, threshold_percentile=80.0, + threshold_overlap=0.4, num_shift=2, ): if num_shift > 0: @@ -449,6 +486,23 @@ def merge( l1 = np.percentile(feat1, 100.0 - threshold_percentile) is_merge = l0 >= l1 merge_value = l0 - l1 + elif criteria == "distrib_overlap": + lim0 = min(np.min(feat0), np.min(feat1)) + lim1 = max(np.max(feat0), np.max(feat1)) + bin_size = (lim1 - lim0) / 200. + bins = np.arange(lim0, lim1, bin_size) + + pdf0, _ = np.histogram(feat0, bins=bins, density=True) + pdf1, _ = np.histogram(feat1, bins=bins, density=True) + pdf0 *= bin_size + pdf1 *= bin_size + overlap = np.sum(np.minimum(pdf0, pdf1)) + + is_merge = overlap >= threshold_overlap + + merge_value = 1 - overlap + + else: raise ValueError(f"bad criteria {criteria}") @@ -457,11 +511,13 @@ def merge( else: final_shift = 0 - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG and is_merge: - # if DEBUG: + # if DEBUG and not is_merge: + # if DEBUG and (overlap > 0.05 and overlap <0.25): + # if label0 == 49 and label1== 65: import matplotlib.pyplot as plt flatten_wfs0 = wfs0.swapaxes(1, 2).reshape(wfs0.shape[0], -1) @@ -479,13 +535,16 @@ def merge( ax.legend() bins = np.linspace(np.percentile(feat, 1), np.percentile(feat, 99), 100) - - count0, _ = np.histogram(feat0, bins=bins) - count1, _ = np.histogram(feat1, bins=bins) + bin_size = bins[1] - bins[0] + count0, _ = np.histogram(feat0, bins=bins, density=True) + count1, _ = np.histogram(feat1, bins=bins, density=True) + pdf0 = count0 * bin_size + pdf1 = count1 * bin_size + ax = axs[1] - ax.plot(bins[:-1], count0, color="C0") - ax.plot(bins[:-1], count1, color="C1") + ax.plot(bins[:-1], pdf0, color="C0") + ax.plot(bins[:-1], pdf1, color="C1") if criteria == "diptest": ax.set_title(f"{dipscore:.4f} {is_merge}") @@ -493,9 +552,11 @@ def merge( ax.set_title(f"{l0:.4f} {l1:.4f} {is_merge}") ax.axvline(l0, color="C0") ax.axvline(l1, color="C1") + elif criteria == "distrib_overlap": + print(lim0, lim1, ) + ax.set_title(f"{overlap:.4f} {is_merge}") + ax.plot(bins[:-1], np.minimum(pdf0, pdf1), ls='--', color='k') - - plt.show() diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py index 411d8c2116..d3e630a165 100644 --- a/src/spikeinterface/sortingcomponents/clustering/split.py +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -205,9 +205,11 @@ def split( final_features = TruncatedSVD(n_pca_features).fit_transform(flatten_features) if clusterer == "hdbscan": - clust = HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, allow_single_cluster=True) + clust = 
HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, allow_single_cluster=True, + cluster_selection_method="leaf") clust.fit(final_features) possible_labels = clust.labels_ + is_split = np.setdiff1d(possible_labels, [-1]).size > 1 elif clusterer == "isocut5": dipscore, cutpoint = isocut5(final_features[:, 0]) possible_labels = np.zeros(final_features.shape[0]) @@ -215,14 +217,15 @@ def split( mask = final_features[:, 0] > cutpoint if np.sum(mask) > min_cluster_size and np.sum(~mask): possible_labels[mask] = 1 + is_split = np.setdiff1d(possible_labels, [-1]).size > 1 else: - return False, None + is_split = False else: raise ValueError(f"wrong clusterer {clusterer}") - is_split = np.setdiff1d(possible_labels, [-1]).size > 1 + - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -243,6 +246,8 @@ def split( ax = axs[1] ax.plot(flatten_wfs[mask][sl].T, color=colors[k], alpha=0.5) + + axs[0].set_title(f"{clusterer} {is_split}") plt.show() From 50f6fcf5322bf10f1b8310ac228921a975b17557 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 12:16:50 +0200 Subject: [PATCH 268/322] small fix unrelated --- src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py index 364fc298c6..c921f42c6d 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py @@ -179,6 +179,8 @@ def plot(self): def _do_plot(self): import sklearn + import matplotlib.pyplot as plt + import matplotlib # compute similarity # take index of template (respect unit_ids order) From 0798169827321ca8a823780baa377ed8d5820469 Mon Sep 17 00:00:00 2001 From: Garcia Samuel Date: Thu, 5 Oct 2023 13:12:27 +0200 Subject: [PATCH 269/322] Update src/spikeinterface/core/waveform_extractor.py --- src/spikeinterface/core/waveform_extractor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/core/waveform_extractor.py b/src/spikeinterface/core/waveform_extractor.py index eb027faf81..0fc5694207 100644 --- a/src/spikeinterface/core/waveform_extractor.py +++ b/src/spikeinterface/core/waveform_extractor.py @@ -1507,7 +1507,7 @@ def extract_waveforms( If True and recording has gain_to_uV/offset_to_uV properties, waveforms are converted to uV. dtype: dtype or None Dtype of the output waveforms. If None, the recording dtype is maintained. - sparse: bool (default True) + sparse: bool, default: True If True, before extracting all waveforms the `precompute_sparsity()` function is run using a few spikes to get an estimate of dense templates to create a ChannelSparsity object. Then, the waveforms will be sparse at extraction time, which saves a lot of memory. 
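(For illustration only, not part of the series: a standalone sketch of the "distrib_overlap" merge criterion introduced in the merge.py hunks above. The function name is my own packaging; `feat0` / `feat1` stand for the 1-d projections of two candidate clusters, and `threshold_overlap` mirrors the new parameter, 0.4 by default in the patch.)

import numpy as np

def distrib_overlap(feat0, feat1, threshold_overlap=0.4, num_bins=200):
    # histogram both projections on a common support
    lim0 = min(np.min(feat0), np.min(feat1))
    lim1 = max(np.max(feat0), np.max(feat1))
    bin_size = (lim1 - lim0) / num_bins
    bins = np.arange(lim0, lim1, bin_size)
    pdf0, _ = np.histogram(feat0, bins=bins, density=True)
    pdf1, _ = np.histogram(feat1, bins=bins, density=True)
    # turn densities into per-bin probabilities so each pdf sums to ~1
    pdf0 *= bin_size
    pdf1 *= bin_size
    # overlap close to 1 means the two clusters sample the same distribution
    overlap = np.sum(np.minimum(pdf0, pdf1))
    is_merge = overlap >= threshold_overlap
    merge_value = 1 - overlap
    return is_merge, merge_value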
From 4293b2244be7b71aa0ce68f4dabad24d23318637 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:17:03 +0000 Subject: [PATCH 270/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py index c921f42c6d..468b96ff3b 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/collisioncomp.py @@ -44,7 +44,7 @@ def plot(self): def _do_plot(self): from matplotlib import pyplot as plt - + fig = self.figure for ax in fig.axes: From 3371915310a4bda8cbd9ecd8a5e2d2f3e0ee55b1 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 15:36:46 +0200 Subject: [PATCH 271/322] Keep sparse=False in postprocessing tests --- .../postprocessing/tests/common_extension_tests.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/spikeinterface/postprocessing/tests/common_extension_tests.py b/src/spikeinterface/postprocessing/tests/common_extension_tests.py index 8f864e9b84..50e2ecdb57 100644 --- a/src/spikeinterface/postprocessing/tests/common_extension_tests.py +++ b/src/spikeinterface/postprocessing/tests/common_extension_tests.py @@ -57,6 +57,7 @@ def setUp(self): ms_before=3.0, ms_after=4.0, max_spikes_per_unit=500, + sparse=False, n_jobs=1, chunk_size=30000, overwrite=True, @@ -92,6 +93,7 @@ def setUp(self): ms_before=3.0, ms_after=4.0, max_spikes_per_unit=500, + sparse=False, n_jobs=1, chunk_size=30000, overwrite=True, @@ -112,6 +114,7 @@ def setUp(self): recording, sorting, mode="memory", + sparse=False, ms_before=3.0, ms_after=4.0, max_spikes_per_unit=500, From bef9c4ab9d5eeea9331bfbab5076da23ef5f61cc Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 16:09:48 +0200 Subject: [PATCH 272/322] change split merge naming --- .../sortingcomponents/clustering/merge.py | 17 +++++++++-- .../sortingcomponents/clustering/split.py | 29 ++++++++++++++----- .../sortingcomponents/tests/test_merge.py | 13 +++++++++ .../sortingcomponents/tests/test_split.py | 1 + 4 files changed, 49 insertions(+), 11 deletions(-) create mode 100644 src/spikeinterface/sortingcomponents/tests/test_merge.py diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index 1dd9f9fc37..5539ec1051 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -368,8 +368,19 @@ def find_pair_function_wrapper(label0, label1): return is_merge, label0, label1, shift, merge_value -class WaveformsLda: - name = "waveforms_lda" +class ProjectDistribution: + """ + This method is a refactorized mix between: + * old tridesclous code + * some ideas by Charlie Windolf in spikespvae + + The idea is : + * project the waveform (or features) samples on a 1d axis (using LDA for instance). + * check that it is the same or not distribution (diptest, distrib_overlap, ...) 
+ + + """ + name = "project_distribution" @staticmethod def merge( @@ -564,6 +575,6 @@ def merge( find_pair_method_list = [ - WaveformsLda, + ProjectDistribution, ] find_pair_method_dict = {e.name: e for e in find_pair_method_list} diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py index d3e630a165..dc649cec97 100644 --- a/src/spikeinterface/sortingcomponents/clustering/split.py +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -13,6 +13,9 @@ from .isocut5 import isocut5 +# important all DEBUG and matplotlib are left in the code intentionally + + def split_clusters( peak_labels, recording, @@ -25,7 +28,7 @@ def split_clusters( **job_kwargs, ): """ - Run recusrsively or not in a multi process pool a local split method. + Run recusrsively (or not) in a multi process pool a local split method. Parameters ---------- @@ -151,11 +154,20 @@ def split_function_wrapper(peak_indices): return is_split, local_labels, peak_indices -class HdbscanOnLocalPca: - # @charlie : this is the equivalent of "herding_split()" in DART - # but simplified, flexible and renamed - name = "hdbscan_on_local_pca" +class LocalFeatureClustering: + """ + This method is a refactorized mix between: + * old tridesclous code + * "herding_split()" in DART/spikepsvae by Charlie Windolf + + The idea simple : + * agregate features (svd or even waveforms) with sparse channel. + * run a local feature reduction (pca or svd) + * try a new split (hdscan or isocut5) + """ + + name = "local_feature_clustering" @staticmethod def split( @@ -170,6 +182,8 @@ def split( min_cluster_size=25, min_samples=25, n_pca_features=2, + minimum_common_channels=2, + ): local_labels = np.zeros(peak_indices.size, dtype=np.int64) @@ -183,8 +197,7 @@ def split( target_channels = np.flatnonzero(np.all(neighbours_mask[local_chans, :], axis=0)) # TODO fix this a better way, this when cluster have too few overlapping channels - minimum_channels = 2 - if target_channels.size < minimum_channels: + if target_channels.size < minimum_common_channels: return False, None aligned_wfs, dont_have_channels = aggregate_sparse_features( @@ -260,6 +273,6 @@ def split( split_methods_list = [ - HdbscanOnLocalPca, + LocalFeatureClustering, ] split_methods_dict = {e.name: e for e in split_methods_list} diff --git a/src/spikeinterface/sortingcomponents/tests/test_merge.py b/src/spikeinterface/sortingcomponents/tests/test_merge.py new file mode 100644 index 0000000000..b7a669a263 --- /dev/null +++ b/src/spikeinterface/sortingcomponents/tests/test_merge.py @@ -0,0 +1,13 @@ +import pytest +import numpy as np + +from spikeinterface.sortingcomponents.clustering.split import split_clusters + +# no proper test at the moment this is used in tridesclous2 + +def test_merge(): + pass + + +if __name__ == "__main__": + test_merge() diff --git a/src/spikeinterface/sortingcomponents/tests/test_split.py b/src/spikeinterface/sortingcomponents/tests/test_split.py index ed5e756469..ca5e5b57e7 100644 --- a/src/spikeinterface/sortingcomponents/tests/test_split.py +++ b/src/spikeinterface/sortingcomponents/tests/test_split.py @@ -3,6 +3,7 @@ from spikeinterface.sortingcomponents.clustering.split import split_clusters +# no proper test at the moment this is used in tridesclous2 def test_split(): pass From df35c6a2ba3458597e2ec3c47673cbee9e4b7182 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 16:22:42 +0200 Subject: [PATCH 273/322] small fixes in tests --- src/spikeinterface/core/job_tools.py | 
3 +-- src/spikeinterface/core/tests/test_globals.py | 6 +++--- src/spikeinterface/core/tests/test_waveform_extractor.py | 6 ++++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index a13e1dd527..e42f7bb8b4 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -434,8 +434,7 @@ def function_wrapper(args): return _func(segment_index, start_frame, end_frame, _worker_ctx) -# Here some utils - +# Here some utils copy/paste from DART (Charlie Windolf) class MockFuture: """A non-concurrent class for mocking the concurrent.futures API.""" diff --git a/src/spikeinterface/core/tests/test_globals.py b/src/spikeinterface/core/tests/test_globals.py index 8216a4aae6..2c0792c152 100644 --- a/src/spikeinterface/core/tests/test_globals.py +++ b/src/spikeinterface/core/tests/test_globals.py @@ -37,16 +37,16 @@ def test_global_tmp_folder(): def test_global_job_kwargs(): - job_kwargs = dict(n_jobs=4, chunk_duration="1s", progress_bar=True) + job_kwargs = dict(n_jobs=4, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) global_job_kwargs = get_global_job_kwargs() - assert global_job_kwargs == dict(n_jobs=1, chunk_duration="1s", progress_bar=True) + assert global_job_kwargs == dict(n_jobs=1, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) set_global_job_kwargs(**job_kwargs) assert get_global_job_kwargs() == job_kwargs # test updating only one field partial_job_kwargs = dict(n_jobs=2) set_global_job_kwargs(**partial_job_kwargs) global_job_kwargs = get_global_job_kwargs() - assert global_job_kwargs == dict(n_jobs=2, chunk_duration="1s", progress_bar=True) + assert global_job_kwargs == dict(n_jobs=2, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) # test that fix_job_kwargs grabs global kwargs new_job_kwargs = dict(n_jobs=10) job_kwargs_split = fix_job_kwargs(new_job_kwargs) diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index 2bbf5e9b0f..de6c3d752a 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -346,6 +346,8 @@ def test_recordingless(): # delete original recording and rely on rec_attributes if platform.system() != "Windows": + # this avoid reference on the folder + del we, recording shutil.rmtree(cache_folder / "recording1") we_loaded = WaveformExtractor.load(wf_folder, with_recording=False) assert not we_loaded.has_recording() @@ -554,6 +556,6 @@ def test_non_json_object(): # test_WaveformExtractor() # test_extract_waveforms() # test_portability() - # test_recordingless() + test_recordingless() # test_compute_sparsity() - test_non_json_object() + # test_non_json_object() From 94bfb70f528603ecf22d7b499228146792fb33b9 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 16:23:56 +0200 Subject: [PATCH 274/322] in1d to isin --- src/spikeinterface/sortingcomponents/clustering/tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/tools.py b/src/spikeinterface/sortingcomponents/clustering/tools.py index 9a537ab8a8..8e25c9cb7f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/tools.py @@ -83,7 +83,7 @@ def aggregate_sparse_features(peaks, peak_indices, 
sparse_feature, sparse_mask, for chan in np.unique(local_peaks["channel_index"]): sparse_chans = np.flatnonzero(sparse_mask[chan, :]) peak_inds = np.flatnonzero(local_peaks["channel_index"] == chan) - if np.all(np.in1d(target_channels, sparse_chans)): + if np.all(np.isin(target_channels, sparse_chans)): # peaks feature channel have all target_channels source_chans = np.flatnonzero(np.in1d(sparse_chans, target_channels)) aligned_features[peak_inds, :, :] = sparse_feature[peak_indices[peak_inds], :, :][:, :, source_chans] From 48da4ea5f429eac411a331a39d9b468428b70897 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 16:45:52 +0200 Subject: [PATCH 275/322] wip --- src/spikeinterface/sortingcomponents/clustering/tools.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/spikeinterface/sortingcomponents/clustering/tools.py b/src/spikeinterface/sortingcomponents/clustering/tools.py index 8e25c9cb7f..c334daebe3 100644 --- a/src/spikeinterface/sortingcomponents/clustering/tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/tools.py @@ -94,6 +94,7 @@ def aggregate_sparse_features(peaks, peak_indices, sparse_feature, sparse_mask, return aligned_features, dont_have_channels + def compute_template_from_sparse( peaks, labels, labels_set, sparse_waveforms, sparse_mask, total_channels, peak_shifts=None ): From de2d642d5f833b5c3f68df5150311b1ed5eddca8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:41:53 +0000 Subject: [PATCH 276/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/job_tools.py | 1 + src/spikeinterface/core/tests/test_globals.py | 8 +- .../core/tests/test_waveform_extractor.py | 2 +- .../sortingcomponents/clustering/merge.py | 77 ++++++++++--------- .../sortingcomponents/clustering/split.py | 18 ++--- .../sortingcomponents/clustering/tools.py | 1 - .../sortingcomponents/tests/test_merge.py | 1 + .../sortingcomponents/tests/test_split.py | 1 + 8 files changed, 58 insertions(+), 51 deletions(-) diff --git a/src/spikeinterface/core/job_tools.py b/src/spikeinterface/core/job_tools.py index e42f7bb8b4..cf7a67489c 100644 --- a/src/spikeinterface/core/job_tools.py +++ b/src/spikeinterface/core/job_tools.py @@ -436,6 +436,7 @@ def function_wrapper(args): # Here some utils copy/paste from DART (Charlie Windolf) + class MockFuture: """A non-concurrent class for mocking the concurrent.futures API.""" diff --git a/src/spikeinterface/core/tests/test_globals.py b/src/spikeinterface/core/tests/test_globals.py index 2c0792c152..d0672405d6 100644 --- a/src/spikeinterface/core/tests/test_globals.py +++ b/src/spikeinterface/core/tests/test_globals.py @@ -39,14 +39,18 @@ def test_global_tmp_folder(): def test_global_job_kwargs(): job_kwargs = dict(n_jobs=4, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) global_job_kwargs = get_global_job_kwargs() - assert global_job_kwargs == dict(n_jobs=1, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) + assert global_job_kwargs == dict( + n_jobs=1, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1 + ) set_global_job_kwargs(**job_kwargs) assert get_global_job_kwargs() == job_kwargs # test updating only one field partial_job_kwargs = dict(n_jobs=2) set_global_job_kwargs(**partial_job_kwargs) global_job_kwargs = get_global_job_kwargs() - assert global_job_kwargs == 
dict(n_jobs=2, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1) + assert global_job_kwargs == dict( + n_jobs=2, chunk_duration="1s", progress_bar=True, mp_context=None, max_threads_per_process=1 + ) # test that fix_job_kwargs grabs global kwargs new_job_kwargs = dict(n_jobs=10) job_kwargs_split = fix_job_kwargs(new_job_kwargs) diff --git a/src/spikeinterface/core/tests/test_waveform_extractor.py b/src/spikeinterface/core/tests/test_waveform_extractor.py index de6c3d752a..b56180a9e9 100644 --- a/src/spikeinterface/core/tests/test_waveform_extractor.py +++ b/src/spikeinterface/core/tests/test_waveform_extractor.py @@ -558,4 +558,4 @@ def test_non_json_object(): # test_portability() test_recordingless() # test_compute_sparsity() - # test_non_json_object() + # test_non_json_object() diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index 5539ec1051..d892d0723a 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -80,45 +80,46 @@ def merge_clusters( method_kwargs=method_kwargs, **job_kwargs, ) - - + DEBUG = False if DEBUG: import matplotlib.pyplot as plt + fig, ax = plt.subplots() ax.matshow(pair_values) - - pair_values[~pair_mask] = 20 - + + pair_values[~pair_mask] = 20 + import hdbscan + fig, ax = plt.subplots() - clusterer = hdbscan.HDBSCAN(metric='precomputed', min_cluster_size=2, allow_single_cluster=True) + clusterer = hdbscan.HDBSCAN(metric="precomputed", min_cluster_size=2, allow_single_cluster=True) clusterer.fit(pair_values) print(clusterer.labels_) - clusterer.single_linkage_tree_.plot(cmap='viridis', colorbar=True) - #~ fig, ax = plt.subplots() - #~ clusterer.minimum_spanning_tree_.plot(edge_cmap='viridis', - #~ edge_alpha=0.6, - #~ node_size=80, - #~ edge_linewidth=2) - + clusterer.single_linkage_tree_.plot(cmap="viridis", colorbar=True) + # ~ fig, ax = plt.subplots() + # ~ clusterer.minimum_spanning_tree_.plot(edge_cmap='viridis', + # ~ edge_alpha=0.6, + # ~ node_size=80, + # ~ edge_linewidth=2) + graph = clusterer.single_linkage_tree_.to_networkx() import scipy.cluster + fig, ax = plt.subplots() scipy.cluster.hierarchy.dendrogram(clusterer.single_linkage_tree_.to_numpy(), ax=ax) - + import networkx as nx + fig = plt.figure() nx.draw_networkx(graph) plt.show() plt.show() - - - + merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="partial") - # merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full") + # merges = agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full") group_shifts = resolve_final_shifts(labels_set, merges, pair_mask, pair_shift) @@ -223,7 +224,7 @@ def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full" else: raise ValueError - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -232,7 +233,7 @@ def agglomerate_pairs(labels_set, pair_mask, pair_values, connection_mode="full" nx.draw_networkx(sub_graph) plt.show() - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -377,9 +378,10 @@ class ProjectDistribution: The idea is : * project the waveform (or features) samples on a 1d axis (using LDA for instance). * check that it is the same or not distribution (diptest, distrib_overlap, ...) 
- + """ + name = "project_distribution" @staticmethod @@ -412,13 +414,12 @@ def merge( chans1 = np.unique(peaks["channel_index"][inds1]) target_chans1 = np.flatnonzero(np.all(waveforms_sparse_mask[chans1, :], axis=0)) - if inds0.size <40 or inds1.size <40: + if inds0.size < 40 or inds1.size < 40: is_merge = False merge_value = 0 final_shift = 0 return is_merge, label0, label1, final_shift, merge_value - target_chans = np.intersect1d(target_chans0, target_chans1) inds = np.concatenate([inds0, inds1]) @@ -500,20 +501,19 @@ def merge( elif criteria == "distrib_overlap": lim0 = min(np.min(feat0), np.min(feat1)) lim1 = max(np.max(feat0), np.max(feat1)) - bin_size = (lim1 - lim0) / 200. + bin_size = (lim1 - lim0) / 200.0 bins = np.arange(lim0, lim1, bin_size) - + pdf0, _ = np.histogram(feat0, bins=bins, density=True) pdf1, _ = np.histogram(feat1, bins=bins, density=True) pdf0 *= bin_size - pdf1 *= bin_size + pdf1 *= bin_size overlap = np.sum(np.minimum(pdf0, pdf1)) - + is_merge = overlap >= threshold_overlap - + merge_value = 1 - overlap - - + else: raise ValueError(f"bad criteria {criteria}") @@ -522,13 +522,13 @@ def merge( else: final_shift = 0 - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG and is_merge: - # if DEBUG and not is_merge: - # if DEBUG and (overlap > 0.05 and overlap <0.25): - # if label0 == 49 and label1== 65: + # if DEBUG and not is_merge: + # if DEBUG and (overlap > 0.05 and overlap <0.25): + # if label0 == 49 and label1== 65: import matplotlib.pyplot as plt flatten_wfs0 = wfs0.swapaxes(1, 2).reshape(wfs0.shape[0], -1) @@ -551,7 +551,6 @@ def merge( count1, _ = np.histogram(feat1, bins=bins, density=True) pdf0 = count0 * bin_size pdf1 = count1 * bin_size - ax = axs[1] ax.plot(bins[:-1], pdf0, color="C0") @@ -564,13 +563,15 @@ def merge( ax.axvline(l0, color="C0") ax.axvline(l1, color="C1") elif criteria == "distrib_overlap": - print(lim0, lim1, ) + print( + lim0, + lim1, + ) ax.set_title(f"{overlap:.4f} {is_merge}") - ax.plot(bins[:-1], np.minimum(pdf0, pdf1), ls='--', color='k') + ax.plot(bins[:-1], np.minimum(pdf0, pdf1), ls="--", color="k") plt.show() - return is_merge, label0, label1, final_shift, merge_value diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py index dc649cec97..9836e9110f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/split.py +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -154,13 +154,12 @@ def split_function_wrapper(peak_indices): return is_split, local_labels, peak_indices - class LocalFeatureClustering: """ This method is a refactorized mix between: * old tridesclous code * "herding_split()" in DART/spikepsvae by Charlie Windolf - + The idea simple : * agregate features (svd or even waveforms) with sparse channel. 
* run a local feature reduction (pca or svd) @@ -183,7 +182,6 @@ def split( min_samples=25, n_pca_features=2, minimum_common_channels=2, - ): local_labels = np.zeros(peak_indices.size, dtype=np.int64) @@ -218,8 +216,12 @@ def split( final_features = TruncatedSVD(n_pca_features).fit_transform(flatten_features) if clusterer == "hdbscan": - clust = HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, allow_single_cluster=True, - cluster_selection_method="leaf") + clust = HDBSCAN( + min_cluster_size=min_cluster_size, + min_samples=min_samples, + allow_single_cluster=True, + cluster_selection_method="leaf", + ) clust.fit(final_features) possible_labels = clust.labels_ is_split = np.setdiff1d(possible_labels, [-1]).size > 1 @@ -236,9 +238,7 @@ def split( else: raise ValueError(f"wrong clusterer {clusterer}") - - - # DEBUG = True + # DEBUG = True DEBUG = False if DEBUG: import matplotlib.pyplot as plt @@ -259,7 +259,7 @@ def split( ax = axs[1] ax.plot(flatten_wfs[mask][sl].T, color=colors[k], alpha=0.5) - + axs[0].set_title(f"{clusterer} {is_split}") plt.show() diff --git a/src/spikeinterface/sortingcomponents/clustering/tools.py b/src/spikeinterface/sortingcomponents/clustering/tools.py index c334daebe3..8e25c9cb7f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/tools.py +++ b/src/spikeinterface/sortingcomponents/clustering/tools.py @@ -94,7 +94,6 @@ def aggregate_sparse_features(peaks, peak_indices, sparse_feature, sparse_mask, return aligned_features, dont_have_channels - def compute_template_from_sparse( peaks, labels, labels_set, sparse_waveforms, sparse_mask, total_channels, peak_shifts=None ): diff --git a/src/spikeinterface/sortingcomponents/tests/test_merge.py b/src/spikeinterface/sortingcomponents/tests/test_merge.py index b7a669a263..6b3ea2a901 100644 --- a/src/spikeinterface/sortingcomponents/tests/test_merge.py +++ b/src/spikeinterface/sortingcomponents/tests/test_merge.py @@ -5,6 +5,7 @@ # no proper test at the moment this is used in tridesclous2 + def test_merge(): pass diff --git a/src/spikeinterface/sortingcomponents/tests/test_split.py b/src/spikeinterface/sortingcomponents/tests/test_split.py index ca5e5b57e7..5953f74e24 100644 --- a/src/spikeinterface/sortingcomponents/tests/test_split.py +++ b/src/spikeinterface/sortingcomponents/tests/test_split.py @@ -5,6 +5,7 @@ # no proper test at the moment this is used in tridesclous2 + def test_split(): pass From f2fe6bbcedc5a1cca38918444afe52e3ae1bec19 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:42:38 -0400 Subject: [PATCH 277/322] assert typo fixes round 1 --- src/spikeinterface/core/base.py | 6 +-- src/spikeinterface/core/baserecording.py | 6 +-- src/spikeinterface/core/basesorting.py | 2 +- .../core/binaryrecordingextractor.py | 2 +- .../core/channelsaggregationrecording.py | 4 +- src/spikeinterface/core/channelslice.py | 4 +- .../core/frameslicerecording.py | 2 +- src/spikeinterface/core/frameslicesorting.py | 8 ++-- src/spikeinterface/core/generate.py | 4 +- src/spikeinterface/core/template_tools.py | 41 ++++++++++--------- .../core/unitsaggregationsorting.py | 2 +- 11 files changed, 41 insertions(+), 40 deletions(-) diff --git a/src/spikeinterface/core/base.py b/src/spikeinterface/core/base.py index 8b4f094c20..ba18cf09b6 100644 --- a/src/spikeinterface/core/base.py +++ b/src/spikeinterface/core/base.py @@ -45,7 +45,7 @@ def __init__(self, main_ids: Sequence) -> None: self._kwargs = {} # 'main_ids' will either be channel_ids or units_ids - # 
They is used for properties + # They are used for properties self._main_ids = np.array(main_ids) # dict at object level @@ -984,7 +984,7 @@ def _load_extractor_from_dict(dic) -> BaseExtractor: class_name = None if "kwargs" not in dic: - raise Exception(f"This dict cannot be load into extractor {dic}") + raise Exception(f"This dict cannot be loaded into extractor {dic}") # Create new kwargs to avoid modifying the original dict["kwargs"] new_kwargs = dict() @@ -1005,7 +1005,7 @@ def _load_extractor_from_dict(dic) -> BaseExtractor: assert extractor_class is not None and class_name is not None, "Could not load spikeinterface class" if not _check_same_version(class_name, dic["version"]): warnings.warn( - f"Versions are not the same. This might lead compatibility errors. " + f"Versions are not the same. This might lead to compatibility errors. " f"Using {class_name.split('.')[0]}=={dic['version']} is recommended" ) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index 08f187895b..d3572ef66b 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -305,7 +305,7 @@ def get_traces( if not self.has_scaled(): raise ValueError( - "This recording do not support return_scaled=True (need gain_to_uV and offset_" "to_uV properties)" + "This recording does not support return_scaled=True (need gain_to_uV and offset_" "to_uV properties)" ) else: gains = self.get_property("gain_to_uV") @@ -416,8 +416,8 @@ def set_times(self, times, segment_index=None, with_warning=True): if with_warning: warn( "Setting times with Recording.set_times() is not recommended because " - "times are not always propagated to across preprocessing" - "Use use this carefully!" + "times are not always propagated across preprocessing" + "Use this carefully!" ) def sample_index_to_time(self, sample_ind, segment_index=None): diff --git a/src/spikeinterface/core/basesorting.py b/src/spikeinterface/core/basesorting.py index e6d08d38f7..2a06a699cb 100644 --- a/src/spikeinterface/core/basesorting.py +++ b/src/spikeinterface/core/basesorting.py @@ -170,7 +170,7 @@ def register_recording(self, recording, check_spike_frames=True): if check_spike_frames: if has_exceeding_spikes(recording, self): warnings.warn( - "Some spikes are exceeding the recording's duration! " + "Some spikes exceed the recording's duration! " "Removing these excess spikes with `spikeinterface.curation.remove_excess_spikes()` " "Might be necessary for further postprocessing." 
) diff --git a/src/spikeinterface/core/binaryrecordingextractor.py b/src/spikeinterface/core/binaryrecordingextractor.py index 72a95637f6..b45290caa5 100644 --- a/src/spikeinterface/core/binaryrecordingextractor.py +++ b/src/spikeinterface/core/binaryrecordingextractor.py @@ -91,7 +91,7 @@ def __init__( file_path_list = [Path(file_paths)] if t_starts is not None: - assert len(t_starts) == len(file_path_list), "t_starts must be a list of same size than file_paths" + assert len(t_starts) == len(file_path_list), "t_starts must be a list of the same size as file_paths" t_starts = [float(t_start) for t_start in t_starts] dtype = np.dtype(dtype) diff --git a/src/spikeinterface/core/channelsaggregationrecording.py b/src/spikeinterface/core/channelsaggregationrecording.py index d36e168f8d..8714580821 100644 --- a/src/spikeinterface/core/channelsaggregationrecording.py +++ b/src/spikeinterface/core/channelsaggregationrecording.py @@ -104,11 +104,11 @@ def __init__(self, channel_map, parent_segments): times_kargs0 = parent_segment0.get_times_kwargs() if times_kargs0["time_vector"] is None: for ps in parent_segments: - assert ps.get_times_kwargs()["time_vector"] is None, "All segment should not have times set" + assert ps.get_times_kwargs()["time_vector"] is None, "All segments should not have times set" else: for ps in parent_segments: assert ps.get_times_kwargs()["t_start"] == times_kargs0["t_start"], ( - "All segment should have the same " "t_start" + "All segments should have the same " "t_start" ) BaseRecordingSegment.__init__(self, **times_kargs0) diff --git a/src/spikeinterface/core/channelslice.py b/src/spikeinterface/core/channelslice.py index ebd1b7db03..3a21e356a6 100644 --- a/src/spikeinterface/core/channelslice.py +++ b/src/spikeinterface/core/channelslice.py @@ -35,7 +35,7 @@ def __init__(self, parent_recording, channel_ids=None, renamed_channel_ids=None) ), "ChannelSliceRecording: renamed channel_ids must be the same size" assert ( self._channel_ids.size == np.unique(self._channel_ids).size - ), "ChannelSliceRecording : channel_ids not unique" + ), "ChannelSliceRecording : channel_ids are not unique" sampling_frequency = parent_recording.get_sampling_frequency() @@ -123,7 +123,7 @@ def __init__(self, parent_snippets, channel_ids=None, renamed_channel_ids=None): ), "ChannelSliceSnippets: renamed channel_ids must be the same size" assert ( self._channel_ids.size == np.unique(self._channel_ids).size - ), "ChannelSliceSnippets : channel_ids not unique" + ), "ChannelSliceSnippets : channel_ids are not unique" sampling_frequency = parent_snippets.get_sampling_frequency() diff --git a/src/spikeinterface/core/frameslicerecording.py b/src/spikeinterface/core/frameslicerecording.py index 968f27c6ad..b8574c506f 100644 --- a/src/spikeinterface/core/frameslicerecording.py +++ b/src/spikeinterface/core/frameslicerecording.py @@ -27,7 +27,7 @@ class FrameSliceRecording(BaseRecording): def __init__(self, parent_recording, start_frame=None, end_frame=None): channel_ids = parent_recording.get_channel_ids() - assert parent_recording.get_num_segments() == 1, "FrameSliceRecording work only with one segment" + assert parent_recording.get_num_segments() == 1, "FrameSliceRecording only works with one segment" parent_size = parent_recording.get_num_samples(0) if start_frame is None: diff --git a/src/spikeinterface/core/frameslicesorting.py b/src/spikeinterface/core/frameslicesorting.py index 5da5350f06..ed1391b0e2 100644 --- a/src/spikeinterface/core/frameslicesorting.py +++ 
b/src/spikeinterface/core/frameslicesorting.py @@ -36,7 +36,7 @@ class FrameSliceSorting(BaseSorting): def __init__(self, parent_sorting, start_frame=None, end_frame=None, check_spike_frames=True): unit_ids = parent_sorting.get_unit_ids() - assert parent_sorting.get_num_segments() == 1, "FrameSliceSorting work only with one segment" + assert parent_sorting.get_num_segments() == 1, "FrameSliceSorting only works with one segment" if start_frame is None: start_frame = 0 @@ -49,10 +49,10 @@ def __init__(self, parent_sorting, start_frame=None, end_frame=None, check_spike end_frame = parent_n_samples assert ( end_frame <= parent_n_samples - ), "`end_frame` should be smaller than the sortings total number of samples." + ), "`end_frame` should be smaller than the sortings' total number of samples." assert ( start_frame <= parent_n_samples - ), "`start_frame` should be smaller than the sortings total number of samples." + ), "`start_frame` should be smaller than the sortings' total number of samples." if check_spike_frames and has_exceeding_spikes(parent_sorting._recording, parent_sorting): raise ValueError( "The sorting object has spikes exceeding the recording duration. You have to remove those spikes " @@ -67,7 +67,7 @@ def __init__(self, parent_sorting, start_frame=None, end_frame=None, check_spike end_frame = max_spike_time + 1 assert start_frame < end_frame, ( - "`start_frame` should be greater than `end_frame`. " + "`start_frame` should be less than `end_frame`. " "This may be due to start_frame >= max_spike_time, if the end frame " "was not specified explicitly." ) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 06a5ec96ec..0c67404069 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -1101,11 +1101,11 @@ def __init__( # handle also upsampling and jitter upsample_factor = templates.shape[3] elif templates.ndim == 5: - # handle also dirft + # handle also drift raise NotImplementedError("Drift will be implented soon...") # upsample_factor = templates.shape[3] else: - raise ValueError("templates have wring dim should 3 or 4") + raise ValueError("templates have wrong dim should 3 or 4") if upsample_factor is not None: assert upsample_vector is not None diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py index 95278b76da..552642751c 100644 --- a/src/spikeinterface/core/template_tools.py +++ b/src/spikeinterface/core/template_tools.py @@ -1,3 +1,4 @@ +from __future__ import annotations import numpy as np import warnings @@ -5,7 +6,7 @@ from .recording_tools import get_channel_distances, get_noise_levels -def get_template_amplitudes(waveform_extractor, peak_sign: str = "neg", mode: str = "extremum"): +def get_template_amplitudes(waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "extremum"): """ Get amplitude per channel for each unit. 
@@ -13,9 +14,9 @@ def get_template_amplitudes(waveform_extractor, peak_sign: str = "neg", mode: st ---------- waveform_extractor: WaveformExtractor The waveform extractor - peak_sign: str - Sign of the template to compute best channels ('neg', 'pos', 'both') - mode: str + peak_sign: "neg" | "pos" | "both", default: "neg" + Sign of the template to compute best channels + mode: "extremum" | "at_index", default: "extremum" 'extremum': max or min 'at_index': take value at spike index @@ -24,8 +25,8 @@ def get_template_amplitudes(waveform_extractor, peak_sign: str = "neg", mode: st peak_values: dict Dictionary with unit ids as keys and template amplitudes as values """ - assert peak_sign in ("both", "neg", "pos") - assert mode in ("extremum", "at_index") + assert peak_sign in ("both", "neg", "pos"), "'peak_sign' must be 'both', 'neg', or 'pos'" + assert mode in ("extremum", "at_index"), "'mode' must be 'extremum' or 'at_index'" unit_ids = waveform_extractor.sorting.unit_ids before = waveform_extractor.nbefore @@ -57,7 +58,7 @@ def get_template_amplitudes(waveform_extractor, peak_sign: str = "neg", mode: st def get_template_extremum_channel( - waveform_extractor, peak_sign: str = "neg", mode: str = "extremum", outputs: str = "id" + waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "extremum", outputs: "id" | "index" = "id" ): """ Compute the channel with the extremum peak for each unit. @@ -66,12 +67,12 @@ def get_template_extremum_channel( ---------- waveform_extractor: WaveformExtractor The waveform extractor - peak_sign: str - Sign of the template to compute best channels ('neg', 'pos', 'both') - mode: str + peak_sign: "neg" | "pos" | "both", default: "neg" + Sign of the template to compute best channels + mode: "extremum" | "at_index", default: "extremum" 'extremum': max or min 'at_index': take value at spike index - outputs: str + outputs: "id" | "index", default: "id" * 'id': channel id * 'index': channel index @@ -159,7 +160,7 @@ def get_template_channel_sparsity( get_template_channel_sparsity.__doc__ = get_template_channel_sparsity.__doc__.format(_sparsity_doc) -def get_template_extremum_channel_peak_shift(waveform_extractor, peak_sign: str = "neg"): +def get_template_extremum_channel_peak_shift(waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg"): """ In some situations spike sorters could return a spike index with a small shift related to the waveform peak. This function estimates and return these alignment shifts for the mean template. @@ -169,8 +170,8 @@ def get_template_extremum_channel_peak_shift(waveform_extractor, peak_sign: str ---------- waveform_extractor: WaveformExtractor The waveform extractor - peak_sign: str - Sign of the template to compute best channels ('neg', 'pos', 'both') + peak_sign: "neg" | "pos" | "both", default: "neg" + Sign of the template to compute best channels Returns ------- @@ -203,7 +204,7 @@ def get_template_extremum_channel_peak_shift(waveform_extractor, peak_sign: str return shifts -def get_template_extremum_amplitude(waveform_extractor, peak_sign: str = "neg", mode: str = "at_index"): +def get_template_extremum_amplitude(waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "at_index"): """ Computes amplitudes on the best channel. 
@@ -211,9 +212,9 @@ def get_template_extremum_amplitude(waveform_extractor, peak_sign: str = "neg", ---------- waveform_extractor: WaveformExtractor The waveform extractor - peak_sign: str - Sign of the template to compute best channels ('neg', 'pos', 'both') - mode: str + peak_sign: "neg" | "pos" | "both" + Sign of the template to compute best channels + mode: "extremum" | "at_index", default: "at_index" Where the amplitude is computed 'extremum': max or min 'at_index': take value at spike index @@ -223,8 +224,8 @@ def get_template_extremum_amplitude(waveform_extractor, peak_sign: str = "neg", amplitudes: dict Dictionary with unit ids as keys and amplitudes as values """ - assert peak_sign in ("both", "neg", "pos") - assert mode in ("extremum", "at_index") + assert peak_sign in ("both", "neg", "pos"), "'peak_sign' must be 'neg' or 'pos' or 'both'" + assert mode in ("extremum", "at_index"), "'mode' must be 'extremum' or 'at_index'" unit_ids = waveform_extractor.sorting.unit_ids before = waveform_extractor.nbefore diff --git a/src/spikeinterface/core/unitsaggregationsorting.py b/src/spikeinterface/core/unitsaggregationsorting.py index 32158f00df..4e98864ba9 100644 --- a/src/spikeinterface/core/unitsaggregationsorting.py +++ b/src/spikeinterface/core/unitsaggregationsorting.py @@ -95,7 +95,7 @@ def __init__(self, sorting_list, renamed_unit_ids=None): try: property_dict[prop_name] = np.concatenate((property_dict[prop_name], values)) except Exception as e: - print(f"Skipping property '{prop_name}' for shape inconsistency") + print(f"Skipping property '{prop_name}' due to shape inconsistency") del property_dict[prop_name] break for prop_name, prop_values in property_dict.items(): From 2417b9af67a652f38e32cf24f749f9c7706554e9 Mon Sep 17 00:00:00 2001 From: zm711 <92116279+zm711@users.noreply.github.com> Date: Thu, 5 Oct 2023 12:11:01 -0400 Subject: [PATCH 278/322] add asserts msgs and fix typos --- src/spikeinterface/preprocessing/clip.py | 2 +- src/spikeinterface/preprocessing/common_reference.py | 2 +- .../preprocessing/detect_bad_channels.py | 4 ++-- src/spikeinterface/preprocessing/filter.py | 6 +++--- src/spikeinterface/preprocessing/filter_opencl.py | 12 ++++++------ .../preprocessing/highpass_spatial_filter.py | 2 +- src/spikeinterface/preprocessing/normalize_scale.py | 4 ++-- src/spikeinterface/preprocessing/phase_shift.py | 2 +- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/spikeinterface/preprocessing/clip.py b/src/spikeinterface/preprocessing/clip.py index a2349c1ee9..cc18d51d2e 100644 --- a/src/spikeinterface/preprocessing/clip.py +++ b/src/spikeinterface/preprocessing/clip.py @@ -97,7 +97,7 @@ def __init__( chunk_size=500, seed=0, ): - assert direction in ("upper", "lower", "both") + assert direction in ("upper", "lower", "both"), "'direction' must be 'upper', 'lower', or 'both'" if fill_value is None or quantile_threshold is not None: random_data = get_random_data_chunks( diff --git a/src/spikeinterface/preprocessing/common_reference.py b/src/spikeinterface/preprocessing/common_reference.py index d2ac227217..6d6ce256de 100644 --- a/src/spikeinterface/preprocessing/common_reference.py +++ b/src/spikeinterface/preprocessing/common_reference.py @@ -83,7 +83,7 @@ def __init__( ref_channel_ids = np.asarray(ref_channel_ids) assert np.all( [ch in recording.get_channel_ids() for ch in ref_channel_ids] - ), "Some wrong 'ref_channel_ids'!" + ), "Some 'ref_channel_ids' are wrong!" 
elif reference == "local": assert groups is None, "With 'local' CAR, the group option should not be used." closest_inds, dist = get_closest_channels(recording) diff --git a/src/spikeinterface/preprocessing/detect_bad_channels.py b/src/spikeinterface/preprocessing/detect_bad_channels.py index cc4e8601e2..e6e2836a35 100644 --- a/src/spikeinterface/preprocessing/detect_bad_channels.py +++ b/src/spikeinterface/preprocessing/detect_bad_channels.py @@ -211,9 +211,9 @@ def detect_bad_channels( if bad_channel_ids.size > recording.get_num_channels() / 3: warnings.warn( - "Over 1/3 of channels are detected as bad. In the precense of a high" + "Over 1/3 of channels are detected as bad. In the presence of a high" "number of dead / noisy channels, bad channel detection may fail " - "(erroneously label good channels as dead)." + "(good channels may be erroneously labeled as dead)." ) elif method == "neighborhood_r2": diff --git a/src/spikeinterface/preprocessing/filter.py b/src/spikeinterface/preprocessing/filter.py index 51c1fb4ad6..b31088edf7 100644 --- a/src/spikeinterface/preprocessing/filter.py +++ b/src/spikeinterface/preprocessing/filter.py @@ -71,10 +71,10 @@ def __init__( ): import scipy.signal - assert filter_mode in ("sos", "ba") + assert filter_mode in ("sos", "ba"), "'filter' mode must be 'sos' or 'ba'" fs = recording.get_sampling_frequency() if coeff is None: - assert btype in ("bandpass", "highpass") + assert btype in ("bandpass", "highpass"), "'bytpe' must be 'bandpass' or 'highpass'" # coefficient # self.coeff is 'sos' or 'ab' style filter_coeff = scipy.signal.iirfilter( @@ -258,7 +258,7 @@ def __init__(self, recording, freq=3000, q=30, margin_ms=5.0, dtype=None): if dtype.kind == "u": raise TypeError( "The notch filter only supports signed types. Use the 'dtype' argument" - "to specify a signed type (e.g. 'int16', 'float32'" + "to specify a signed type (e.g. 
'int16', 'float32')" ) BasePreprocessor.__init__(self, recording, dtype=dtype) diff --git a/src/spikeinterface/preprocessing/filter_opencl.py b/src/spikeinterface/preprocessing/filter_opencl.py index 790279d647..d3a08297c6 100644 --- a/src/spikeinterface/preprocessing/filter_opencl.py +++ b/src/spikeinterface/preprocessing/filter_opencl.py @@ -50,9 +50,9 @@ def __init__( margin_ms=5.0, ): assert HAVE_PYOPENCL, "You need to install pyopencl (and GPU driver!!)" - - assert btype in ("bandpass", "lowpass", "highpass", "bandstop") - assert filter_mode in ("sos",) + btype_modes = ("bandpass", "lowpass", "highpass", "bandstop") + assert btype in btype_modes, f"'btype' must be in {btype_modes}" + assert filter_mode in ("sos",), "'filter_mode' must be 'sos'" # coefficient sf = recording.get_sampling_frequency() @@ -96,8 +96,8 @@ def __init__(self, parent_recording_segment, executor, margin): self.margin = margin def get_traces(self, start_frame, end_frame, channel_indices): - assert start_frame is not None, "FilterOpenCLRecording work with fixed chunk_size" - assert end_frame is not None, "FilterOpenCLRecording work with fixed chunk_size" + assert start_frame is not None, "FilterOpenCLRecording only works with fixed chunk_size" + assert end_frame is not None, "FilterOpenCLRecording only works with fixed chunk_size" chunk_size = end_frame - start_frame if chunk_size != self.executor.chunk_size: @@ -157,7 +157,7 @@ def process(self, traces): if traces.shape[0] != self.full_size: if self.full_size is not None: - print(f"Warning : chunk_size have change {self.chunk_size} {traces.shape[0]}, need recompile CL!!!") + print(f"Warning : chunk_size has changed {self.chunk_size} {traces.shape[0]}, need to recompile CL!!!") self.create_buffers_and_compile() event = pyopencl.enqueue_copy(self.queue, self.input_cl, traces) diff --git a/src/spikeinterface/preprocessing/highpass_spatial_filter.py b/src/spikeinterface/preprocessing/highpass_spatial_filter.py index aa98410568..4df4a409bc 100644 --- a/src/spikeinterface/preprocessing/highpass_spatial_filter.py +++ b/src/spikeinterface/preprocessing/highpass_spatial_filter.py @@ -212,7 +212,7 @@ def get_traces(self, start_frame, end_frame, channel_indices): traces = traces * self.taper[np.newaxis, :] # apply actual HP filter - import scipy + import scipy.signal traces = scipy.signal.sosfiltfilt(self.sos_filter, traces, axis=1) diff --git a/src/spikeinterface/preprocessing/normalize_scale.py b/src/spikeinterface/preprocessing/normalize_scale.py index 7d43982853..bd53866b6a 100644 --- a/src/spikeinterface/preprocessing/normalize_scale.py +++ b/src/spikeinterface/preprocessing/normalize_scale.py @@ -68,7 +68,7 @@ def __init__( dtype="float32", **random_chunk_kwargs, ): - assert mode in ("pool_channel", "by_channel") + assert mode in ("pool_channel", "by_channel"), "'mode' must be 'pool_channel' or 'by_channel'" random_data = get_random_data_chunks(recording, **random_chunk_kwargs) @@ -260,7 +260,7 @@ def __init__( dtype="float32", **random_chunk_kwargs, ): - assert mode in ("median+mad", "mean+std") + assert mode in ("median+mad", "mean+std"), "'mode' must be 'median+mad' or 'mean+std'" # fix dtype dtype_ = fix_dtype(recording, dtype) diff --git a/src/spikeinterface/preprocessing/phase_shift.py b/src/spikeinterface/preprocessing/phase_shift.py index 9c8b2589a0..237f32eca4 100644 --- a/src/spikeinterface/preprocessing/phase_shift.py +++ b/src/spikeinterface/preprocessing/phase_shift.py @@ -42,7 +42,7 @@ def __init__(self, recording, margin_ms=40.0, 
inter_sample_shift=None, dtype=Non assert "inter_sample_shift" in recording.get_property_keys(), "'inter_sample_shift' is not a property!" sample_shifts = recording.get_property("inter_sample_shift") else: - assert len(inter_sample_shift) == recording.get_num_channels(), "sample " + assert len(inter_sample_shift) == recording.get_num_channels(), "the 'inter_sample_shift' must be same size at the num_channels " sample_shifts = np.asarray(inter_sample_shift) margin = int(margin_ms * recording.get_sampling_frequency() / 1000.0) From 9db087de50bd4b132b5e42c743dcf17fa8a9106b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:27:04 +0000 Subject: [PATCH 279/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/core/baserecording.py | 3 ++- src/spikeinterface/core/template_tools.py | 13 ++++++++++--- src/spikeinterface/preprocessing/phase_shift.py | 4 +++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/core/baserecording.py b/src/spikeinterface/core/baserecording.py index d3572ef66b..2977211c25 100644 --- a/src/spikeinterface/core/baserecording.py +++ b/src/spikeinterface/core/baserecording.py @@ -305,7 +305,8 @@ def get_traces( if not self.has_scaled(): raise ValueError( - "This recording does not support return_scaled=True (need gain_to_uV and offset_" "to_uV properties)" + "This recording does not support return_scaled=True (need gain_to_uV and offset_" + "to_uV properties)" ) else: gains = self.get_property("gain_to_uV") diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py index 552642751c..b6022e27c0 100644 --- a/src/spikeinterface/core/template_tools.py +++ b/src/spikeinterface/core/template_tools.py @@ -6,7 +6,9 @@ from .recording_tools import get_channel_distances, get_noise_levels -def get_template_amplitudes(waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "extremum"): +def get_template_amplitudes( + waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "extremum" +): """ Get amplitude per channel for each unit. @@ -58,7 +60,10 @@ def get_template_amplitudes(waveform_extractor, peak_sign: "neg" | "pos" | "both def get_template_extremum_channel( - waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "extremum", outputs: "id" | "index" = "id" + waveform_extractor, + peak_sign: "neg" | "pos" | "both" = "neg", + mode: "extremum" | "at_index" = "extremum", + outputs: "id" | "index" = "id", ): """ Compute the channel with the extremum peak for each unit. @@ -204,7 +209,9 @@ def get_template_extremum_channel_peak_shift(waveform_extractor, peak_sign: "neg return shifts -def get_template_extremum_amplitude(waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "at_index"): +def get_template_extremum_amplitude( + waveform_extractor, peak_sign: "neg" | "pos" | "both" = "neg", mode: "extremum" | "at_index" = "at_index" +): """ Computes amplitudes on the best channel. 
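Note on the reworked signatures above: the literal unions (for example peak_sign: "neg" | "pos" | "both" = "neg") only import cleanly because template_tools.py now starts with "from __future__ import annotations" (added earlier in this series), which keeps annotations as strings instead of evaluating them, so the str | str expression is never executed at runtime. A minimal illustrative sketch of the pattern, not taken from the patch and using a hypothetical function name:

    from __future__ import annotations  # defer annotation evaluation (PEP 563)

    def pick_extremum(peak_sign: "neg" | "pos" | "both" = "neg") -> str:
        # Without the future import, "neg" | "pos" would be evaluated at definition
        # time and raise TypeError; with it, the annotation stays a plain string.
        assert peak_sign in ("neg", "pos", "both"), "'peak_sign' must be 'neg', 'pos', or 'both'"
        return peak_sign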
diff --git a/src/spikeinterface/preprocessing/phase_shift.py b/src/spikeinterface/preprocessing/phase_shift.py index 237f32eca4..bdba55038d 100644 --- a/src/spikeinterface/preprocessing/phase_shift.py +++ b/src/spikeinterface/preprocessing/phase_shift.py @@ -42,7 +42,9 @@ def __init__(self, recording, margin_ms=40.0, inter_sample_shift=None, dtype=Non assert "inter_sample_shift" in recording.get_property_keys(), "'inter_sample_shift' is not a property!" sample_shifts = recording.get_property("inter_sample_shift") else: - assert len(inter_sample_shift) == recording.get_num_channels(), "the 'inter_sample_shift' must be same size at the num_channels " + assert ( + len(inter_sample_shift) == recording.get_num_channels() + ), "the 'inter_sample_shift' must be same size at the num_channels " sample_shifts = np.asarray(inter_sample_shift) margin = int(margin_ms * recording.get_sampling_frequency() / 1000.0) From f6e2f59dfeb69bc0d8a9c57d33470a3faa26a2e8 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 18:27:16 +0200 Subject: [PATCH 280/322] wip --- .../sorters/internal/tridesclous2.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 46a7aa843f..e2f4812222 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -20,14 +20,8 @@ class Tridesclous2Sorter(ComponentsBasedSorter): _default_params = { "apply_preprocessing": True, "waveforms" : {"ms_before": 0.5, "ms_after": 1.5, }, - "filtering": {"freq_min": 300, "freq_max": 8000.0}, + "filtering": {"freq_min": 300., "freq_max": 8000.0}, "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 1.5, "radius_um": 150.}, - #~ "hdbscan_kwargs": { - #~ "min_cluster_size": 25, - #~ "allow_single_cluster": True, - #~ "core_dist_n_jobs": -1, - #~ "cluster_selection_method": "leaf", - #~ }, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, "svd": {"n_components": 6}, "clustering": { @@ -184,7 +178,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): original_labels, recording, features_folder, - method="hdbscan_on_local_pca", + method="local_feature_clustering", method_kwargs=dict( # clusterer="hdbscan", clusterer="isocut5", @@ -217,7 +211,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): features_folder, radius_um=merge_radius_um, - method="waveforms_lda", + method="project_distribution", method_kwargs=dict( # neighbours_mask=neighbours_mask, waveforms_sparse_mask=sparse_mask, @@ -230,8 +224,10 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # criteria='diptest', # threshold_diptest=0.5, - criteria="percentile", - threshold_percentile=80., + # criteria="percentile", + # threshold_percentile=80., + criteria="distrib_overlap", + threshold_overlap=0.4, # num_shift=0 num_shift=2, From 57078791382deed5fe73c4799bd352e6c3e0ee80 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Thu, 5 Oct 2023 18:39:27 +0200 Subject: [PATCH 281/322] Fix ipywidgets with explicit dense/sparse waveforms --- .../widgets/tests/test_widgets.py | 102 +++++++++--------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index f44878927d..da16136fa9 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ 
b/src/spikeinterface/widgets/tests/test_widgets.py @@ -49,28 +49,28 @@ def setUpClass(cls): cls.num_units = len(cls.sorting.get_unit_ids()) if (cache_folder / "mearec_test").is_dir(): - cls.we = load_waveforms(cache_folder / "mearec_test") + cls.we_dense = load_waveforms(cache_folder / "mearec_test") else: - cls.we = extract_waveforms(cls.recording, cls.sorting, cache_folder / "mearec_test") + cls.we_dense = extract_waveforms(cls.recording, cls.sorting, cache_folder / "mearec_test", sparse=False) sw.set_default_plotter_backend("matplotlib") metric_names = ["snr", "isi_violation", "num_spikes"] - _ = compute_spike_amplitudes(cls.we) - _ = compute_unit_locations(cls.we) - _ = compute_spike_locations(cls.we) - _ = compute_quality_metrics(cls.we, metric_names=metric_names) - _ = compute_template_metrics(cls.we) - _ = compute_correlograms(cls.we) - _ = compute_template_similarity(cls.we) + _ = compute_spike_amplitudes(cls.we_dense) + _ = compute_unit_locations(cls.we_dense) + _ = compute_spike_locations(cls.we_dense) + _ = compute_quality_metrics(cls.we_dense, metric_names=metric_names) + _ = compute_template_metrics(cls.we_dense) + _ = compute_correlograms(cls.we_dense) + _ = compute_template_similarity(cls.we_dense) # make sparse waveforms - cls.sparsity_radius = compute_sparsity(cls.we, method="radius", radius_um=50) - cls.sparsity_best = compute_sparsity(cls.we, method="best_channels", num_channels=5) + cls.sparsity_radius = compute_sparsity(cls.we_dense, method="radius", radius_um=50) + cls.sparsity_best = compute_sparsity(cls.we_dense, method="best_channels", num_channels=5) if (cache_folder / "mearec_test_sparse").is_dir(): cls.we_sparse = load_waveforms(cache_folder / "mearec_test_sparse") else: - cls.we_sparse = cls.we.save(folder=cache_folder / "mearec_test_sparse", sparsity=cls.sparsity_radius) + cls.we_sparse = cls.we_dense.save(folder=cache_folder / "mearec_test_sparse", sparsity=cls.sparsity_radius) cls.skip_backends = ["ipywidgets", "ephyviewer"] @@ -124,17 +124,17 @@ def test_plot_unit_waveforms(self): possible_backends = list(sw.UnitWaveformsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_unit_waveforms(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_unit_waveforms(self.we_dense, backend=backend, **self.backend_kwargs[backend]) unit_ids = self.sorting.unit_ids[:6] sw.plot_unit_waveforms( - self.we, + self.we_dense, sparsity=self.sparsity_radius, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend], ) sw.plot_unit_waveforms( - self.we, + self.we_dense, sparsity=self.sparsity_best, unit_ids=unit_ids, backend=backend, @@ -148,10 +148,10 @@ def test_plot_unit_templates(self): possible_backends = list(sw.UnitWaveformsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_unit_templates(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_unit_templates(self.we_dense, backend=backend, **self.backend_kwargs[backend]) unit_ids = self.sorting.unit_ids[:6] sw.plot_unit_templates( - self.we, + self.we_dense, sparsity=self.sparsity_radius, unit_ids=unit_ids, backend=backend, @@ -171,7 +171,7 @@ def test_plot_unit_waveforms_density_map(self): if backend not in self.skip_backends: unit_ids = self.sorting.unit_ids[:2] sw.plot_unit_waveforms_density_map( - self.we, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend] + self.we_dense, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend] ) def 
test_plot_unit_waveforms_density_map_sparsity_radius(self): @@ -180,7 +180,7 @@ def test_plot_unit_waveforms_density_map_sparsity_radius(self): if backend not in self.skip_backends: unit_ids = self.sorting.unit_ids[:2] sw.plot_unit_waveforms_density_map( - self.we, + self.we_dense, sparsity=self.sparsity_radius, same_axis=False, unit_ids=unit_ids, @@ -234,11 +234,11 @@ def test_amplitudes(self): possible_backends = list(sw.AmplitudesWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_amplitudes(self.we, backend=backend, **self.backend_kwargs[backend]) - unit_ids = self.we.unit_ids[:4] - sw.plot_amplitudes(self.we, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend]) + sw.plot_amplitudes(self.we_dense, backend=backend, **self.backend_kwargs[backend]) + unit_ids = self.we_dense.unit_ids[:4] + sw.plot_amplitudes(self.we_dense, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend]) sw.plot_amplitudes( - self.we, unit_ids=unit_ids, plot_histograms=True, backend=backend, **self.backend_kwargs[backend] + self.we_dense, unit_ids=unit_ids, plot_histograms=True, backend=backend, **self.backend_kwargs[backend] ) sw.plot_amplitudes( self.we_sparse, @@ -252,9 +252,9 @@ def test_plot_all_amplitudes_distributions(self): possible_backends = list(sw.AllAmplitudesDistributionsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - unit_ids = self.we.unit_ids[:4] + unit_ids = self.we_dense.unit_ids[:4] sw.plot_all_amplitudes_distributions( - self.we, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend] + self.we_dense, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend] ) sw.plot_all_amplitudes_distributions( self.we_sparse, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend] @@ -264,7 +264,7 @@ def test_unit_locations(self): possible_backends = list(sw.UnitLocationsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_unit_locations(self.we, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend]) + sw.plot_unit_locations(self.we_dense, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend]) sw.plot_unit_locations( self.we_sparse, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend] ) @@ -273,7 +273,7 @@ def test_spike_locations(self): possible_backends = list(sw.SpikeLocationsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_spike_locations(self.we, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend]) + sw.plot_spike_locations(self.we_dense, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend]) sw.plot_spike_locations( self.we_sparse, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend] ) @@ -282,28 +282,28 @@ def test_similarity(self): possible_backends = list(sw.TemplateSimilarityWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_template_similarity(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_template_similarity(self.we_dense, backend=backend, **self.backend_kwargs[backend]) sw.plot_template_similarity(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) def test_quality_metrics(self): possible_backends = list(sw.QualityMetricsWidget.get_possible_backends()) for backend in possible_backends: if backend 
not in self.skip_backends: - sw.plot_quality_metrics(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_quality_metrics(self.we_dense, backend=backend, **self.backend_kwargs[backend]) sw.plot_quality_metrics(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) def test_template_metrics(self): possible_backends = list(sw.TemplateMetricsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_template_metrics(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_template_metrics(self.we_dense, backend=backend, **self.backend_kwargs[backend]) sw.plot_template_metrics(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) def test_plot_unit_depths(self): possible_backends = list(sw.UnitDepthsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_unit_depths(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_unit_depths(self.we_dense, backend=backend, **self.backend_kwargs[backend]) sw.plot_unit_depths(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) def test_plot_unit_summary(self): @@ -311,17 +311,17 @@ def test_plot_unit_summary(self): for backend in possible_backends: if backend not in self.skip_backends: sw.plot_unit_summary( - self.we, self.we.sorting.unit_ids[0], backend=backend, **self.backend_kwargs[backend] + self.we_dense, self.we_dense.sorting.unit_ids[0], backend=backend, **self.backend_kwargs[backend] ) sw.plot_unit_summary( - self.we_sparse, self.we.sorting.unit_ids[0], backend=backend, **self.backend_kwargs[backend] + self.we_sparse, self.we_sparse.sorting.unit_ids[0], backend=backend, **self.backend_kwargs[backend] ) def test_sorting_summary(self): possible_backends = list(sw.SortingSummaryWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_sorting_summary(self.we, backend=backend, **self.backend_kwargs[backend]) + sw.plot_sorting_summary(self.we_dense, backend=backend, **self.backend_kwargs[backend]) sw.plot_sorting_summary(self.we_sparse, backend=backend, **self.backend_kwargs[backend]) def test_plot_agreement_matrix(self): @@ -355,23 +355,23 @@ def test_plot_rasters(self): mytest = TestWidgets() mytest.setUpClass() - # mytest.test_plot_unit_waveforms_density_map() - # mytest.test_plot_unit_summary() - # mytest.test_plot_all_amplitudes_distributions() - # mytest.test_plot_traces() - # mytest.test_plot_unit_waveforms() - # mytest.test_plot_unit_templates() - # mytest.test_plot_unit_templates() - # mytest.test_plot_unit_depths() - # mytest.test_plot_unit_templates() - # mytest.test_plot_unit_summary() - # mytest.test_unit_locations() - # mytest.test_quality_metrics() - # mytest.test_template_metrics() - # mytest.test_amplitudes() - # mytest.test_plot_agreement_matrix() - # mytest.test_plot_confusion_matrix() - # mytest.test_plot_probe_map() + mytest.test_plot_unit_waveforms_density_map() + mytest.test_plot_unit_summary() + mytest.test_plot_all_amplitudes_distributions() + mytest.test_plot_traces() + mytest.test_plot_unit_waveforms() + mytest.test_plot_unit_templates() + mytest.test_plot_unit_templates() + mytest.test_plot_unit_depths() + mytest.test_plot_unit_templates() + mytest.test_plot_unit_summary() + mytest.test_unit_locations() + mytest.test_quality_metrics() + mytest.test_template_metrics() + mytest.test_amplitudes() + mytest.test_plot_agreement_matrix() + mytest.test_plot_confusion_matrix() + 
mytest.test_plot_probe_map() mytest.test_plot_rasters() # plt.ion() From 3ac58086dd8d46e02d433ee840378617d5d42e9d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 06:31:41 +0000 Subject: [PATCH 282/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/tests/test_widgets.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index da16136fa9..ca53d85648 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -238,7 +238,11 @@ def test_amplitudes(self): unit_ids = self.we_dense.unit_ids[:4] sw.plot_amplitudes(self.we_dense, unit_ids=unit_ids, backend=backend, **self.backend_kwargs[backend]) sw.plot_amplitudes( - self.we_dense, unit_ids=unit_ids, plot_histograms=True, backend=backend, **self.backend_kwargs[backend] + self.we_dense, + unit_ids=unit_ids, + plot_histograms=True, + backend=backend, + **self.backend_kwargs[backend], ) sw.plot_amplitudes( self.we_sparse, @@ -264,7 +268,9 @@ def test_unit_locations(self): possible_backends = list(sw.UnitLocationsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_unit_locations(self.we_dense, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend]) + sw.plot_unit_locations( + self.we_dense, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend] + ) sw.plot_unit_locations( self.we_sparse, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend] ) @@ -273,7 +279,9 @@ def test_spike_locations(self): possible_backends = list(sw.SpikeLocationsWidget.get_possible_backends()) for backend in possible_backends: if backend not in self.skip_backends: - sw.plot_spike_locations(self.we_dense, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend]) + sw.plot_spike_locations( + self.we_dense, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend] + ) sw.plot_spike_locations( self.we_sparse, with_channel_ids=True, backend=backend, **self.backend_kwargs[backend] ) From 3448e1ec4b19d5f5091ba6a2792362cf35a9f941 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 08:57:56 +0200 Subject: [PATCH 283/322] Fix plot_traces with ipywidgets when channel_ids is not None --- src/spikeinterface/widgets/traces.py | 10 ++++++---- src/spikeinterface/widgets/utils_ipywidgets.py | 16 ++++++++++++++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index 9b6716e8f3..2783b6a369 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -138,9 +138,10 @@ def __init__( # colors is a nested dict by layer and channels # lets first create black for all channels and layer + # all color are generated for ipywidgets colors = {} for k in layer_keys: - colors[k] = {chan_id: "k" for chan_id in channel_ids} + colors[k] = {chan_id: "k" for chan_id in rec0.channel_ids} if color_groups: channel_groups = rec0.get_channel_groups(channel_ids=channel_ids) @@ -149,7 +150,7 @@ def __init__( group_colors = get_some_colors(groups, color_engine="auto") channel_colors = {} - for i, chan_id in enumerate(channel_ids): + for i, chan_id in enumerate(rec0.channel_ids): group = 
channel_groups[i] channel_colors[chan_id] = group_colors[group] @@ -159,12 +160,12 @@ def __init__( elif color is not None: # old behavior one color for all channel # if multi layer then black for all - colors[layer_keys[0]] = {chan_id: color for chan_id in channel_ids} + colors[layer_keys[0]] = {chan_id: color for chan_id in rec0.channel_ids} elif color is None and len(recordings) > 1: # several layer layer_colors = get_some_colors(layer_keys) for k in layer_keys: - colors[k] = {chan_id: layer_colors[k] for chan_id in channel_ids} + colors[k] = {chan_id: layer_colors[k] for chan_id in rec0.channel_ids} else: # color is None unique layer : all channels black pass @@ -336,6 +337,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): ) self.scaler = ScaleWidget() self.channel_selector = ChannelSelector(self.rec0.channel_ids) + self.channel_selector.value = data_plot["channel_ids"] left_sidebar = W.VBox( children=[ diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 6e872eca55..5bbe31302c 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -235,8 +235,7 @@ def __init__(self, channel_ids, **kwargs): self.slider.observe(self.on_slider_changed, names=["value"], type="change") self.selector.observe(self.on_selector_changed, names=["value"], type="change") - # TODO external value change - # self.observe(self.value_changed, names=['value'], type="change") + self.observe(self.value_changed, names=['value'], type="change") def on_slider_changed(self, change=None): i0, i1 = self.slider.value @@ -259,6 +258,19 @@ def on_selector_changed(self, change=None): self.slider.observe(self.on_slider_changed, names=["value"], type="change") self.value = channel_ids + + def value_changed(self, change=None): + self.selector.unobserve(self.on_selector_changed, names=["value"], type="change") + self.selector.value = change["new"] + self.selector.observe(self.on_selector_changed, names=["value"], type="change") + + channel_ids = self.selector.value + self.slider.unobserve(self.on_slider_changed, names=["value"], type="change") + i0 = self.channel_ids.index(channel_ids[0]) + i1 = self.channel_ids.index(channel_ids[-1]) + 1 + self.slider.value = (i0, i1) + self.slider.observe(self.on_slider_changed, names=["value"], type="change") + class ScaleWidget(W.VBox): From e51bb75f226c7c2be97c4a6ceeae460a7c610efe Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 09:25:35 +0200 Subject: [PATCH 284/322] Fix order_channel_by_depth in ipywidgets Fix order_channel_by_depth when channel_ids is given. 
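When the caller passes an explicit channel_ids list, those ids now get put back into the same order as the depth-sorted recording before traces are fetched. The reordering in the diff below boils down to the following sketch (illustrative only; it assumes rec0 has already been passed through depth_order and that numpy is imported as np):

    channel_ids_ = list(rec0.channel_ids)
    order = np.argsort([channel_ids_.index(c) for c in channel_ids])
    channel_ids = list(np.array(channel_ids)[order])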
--- src/spikeinterface/widgets/traces.py | 58 +++++++++++++++------------- 1 file changed, 32 insertions(+), 26 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index 2783b6a369..802f90c62a 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -88,6 +88,26 @@ def __init__( else: raise ValueError("plot_traces recording must be recording or dict or list") + if "location" in rec0.get_property_keys(): + channel_locations = rec0.get_channel_locations() + else: + channel_locations = None + + if order_channel_by_depth and channel_locations is not None: + from ..preprocessing import depth_order + rec0 = depth_order(rec0) + recordings = {k: depth_order(rec) for k, rec in recordings.items()} + + if channel_ids is not None: + # ensure that channel_ids are in the good order + channel_ids_ = list(rec0.channel_ids) + order = np.argsort([channel_ids_.index(c) for c in channel_ids]) + channel_ids = list(np.array(channel_ids)[order]) + + if channel_ids is None: + channel_ids = rec0.channel_ids + + layer_keys = list(recordings.keys()) if segment_index is None: @@ -95,19 +115,6 @@ def __init__( raise ValueError("You must provide segment_index=...") segment_index = 0 - if channel_ids is None: - channel_ids = rec0.channel_ids - - if "location" in rec0.get_property_keys(): - channel_locations = rec0.get_channel_locations() - else: - channel_locations = None - - if order_channel_by_depth: - if channel_locations is not None: - order, _ = order_channels_by_depth(rec0, channel_ids) - else: - order = None fs = rec0.get_sampling_frequency() if time_range is None: @@ -124,7 +131,7 @@ def __init__( cmap = cmap times, list_traces, frame_range, channel_ids = _get_trace_list( - recordings, channel_ids, time_range, segment_index, order, return_scaled + recordings, channel_ids, time_range, segment_index, return_scaled=return_scaled ) # stat for auto scaling done on the first layer @@ -202,7 +209,6 @@ def __init__( show_channel_ids=show_channel_ids, add_legend=add_legend, order_channel_by_depth=order_channel_by_depth, - order=order, tile_size=tile_size, num_timepoints_per_row=int(seconds_per_row * fs), return_scaled=return_scaled, @@ -337,7 +343,7 @@ def plot_ipywidgets(self, data_plot, **backend_kwargs): ) self.scaler = ScaleWidget() self.channel_selector = ChannelSelector(self.rec0.channel_ids) - self.channel_selector.value = data_plot["channel_ids"] + self.channel_selector.value = list(data_plot["channel_ids"]) left_sidebar = W.VBox( children=[ @@ -400,17 +406,17 @@ def _mode_changed(self, change=None): def _retrieve_traces(self, change=None): channel_ids = np.array(self.channel_selector.value) - if self.data_plot["order_channel_by_depth"]: - order, _ = order_channels_by_depth(self.rec0, channel_ids) - else: - order = None + # if self.data_plot["order_channel_by_depth"]: + # order, _ = order_channels_by_depth(self.rec0, channel_ids) + # else: + # order = None start_frame, end_frame, segment_index = self.time_slider.value time_range = np.array([start_frame, end_frame]) / self.rec0.sampling_frequency self._selected_recordings = {k: self.recordings[k] for k in self._get_layers()} times, list_traces, frame_range, channel_ids = _get_trace_list( - self._selected_recordings, channel_ids, time_range, segment_index, order, self.return_scaled + self._selected_recordings, channel_ids, time_range, segment_index, return_scaled=self.return_scaled ) self._channel_ids = channel_ids @@ -525,7 +531,7 @@ def plot_ephyviewer(self, data_plot, 
**backend_kwargs): app.exec() -def _get_trace_list(recordings, channel_ids, time_range, segment_index, order=None, return_scaled=False): +def _get_trace_list(recordings, channel_ids, time_range, segment_index, return_scaled=False): # function also used in ipywidgets plotter k0 = list(recordings.keys())[0] rec0 = recordings[k0] @@ -552,11 +558,11 @@ def _get_trace_list(recordings, channel_ids, time_range, segment_index, order=No return_scaled=return_scaled, ) - if order is not None: - traces = traces[:, order] + # if order is not None: + # traces = traces[:, order] list_traces.append(traces) - if order is not None: - channel_ids = np.array(channel_ids)[order] + # if order is not None: + # channel_ids = np.array(channel_ids)[order] return times, list_traces, frame_range, channel_ids From bc3234cc4ce7d35cd62e0c29e33e38002f43ecd0 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 09:52:20 +0200 Subject: [PATCH 285/322] More fix in widgets due to sparse=True by default --- .../tests/test_widgets_legacy.py | 6 +- .../widgets/tests/test_widgets.py | 57 +++++++++---------- 2 files changed, 31 insertions(+), 32 deletions(-) diff --git a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py index 39eb80e2e5..8814e0131a 100644 --- a/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py +++ b/src/spikeinterface/widgets/_legacy_mpl_widgets/tests/test_widgets_legacy.py @@ -32,10 +32,10 @@ def setUp(self): self.num_units = len(self._sorting.get_unit_ids()) #  self._we = extract_waveforms(self._rec, self._sorting, './toy_example', load_if_exists=True) - if (cache_folder / "mearec_test").is_dir(): - self._we = load_waveforms(cache_folder / "mearec_test") + if (cache_folder / "mearec_test_old_api").is_dir(): + self._we = load_waveforms(cache_folder / "mearec_test_old_api") else: - self._we = extract_waveforms(self._rec, self._sorting, cache_folder / "mearec_test") + self._we = extract_waveforms(self._rec, self._sorting, cache_folder / "mearec_test_old_api", sparse=False) self._amplitudes = compute_spike_amplitudes(self._we, peak_sign="neg", outputs="by_unit") self._gt_comp = sc.compare_sorter_to_ground_truth(self._sorting, self._sorting) diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index ca53d85648..5f1a936a6e 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -48,22 +48,21 @@ def setUpClass(cls): cls.sorting = se.MEArecSortingExtractor(local_path) cls.num_units = len(cls.sorting.get_unit_ids()) - if (cache_folder / "mearec_test").is_dir(): - cls.we_dense = load_waveforms(cache_folder / "mearec_test") + if (cache_folder / "mearec_test_dense").is_dir(): + cls.we_dense = load_waveforms(cache_folder / "mearec_test_dense") else: - cls.we_dense = extract_waveforms(cls.recording, cls.sorting, cache_folder / "mearec_test", sparse=False) + cls.we_dense = extract_waveforms(cls.recording, cls.sorting, cache_folder / "mearec_test_dense", sparse=False) + metric_names = ["snr", "isi_violation", "num_spikes"] + _ = compute_spike_amplitudes(cls.we_dense) + _ = compute_unit_locations(cls.we_dense) + _ = compute_spike_locations(cls.we_dense) + _ = compute_quality_metrics(cls.we_dense, metric_names=metric_names) + _ = compute_template_metrics(cls.we_dense) + _ = compute_correlograms(cls.we_dense) + _ = compute_template_similarity(cls.we_dense) 
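The dense/sparse split used throughout these tests follows the same recipe: since sparse=True is now the default, a dense extractor is requested explicitly and the sparse one is derived from it. A rough, self-contained sketch (recording, sorting and the folder paths are placeholders, assuming the usual top-level re-exports):

    from spikeinterface import extract_waveforms, compute_sparsity

    we_dense = extract_waveforms(recording, sorting, dense_folder, sparse=False)
    sparsity = compute_sparsity(we_dense, method="radius", radius_um=50)
    we_sparse = we_dense.save(folder=sparse_folder, sparsity=sparsity)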
sw.set_default_plotter_backend("matplotlib") - metric_names = ["snr", "isi_violation", "num_spikes"] - _ = compute_spike_amplitudes(cls.we_dense) - _ = compute_unit_locations(cls.we_dense) - _ = compute_spike_locations(cls.we_dense) - _ = compute_quality_metrics(cls.we_dense, metric_names=metric_names) - _ = compute_template_metrics(cls.we_dense) - _ = compute_correlograms(cls.we_dense) - _ = compute_template_similarity(cls.we_dense) - # make sparse waveforms cls.sparsity_radius = compute_sparsity(cls.we_dense, method="radius", radius_um=50) cls.sparsity_best = compute_sparsity(cls.we_dense, method="best_channels", num_channels=5) @@ -363,24 +362,24 @@ def test_plot_rasters(self): mytest = TestWidgets() mytest.setUpClass() - mytest.test_plot_unit_waveforms_density_map() - mytest.test_plot_unit_summary() - mytest.test_plot_all_amplitudes_distributions() - mytest.test_plot_traces() - mytest.test_plot_unit_waveforms() - mytest.test_plot_unit_templates() - mytest.test_plot_unit_templates() - mytest.test_plot_unit_depths() - mytest.test_plot_unit_templates() - mytest.test_plot_unit_summary() - mytest.test_unit_locations() - mytest.test_quality_metrics() - mytest.test_template_metrics() - mytest.test_amplitudes() + # mytest.test_plot_unit_waveforms_density_map() + # mytest.test_plot_unit_summary() + # mytest.test_plot_all_amplitudes_distributions() + # mytest.test_plot_traces() + # mytest.test_plot_unit_waveforms() + # mytest.test_plot_unit_templates() + # mytest.test_plot_unit_templates() + # mytest.test_plot_unit_depths() + # mytest.test_plot_unit_templates() + # mytest.test_plot_unit_summary() + # mytest.test_unit_locations() + # mytest.test_quality_metrics() + # mytest.test_template_metrics() + # mytest.test_amplitudes() mytest.test_plot_agreement_matrix() - mytest.test_plot_confusion_matrix() - mytest.test_plot_probe_map() - mytest.test_plot_rasters() + # mytest.test_plot_confusion_matrix() + # mytest.test_plot_probe_map() + # mytest.test_plot_rasters() # plt.ion() plt.show() From 7cd60ac434288e7eb9d43684e0b575396f70daaa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 07:52:41 +0000 Subject: [PATCH 286/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/tests/test_widgets.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/widgets/tests/test_widgets.py b/src/spikeinterface/widgets/tests/test_widgets.py index 5f1a936a6e..1a2fdf38d9 100644 --- a/src/spikeinterface/widgets/tests/test_widgets.py +++ b/src/spikeinterface/widgets/tests/test_widgets.py @@ -51,7 +51,9 @@ def setUpClass(cls): if (cache_folder / "mearec_test_dense").is_dir(): cls.we_dense = load_waveforms(cache_folder / "mearec_test_dense") else: - cls.we_dense = extract_waveforms(cls.recording, cls.sorting, cache_folder / "mearec_test_dense", sparse=False) + cls.we_dense = extract_waveforms( + cls.recording, cls.sorting, cache_folder / "mearec_test_dense", sparse=False + ) metric_names = ["snr", "isi_violation", "num_spikes"] _ = compute_spike_amplitudes(cls.we_dense) _ = compute_unit_locations(cls.we_dense) @@ -366,7 +368,7 @@ def test_plot_rasters(self): # mytest.test_plot_unit_summary() # mytest.test_plot_all_amplitudes_distributions() # mytest.test_plot_traces() - # mytest.test_plot_unit_waveforms() + # mytest.test_plot_unit_waveforms() # mytest.test_plot_unit_templates() # mytest.test_plot_unit_templates() # 
mytest.test_plot_unit_depths() From 5c5f32fb0df19cb5faf7e24c11758639c1740f18 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 09:53:33 +0200 Subject: [PATCH 287/322] yep --- src/spikeinterface/widgets/traces.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index 802f90c62a..d010c96a27 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -88,7 +88,7 @@ def __init__( else: raise ValueError("plot_traces recording must be recording or dict or list") - if "location" in rec0.get_property_keys(): + if rec0.has_channel_locations(): channel_locations = rec0.get_channel_locations() else: channel_locations = None From 986d6d9f26417740dd7162e671db3082363930f6 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 10:20:20 +0200 Subject: [PATCH 288/322] Fix fix with sparse waveform extractor --- src/spikeinterface/exporters/tests/test_export_to_phy.py | 6 +++--- src/spikeinterface/exporters/to_phy.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/exporters/tests/test_export_to_phy.py b/src/spikeinterface/exporters/tests/test_export_to_phy.py index 7528f0ebf9..39bb875ea8 100644 --- a/src/spikeinterface/exporters/tests/test_export_to_phy.py +++ b/src/spikeinterface/exporters/tests/test_export_to_phy.py @@ -78,7 +78,7 @@ def test_export_to_phy_by_property(): recording = recording.save(folder=rec_folder) sorting = sorting.save(folder=sort_folder) - waveform_extractor = extract_waveforms(recording, sorting, waveform_folder) + waveform_extractor = extract_waveforms(recording, sorting, waveform_folder, sparse=False) sparsity_group = compute_sparsity(waveform_extractor, method="by_property", by_property="group") export_to_phy( waveform_extractor, @@ -96,7 +96,7 @@ def test_export_to_phy_by_property(): # Remove one channel recording_rm = recording.channel_slice([0, 2, 3, 4, 5, 6, 7]) - waveform_extractor_rm = extract_waveforms(recording_rm, sorting, waveform_folder_rm) + waveform_extractor_rm = extract_waveforms(recording_rm, sorting, waveform_folder_rm, sparse=False) sparsity_group = compute_sparsity(waveform_extractor_rm, method="by_property", by_property="group") export_to_phy( @@ -130,7 +130,7 @@ def test_export_to_phy_by_sparsity(): if f.is_dir(): shutil.rmtree(f) - waveform_extractor = extract_waveforms(recording, sorting, waveform_folder) + waveform_extractor = extract_waveforms(recording, sorting, waveform_folder, sparse=False) sparsity_radius = compute_sparsity(waveform_extractor, method="radius", radius_um=50.0) export_to_phy( waveform_extractor, diff --git a/src/spikeinterface/exporters/to_phy.py b/src/spikeinterface/exporters/to_phy.py index ebc810b953..31a452f389 100644 --- a/src/spikeinterface/exporters/to_phy.py +++ b/src/spikeinterface/exporters/to_phy.py @@ -94,6 +94,7 @@ def export_to_phy( if waveform_extractor.is_sparse(): used_sparsity = waveform_extractor.sparsity + assert sparsity is None elif sparsity is not None: used_sparsity = sparsity else: From 63494f2a44424085d7ad22935313f9cbd2c8b88c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 09:11:43 +0000 Subject: [PATCH 289/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/widgets/traces.py | 3 +-- src/spikeinterface/widgets/utils_ipywidgets.py | 5 ++--- 2 files changed, 3 
insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index d010c96a27..7a4306b284 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -95,6 +95,7 @@ def __init__( if order_channel_by_depth and channel_locations is not None: from ..preprocessing import depth_order + rec0 = depth_order(rec0) recordings = {k: depth_order(rec) for k, rec in recordings.items()} @@ -107,7 +108,6 @@ def __init__( if channel_ids is None: channel_ids = rec0.channel_ids - layer_keys = list(recordings.keys()) if segment_index is None: @@ -115,7 +115,6 @@ def __init__( raise ValueError("You must provide segment_index=...") segment_index = 0 - fs = rec0.get_sampling_frequency() if time_range is None: time_range = (0, 1.0) diff --git a/src/spikeinterface/widgets/utils_ipywidgets.py b/src/spikeinterface/widgets/utils_ipywidgets.py index 5bbe31302c..58dd5c7f32 100644 --- a/src/spikeinterface/widgets/utils_ipywidgets.py +++ b/src/spikeinterface/widgets/utils_ipywidgets.py @@ -235,7 +235,7 @@ def __init__(self, channel_ids, **kwargs): self.slider.observe(self.on_slider_changed, names=["value"], type="change") self.selector.observe(self.on_selector_changed, names=["value"], type="change") - self.observe(self.value_changed, names=['value'], type="change") + self.observe(self.value_changed, names=["value"], type="change") def on_slider_changed(self, change=None): i0, i1 = self.slider.value @@ -258,7 +258,7 @@ def on_selector_changed(self, change=None): self.slider.observe(self.on_slider_changed, names=["value"], type="change") self.value = channel_ids - + def value_changed(self, change=None): self.selector.unobserve(self.on_selector_changed, names=["value"], type="change") self.selector.value = change["new"] @@ -272,7 +272,6 @@ def value_changed(self, change=None): self.slider.observe(self.on_slider_changed, names=["value"], type="change") - class ScaleWidget(W.VBox): value = traitlets.Float() From 5660de282ac43d96324184d47aa2d951910d6fec Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 6 Oct 2023 11:16:24 +0200 Subject: [PATCH 290/322] Simplify parsing in cellexplorer --- src/spikeinterface/extractors/cellexplorersortingextractor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/spikeinterface/extractors/cellexplorersortingextractor.py b/src/spikeinterface/extractors/cellexplorersortingextractor.py index 0096a40a79..0980e89f1c 100644 --- a/src/spikeinterface/extractors/cellexplorersortingextractor.py +++ b/src/spikeinterface/extractors/cellexplorersortingextractor.py @@ -118,7 +118,6 @@ def __init__( spike_times = spikes_data["times"] # CellExplorer reports spike times in units seconds; SpikeExtractors uses time units of sampling frames - unit_ids = unit_ids[:].astype(int).tolist() unit_ids = [str(unit_id) for unit_id in unit_ids] spiketrains_dict = {unit_id: spike_times[index] for index, unit_id in enumerate(unit_ids)} for unit_id in unit_ids: From c0d4c60095f9704f9b27adfb5fa0f4867adfaf10 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 11:38:15 +0200 Subject: [PATCH 291/322] oups --- src/spikeinterface/widgets/traces.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index d010c96a27..ce34af0bfa 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -88,7 +88,7 @@ def __init__( else: raise ValueError("plot_traces recording must be recording or dict or 
list") - if rec0.has_channel_locations(): + if rec0.has_channel_location(): channel_locations = rec0.get_channel_locations() else: channel_locations = None From 2907934928719cf8d0403a2c55628645483187f7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 11:48:37 +0200 Subject: [PATCH 292/322] clean --- src/spikeinterface/widgets/traces.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/spikeinterface/widgets/traces.py b/src/spikeinterface/widgets/traces.py index 5a8212302c..fc8b30eb05 100644 --- a/src/spikeinterface/widgets/traces.py +++ b/src/spikeinterface/widgets/traces.py @@ -557,11 +557,6 @@ def _get_trace_list(recordings, channel_ids, time_range, segment_index, return_s return_scaled=return_scaled, ) - # if order is not None: - # traces = traces[:, order] list_traces.append(traces) - # if order is not None: - # channel_ids = np.array(channel_ids)[order] - return times, list_traces, frame_range, channel_ids From 5733883c49797645cb2cefa328d37e1d66caea51 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 10:15:16 +0000 Subject: [PATCH 293/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sorters/internal/tridesclous2.py | 181 ++++++++++-------- 1 file changed, 97 insertions(+), 84 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index e2f4812222..32cf27ceb1 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -1,8 +1,14 @@ import shutil from .si_based import ComponentsBasedSorter -from spikeinterface.core import (load_extractor, BaseRecording, get_noise_levels, - extract_waveforms, NumpySorting, get_channel_distances) +from spikeinterface.core import ( + load_extractor, + BaseRecording, + get_noise_levels, + extract_waveforms, + NumpySorting, + get_channel_distances, +) from spikeinterface.core.waveform_tools import extract_waveforms_to_single_buffer from spikeinterface.core.job_tools import fix_job_kwargs @@ -14,30 +20,31 @@ import pickle import json + class Tridesclous2Sorter(ComponentsBasedSorter): sorter_name = "tridesclous2" _default_params = { "apply_preprocessing": True, - "waveforms" : {"ms_before": 0.5, "ms_after": 1.5, }, - "filtering": {"freq_min": 300., "freq_max": 8000.0}, - "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 1.5, "radius_um": 150.}, + "waveforms": { + "ms_before": 0.5, + "ms_after": 1.5, + }, + "filtering": {"freq_min": 300.0, "freq_max": 8000.0}, + "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 1.5, "radius_um": 150.0}, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, "svd": {"n_components": 6}, "clustering": { - "split_radius_um": 40., - "merge_radius_um": 40., + "split_radius_um": 40.0, + "merge_radius_um": 40.0, }, "templates": { "ms_before": 1.5, "ms_after": 2.5, # "peak_shift_ms": 0.2, }, - "matching": { - "peak_shift_ms": 0.2, - "radius_um": 100. 
- }, - "job_kwargs": {"n_jobs":-1}, + "matching": {"peak_shift_ms": 0.2, "radius_um": 100.0}, + "job_kwargs": {"n_jobs": -1}, "save_array": True, } @@ -52,10 +59,15 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): job_kwargs["progress_bar"] = verbose from spikeinterface.sortingcomponents.matching import find_spikes_from_templates - from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, ExtractSparseWaveforms, PeakRetriever + from spikeinterface.core.node_pipeline import ( + run_node_pipeline, + ExtractDenseWaveforms, + ExtractSparseWaveforms, + PeakRetriever, + ) from spikeinterface.sortingcomponents.peak_detection import detect_peaks, DetectPeakLocallyExclusive from spikeinterface.sortingcomponents.peak_selection import select_peaks - from spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeGridConvolution + from spikeinterface.sortingcomponents.peak_localization import LocalizeCenterOfMass, LocalizeGridConvolution from spikeinterface.sortingcomponents.waveforms.temporal_pca import TemporalPCAProjection from spikeinterface.sortingcomponents.clustering.split import split_clusters @@ -99,7 +111,6 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): if verbose: print("We kept %d peaks for clustering" % len(peaks)) - # SVD for time compression few_peaks = select_peaks(peaks, method="uniform", n_peaks=5000) few_wfs = extract_waveform_at_max_channel(recording, few_peaks, **job_kwargs) @@ -108,8 +119,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): tsvd = TruncatedSVD(params["svd"]["n_components"]) tsvd.fit(wfs) - model_folder = sorter_output_folder / 'tsvd_model' - + model_folder = sorter_output_folder / "tsvd_model" + model_folder.mkdir(exist_ok=True) with open(model_folder / "pca_model.pkl", "wb") as f: pickle.dump(tsvd, f) @@ -126,12 +137,12 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # features - features_folder = sorter_output_folder / 'features' + features_folder = sorter_output_folder / "features" node0 = PeakRetriever(recording, peaks) # node1 = ExtractDenseWaveforms(rec, parents=[node0], return_output=False, # ms_before=0.5, - # ms_after=1.5, + # ms_after=1.5, # ) # node2 = LocalizeCenterOfMass(rec, parents=[node0, node1], return_output=True, @@ -143,36 +154,44 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # upsampling_um=5.0, # ) - node3 = ExtractSparseWaveforms(recording, parents=[node0], return_output=True, - ms_before=0.5, - ms_after=1.5, - radius_um=100.0, + node3 = ExtractSparseWaveforms( + recording, + parents=[node0], + return_output=True, + ms_before=0.5, + ms_after=1.5, + radius_um=100.0, ) - model_folder_path = sorter_output_folder / 'tsvd_model' - - node4 = TemporalPCAProjection(recording, parents=[node0, node3], return_output=True, - model_folder_path=model_folder_path) + model_folder_path = sorter_output_folder / "tsvd_model" + node4 = TemporalPCAProjection( + recording, parents=[node0, node3], return_output=True, model_folder_path=model_folder_path + ) # pipeline_nodes = [node0, node1, node2, node3, node4] pipeline_nodes = [node0, node3, node4] - output = run_node_pipeline(recording, pipeline_nodes, job_kwargs, gather_mode="npy", gather_kwargs=dict(exist_ok=True), - folder=features_folder, names=["sparse_wfs", "sparse_tsvd"]) + output = run_node_pipeline( + recording, + pipeline_nodes, + job_kwargs, + gather_mode="npy", + gather_kwargs=dict(exist_ok=True), + folder=features_folder, + 
names=["sparse_wfs", "sparse_tsvd"], + ) # TODO make this generic in GatherNPY ??? sparse_mask = node3.neighbours_mask - np.save(features_folder/ 'sparse_mask.npy', sparse_mask) - np.save(features_folder/ 'peaks.npy', peaks) - - + np.save(features_folder / "sparse_mask.npy", sparse_mask) + np.save(features_folder / "peaks.npy", peaks) # Clustering: channel index > split > merge split_radius_um = params["clustering"]["split_radius_um"] neighbours_mask = get_channel_distances(recording) < split_radius_um - original_labels = peaks['channel_index'] + original_labels = peaks["channel_index"] post_split_label, split_count = split_clusters( original_labels, @@ -182,24 +201,19 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): method_kwargs=dict( # clusterer="hdbscan", clusterer="isocut5", - feature_name="sparse_tsvd", # feature_name="sparse_wfs", - neighbours_mask=neighbours_mask, waveforms_sparse_mask=sparse_mask, min_size_split=50, min_cluster_size=50, min_samples=50, n_pca_features=3, - ), - + ), recursive=True, recursive_depth=3, - returns_split_count=True, - **job_kwargs - + **job_kwargs, ) merge_radius_um = params["clustering"]["merge_radius_um"] @@ -210,35 +224,28 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): recording, features_folder, radius_um=merge_radius_um, - method="project_distribution", method_kwargs=dict( # neighbours_mask=neighbours_mask, waveforms_sparse_mask=sparse_mask, - # feature_name="sparse_tsvd", feature_name="sparse_wfs", - # projection='lda', - projection='centroid', - + projection="centroid", # criteria='diptest', # threshold_diptest=0.5, # criteria="percentile", # threshold_percentile=80., criteria="distrib_overlap", threshold_overlap=0.4, - # num_shift=0 num_shift=2, - - ), - **job_kwargs + ), + **job_kwargs, ) - + # sparse_wfs = np.load(features_folder / "sparse_wfs.npy", mmap_mode="r") - new_peaks = peaks.copy() new_peaks["sample_index"] -= peak_shifts @@ -247,7 +254,9 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): mask = post_merge_label >= 0 sorting_temp = NumpySorting.from_times_labels( - new_peaks["sample_index"][mask], post_merge_label[mask], sampling_frequency, + new_peaks["sample_index"][mask], + post_merge_label[mask], + sampling_frequency, unit_ids=labels_set, ) sorting_temp = sorting_temp.save(folder=sorter_output_folder / "sorting_temp") @@ -257,8 +266,13 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): max_spikes_per_unit = 300 we = extract_waveforms( - recording, sorting_temp, sorter_output_folder / "waveforms_temp", ms_before=ms_before, ms_after=ms_after, - max_spikes_per_unit=max_spikes_per_unit, **job_kwargs + recording, + sorting_temp, + sorter_output_folder / "waveforms_temp", + ms_before=ms_before, + ms_after=ms_after, + max_spikes_per_unit=max_spikes_per_unit, + **job_kwargs, ) matching_params = params["matching"].copy() @@ -272,57 +286,56 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): recording, method="tridesclous", method_kwargs=matching_params, **job_kwargs ) - if params["save_array"]: - - np.save(sorter_output_folder / 'noise_levels.npy', noise_levels) - np.save(sorter_output_folder / 'all_peaks.npy', all_peaks) - np.save(sorter_output_folder / 'post_split_label.npy', post_split_label) - np.save(sorter_output_folder / 'split_count.npy', split_count) - np.save(sorter_output_folder / 'post_merge_label.npy', post_merge_label) - np.save(sorter_output_folder / 'spikes.npy', spikes) + np.save(sorter_output_folder / "noise_levels.npy", 
noise_levels) + np.save(sorter_output_folder / "all_peaks.npy", all_peaks) + np.save(sorter_output_folder / "post_split_label.npy", post_split_label) + np.save(sorter_output_folder / "split_count.npy", split_count) + np.save(sorter_output_folder / "post_merge_label.npy", post_merge_label) + np.save(sorter_output_folder / "spikes.npy", spikes) final_spikes = np.zeros(spikes.size, dtype=minimum_spike_dtype) final_spikes["sample_index"] = spikes["sample_index"] final_spikes["unit_index"] = spikes["cluster_index"] final_spikes["segment_index"] = spikes["segment_index"] - sorting = NumpySorting(final_spikes, sampling_frequency, labels_set) sorting = sorting.save(folder=sorter_output_folder / "sorting") return sorting - -def extract_waveform_at_max_channel(rec, peaks, - ms_before=0.5, ms_after=1.5, - **job_kwargs): +def extract_waveform_at_max_channel(rec, peaks, ms_before=0.5, ms_after=1.5, **job_kwargs): """ Helper function to extractor waveforms at max channel from a peak list """ n = rec.get_num_channels() - unit_ids = np.arange(n, dtype='int64') - sparsity_mask = np.eye(n, dtype='bool') - - spikes = np.zeros(peaks.size, dtype = [("sample_index", "int64"), ("unit_index", "int64"), ("segment_index", "int64")]) + unit_ids = np.arange(n, dtype="int64") + sparsity_mask = np.eye(n, dtype="bool") + + spikes = np.zeros( + peaks.size, dtype=[("sample_index", "int64"), ("unit_index", "int64"), ("segment_index", "int64")] + ) spikes["sample_index"] = peaks["sample_index"] spikes["unit_index"] = peaks["channel_index"] spikes["segment_index"] = peaks["segment_index"] - nbefore = int(ms_before * rec.sampling_frequency / 1000.) - nafter = int(ms_after * rec.sampling_frequency/ 1000.) - - all_wfs = extract_waveforms_to_single_buffer(rec, spikes, unit_ids, nbefore, nafter, - mode="shared_memory", return_scaled=False, - sparsity_mask=sparsity_mask, copy=True, - **job_kwargs, - ) + nbefore = int(ms_before * rec.sampling_frequency / 1000.0) + nafter = int(ms_after * rec.sampling_frequency / 1000.0) + + all_wfs = extract_waveforms_to_single_buffer( + rec, + spikes, + unit_ids, + nbefore, + nafter, + mode="shared_memory", + return_scaled=False, + sparsity_mask=sparsity_mask, + copy=True, + **job_kwargs, + ) return all_wfs - - - - From d373c05673b04354749e9d4ed9fc207f00824de3 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 12:49:21 +0200 Subject: [PATCH 294/322] wip --- .../sorters/internal/tridesclous2.py | 16 +++++++++++----- .../sortingcomponents/clustering/split.py | 2 ++ 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index e2f4812222..5a2664a45e 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -20,9 +20,11 @@ class Tridesclous2Sorter(ComponentsBasedSorter): _default_params = { "apply_preprocessing": True, "waveforms" : {"ms_before": 0.5, "ms_after": 1.5, }, - "filtering": {"freq_min": 300., "freq_max": 8000.0}, - "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 1.5, "radius_um": 150.}, + "filtering": {"freq_min": 300., "freq_max": 12000.0}, + "detection": {"peak_sign": "neg", "detect_threshold": 5, + "exclude_sweep_ms": 1.5, "radius_um": 150.}, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, + "features": {"radius_um": 120}, "svd": {"n_components": 6}, "clustering": { "split_radius_um": 40., @@ -35,7 +37,10 @@ class 
Tridesclous2Sorter(ComponentsBasedSorter): }, "matching": { "peak_shift_ms": 0.2, - "radius_um": 100. + # "radius_um": 100. + "num_peeler_loop": 3, + "num_template_try": 3, + }, "job_kwargs": {"n_jobs":-1}, "save_array": True, @@ -143,9 +148,10 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # upsampling_um=5.0, # ) + radius = params["features"]["radius_um"] node3 = ExtractSparseWaveforms(recording, parents=[node0], return_output=True, - ms_before=0.5, - ms_after=1.5, + ms_before=ms_before, + ms_after=ms_after, radius_um=100.0, ) diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py index 9836e9110f..b433a2d16d 100644 --- a/src/spikeinterface/sortingcomponents/clustering/split.py +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -192,6 +192,7 @@ def split( # target channel subset is done intersect local channels + neighbours local_chans = np.unique(peaks["channel_index"][peak_indices]) + target_channels = np.flatnonzero(np.all(neighbours_mask[local_chans, :], axis=0)) # TODO fix this a better way, this when cluster have too few overlapping channels @@ -204,6 +205,7 @@ def split( local_labels[dont_have_channels] = -2 kept = np.flatnonzero(~dont_have_channels) + if kept.size < min_size_split: return False, None From f5a42e7c51d5983738191d10896cf9fb500847c7 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 13:41:13 +0200 Subject: [PATCH 295/322] wip --- src/spikeinterface/sorters/internal/tridesclous2.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index bfc01b897f..909a2d1cb3 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -29,11 +29,12 @@ class Tridesclous2Sorter(ComponentsBasedSorter): "waveforms": { "ms_before": 0.5, "ms_after": 1.5, + "radius_um": 120.0, }, "filtering": {"freq_min": 300.0, "freq_max": 12000.0}, "detection": {"peak_sign": "neg", "detect_threshold": 5, "exclude_sweep_ms": 1.5, "radius_um": 150.0}, "selection": {"n_peaks_per_channel": 5000, "min_n_peaks": 20000}, - "features": {"radius_um": 120}, + "features": {}, "svd": {"n_components": 6}, "clustering": { "split_radius_um": 40.0, @@ -155,13 +156,14 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # upsampling_um=5.0, # ) + radius_um = params["waveforms"]["radius_um"] node3 = ExtractSparseWaveforms( recording, parents=[node0], return_output=True, - ms_beforems_before, + ms_before=ms_before, ms_after=ms_after, - radius_um=radius, + radius_um=radius_um, ) model_folder_path = sorter_output_folder / "tsvd_model" From 446dcc7114f8d275cc55ce557ed11ebe0fb1160e Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Fri, 6 Oct 2023 13:42:07 +0200 Subject: [PATCH 296/322] Patch --- src/spikeinterface/sorters/internal/spyking_circus2.py | 2 +- .../sortingcomponents/benchmark/benchmark_clustering.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 0c3b9f95d1..3681a1fbc5 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -114,7 +114,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We get the labels for our peaks mask = peak_labels > -1 - sorting = 
NumpySorting.from_times_labels(selected_peaks["sample_index"][mask], peak_labels[mask], sampling_rate) + sorting = NumpySorting.from_times_labels(selected_peaks["sample_index"][mask], peak_labels[mask].astype(int), sampling_rate) clustering_folder = sorter_output_folder / "clustering" if clustering_folder.exists(): shutil.rmtree(clustering_folder) diff --git a/src/spikeinterface/sortingcomponents/benchmark/benchmark_clustering.py b/src/spikeinterface/sortingcomponents/benchmark/benchmark_clustering.py index d68b8e5449..bd413417bf 100644 --- a/src/spikeinterface/sortingcomponents/benchmark/benchmark_clustering.py +++ b/src/spikeinterface/sortingcomponents/benchmark/benchmark_clustering.py @@ -524,7 +524,7 @@ def plot_statistics(self, metric="cosine", annotations=True, detect_threshold=5) template_real = template_real.reshape(template_real.size, 1).T if metric == "cosine": - dist = sklearn.metrics.pairwise.cosine_similarity(template, template_real, metric).flatten().tolist() + dist = sklearn.metrics.pairwise.cosine_similarity(template, template_real).flatten().tolist() else: dist = sklearn.metrics.pairwise_distances(template, template_real, metric).flatten().tolist() res += dist From f56780db0ed2240a94e697dcdc4040e65e706918 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 11:42:58 +0000 Subject: [PATCH 297/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/spyking_circus2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 3681a1fbc5..2c297662f4 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -114,7 +114,9 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): ## We get the labels for our peaks mask = peak_labels > -1 - sorting = NumpySorting.from_times_labels(selected_peaks["sample_index"][mask], peak_labels[mask].astype(int), sampling_rate) + sorting = NumpySorting.from_times_labels( + selected_peaks["sample_index"][mask], peak_labels[mask].astype(int), sampling_rate + ) clustering_folder = sorter_output_folder / "clustering" if clustering_folder.exists(): shutil.rmtree(clustering_folder) From 4a778069329529afa7eccc01d7805642d9ff93e5 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 13:46:35 +0200 Subject: [PATCH 298/322] small fix in gtstudy --- src/spikeinterface/comparison/groundtruthstudy.py | 3 +++ src/spikeinterface/widgets/widget_list.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index df0b5296c0..a1814d3527 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -184,6 +184,9 @@ def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" if log_file.exists(): log_file.unlink() + + if sorter_folder_exists: + shutil.rmtree(sorter_folder) params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given diff --git a/src/spikeinterface/widgets/widget_list.py b/src/spikeinterface/widgets/widget_list.py index ed77de6128..51e7208080 100644 --- 
a/src/spikeinterface/widgets/widget_list.py +++ b/src/spikeinterface/widgets/widget_list.py @@ -114,7 +114,7 @@ plot_study_run_times = StudyRunTimesWidget plot_study_unit_counts = StudyUnitCountsWidget plot_study_performances = StudyPerformances -plot_stufy_performances_vs_metrics = StudyPerformancesVsMetrics +plot_study_performances_vs_metrics = StudyPerformancesVsMetrics def plot_timeseries(*args, **kwargs): From 59e617fdfca4898b131c4cbf84b1a2e4eccd1eb0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 11:47:21 +0000 Subject: [PATCH 299/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/comparison/groundtruthstudy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index a1814d3527..8d8b255336 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -184,9 +184,9 @@ def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True log_file = self.folder / "sortings" / "run_logs" / f"{self.key_to_str(key)}.json" if log_file.exists(): log_file.unlink() - + if sorter_folder_exists: - shutil.rmtree(sorter_folder) + shutil.rmtree(sorter_folder) params = self.cases[key]["run_sorter_params"].copy() # this ensure that sorter_name is given From 8c64a9f74b035211b4f4623becbdfd010683d402 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 14:19:38 +0200 Subject: [PATCH 300/322] oups --- src/spikeinterface/comparison/groundtruthstudy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index a1814d3527..0133f57e4d 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -165,7 +165,7 @@ def run_sorters(self, case_keys=None, engine="loop", engine_kwargs={}, keep=True sorting_exists = sorting_folder.exists() sorter_folder = self.folder / "sorters" / self.key_to_str(key) - sorter_folder_exists = sorting_folder.exists() + sorter_folder_exists = sorter_folder.exists() if keep: if sorting_exists: From f465e815c7de66958c659880b21085edd38f4216 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 21:22:31 +0200 Subject: [PATCH 301/322] wip --- src/spikeinterface/sorters/internal/tridesclous2.py | 11 +++++++---- .../sortingcomponents/clustering/merge.py | 7 +++++-- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 909a2d1cb3..ca1dfa1854 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -196,6 +196,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): original_labels = peaks["channel_index"] + min_cluster_size = 50 + post_split_label, split_count = split_clusters( original_labels, recording, @@ -208,8 +210,8 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # feature_name="sparse_wfs", neighbours_mask=neighbours_mask, waveforms_sparse_mask=sparse_mask, - min_size_split=50, - min_cluster_size=50, + min_size_split=min_cluster_size, + min_cluster_size=min_cluster_size, min_samples=50, n_pca_features=3, ), @@ -240,9 
+242,10 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # criteria="percentile", # threshold_percentile=80., criteria="distrib_overlap", - threshold_overlap=0.4, + threshold_overlap=0.3, + min_cluster_size=min_cluster_size+1, # num_shift=0 - num_shift=2, + num_shift=5, ), **job_kwargs, ) diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index d892d0723a..45090452dc 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -398,6 +398,7 @@ def merge( threshold_diptest=0.5, threshold_percentile=80.0, threshold_overlap=0.4, + min_cluster_size=50, num_shift=2, ): if num_shift > 0: @@ -414,7 +415,7 @@ def merge( chans1 = np.unique(peaks["channel_index"][inds1]) target_chans1 = np.flatnonzero(np.all(waveforms_sparse_mask[chans1, :], axis=0)) - if inds0.size < 40 or inds1.size < 40: + if inds0.size < min_cluster_size or inds1.size < min_cluster_size: is_merge = False merge_value = 0 final_shift = 0 @@ -525,7 +526,9 @@ def merge( # DEBUG = True DEBUG = False - if DEBUG and is_merge: + # if DEBUG and is_merge: + # if DEBUG and (overlap > 0.1 and overlap <0.3): + if DEBUG: # if DEBUG and not is_merge: # if DEBUG and (overlap > 0.05 and overlap <0.25): # if label0 == 49 and label1== 65: From 022c55d3c960dfa570a8737c54937b694fde5f2e Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Fri, 6 Oct 2023 21:26:26 +0200 Subject: [PATCH 302/322] Add noise_level in kwargs NoiseGeneratorRecording and change some parameters in generate templates --- src/spikeinterface/core/generate.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/core/generate.py b/src/spikeinterface/core/generate.py index 0c67404069..dc84d31987 100644 --- a/src/spikeinterface/core/generate.py +++ b/src/spikeinterface/core/generate.py @@ -654,6 +654,7 @@ def __init__( "num_channels": num_channels, "durations": durations, "sampling_frequency": sampling_frequency, + "noise_level": noise_level, "dtype": dtype, "seed": seed, "strategy": strategy, @@ -876,13 +877,13 @@ def generate_single_fake_waveform( default_unit_params_range = dict( - alpha=(5_000.0, 15_000.0), + alpha=(6_000.0, 9_000.0), depolarization_ms=(0.09, 0.14), repolarization_ms=(0.5, 0.8), recovery_ms=(1.0, 1.5), positive_amplitude=(0.05, 0.15), smooth_ms=(0.03, 0.07), - decay_power=(1.2, 1.8), + decay_power=(1.4, 1.8), ) From 1bbfe1622baf3250b362dae28c11b03c5dc712cf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 20:24:58 +0000 Subject: [PATCH 303/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sorters/internal/tridesclous2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index ca1dfa1854..054596e9b3 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -243,7 +243,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # threshold_percentile=80., criteria="distrib_overlap", threshold_overlap=0.3, - min_cluster_size=min_cluster_size+1, + min_cluster_size=min_cluster_size + 1, # num_shift=0 num_shift=5, ), From f68da6a82b3f8efec5aa1d5f80ba170b5ad6d4e0 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: 
Mon, 9 Oct 2023 07:55:42 +0200 Subject: [PATCH 304/322] Updates --- .../sorters/internal/spyking_circus2.py | 4 +++- .../clustering/random_projections.py | 21 ++++++++----------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 2c297662f4..780e6a14aa 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -67,6 +67,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): # recording_f = whiten(recording_f, dtype="float32") recording_f = zscore(recording_f, dtype="float32") + noise_levels = np.ones(num_channels, dtype=np.float32) ## Then, we are detecting peaks with a locally_exclusive method detection_params = params["detection"].copy() @@ -87,7 +88,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): selection_params["n_peaks"] = params["selection"]["n_peaks_per_channel"] * num_channels selection_params["n_peaks"] = max(selection_params["min_n_peaks"], selection_params["n_peaks"]) - noise_levels = np.ones(num_channels, dtype=np.float32) + selection_params.update({"noise_levels": noise_levels}) selected_peaks = select_peaks( peaks, method="smart_sampling_amplitudes", select_per_channel=False, **selection_params @@ -107,6 +108,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): clustering_params.update(dict(shared_memory=params["shared_memory"])) clustering_params["job_kwargs"] = job_kwargs clustering_params["tmp_folder"] = sorter_output_folder / "clustering" + clustering_params.update({"noise_levels": noise_levels}) labels, peak_labels = find_cluster_from_peaks( recording_f, selected_peaks, method="random_projections", method_kwargs=clustering_params diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index a81458d7a8..a6d69f74aa 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -43,7 +43,8 @@ class RandomProjectionClustering: "ms_before": 1, "ms_after": 1, "random_seed": 42, - "smoothing_kwargs": {"window_length_ms": 1}, + "noise_levels" : None, + "smoothing_kwargs": {"window_length_ms": 0.25}, "shared_memory": True, "tmp_folder": None, "job_kwargs": {"n_jobs": os.cpu_count(), "chunk_memory": "100M", "verbose": True, "progress_bar": True}, @@ -72,7 +73,10 @@ def main_function(cls, recording, peaks, params): num_samples = nbefore + nafter num_chans = recording.get_num_channels() - noise_levels = get_noise_levels(recording, return_scaled=False) + if d["noise_levels"] is None: + noise_levels = get_noise_levels(recording, return_scaled=False) + else: + noise_levels = d["noise_levels"] np.random.seed(d["random_seed"]) @@ -82,7 +86,9 @@ def main_function(cls, recording, peaks, params): else: tmp_folder = Path(params["tmp_folder"]).absolute() - ### Then we extract the SVD features + + tmp_folder.mkdir(parents=True, exist_ok=True) + node0 = PeakRetriever(recording, peaks) node1 = ExtractDenseWaveforms( recording, parents=[node0], return_output=False, ms_before=params["ms_before"], ms_after=params["ms_after"] @@ -174,15 +180,6 @@ def sigmoid(x, L, x0, k, b): if verbose: print("We found %d raw clusters, starting to clean with matching..." 
% (len(labels))) - # create a tmp folder - if params["tmp_folder"] is None: - name = "".join(random.choices(string.ascii_uppercase + string.digits, k=8)) - tmp_folder = get_global_tmp_folder() / name - else: - tmp_folder = Path(params["tmp_folder"]) - - tmp_folder.mkdir(parents=True, exist_ok=True) - sorting_folder = tmp_folder / "sorting" unit_ids = np.arange(len(np.unique(spikes["unit_index"]))) sorting = NumpySorting(spikes, fs, unit_ids=unit_ids) From ed44aaf68fc6c614373a130041b93d1ce0d9ffc8 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Mon, 9 Oct 2023 10:04:37 +0200 Subject: [PATCH 305/322] Extracting sparse waveforms --- .../clustering/random_projections.py | 21 ++++++++++++++++--- .../sortingcomponents/features_from_peaks.py | 9 ++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index a6d69f74aa..b1dab9b27c 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -20,7 +20,7 @@ from spikeinterface.core import extract_waveforms from spikeinterface.sortingcomponents.waveforms.savgol_denoiser import SavGolDenoiser from spikeinterface.sortingcomponents.features_from_peaks import RandomProjectionsFeature -from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, PeakRetriever +from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, ExtractSparseWaveforms, PeakRetriever class RandomProjectionClustering: @@ -90,8 +90,9 @@ def main_function(cls, recording, peaks, params): tmp_folder.mkdir(parents=True, exist_ok=True) node0 = PeakRetriever(recording, peaks) - node1 = ExtractDenseWaveforms( - recording, parents=[node0], return_output=False, ms_before=params["ms_before"], ms_after=params["ms_after"] + node1 = ExtractSparseWaveforms( + recording, parents=[node0], return_output=False, ms_before=params["ms_before"], ms_after=params["ms_after"], + radius_um=params['radius_um'] ) node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params["smoothing_kwargs"]) @@ -129,6 +130,8 @@ def sigmoid(x, L, x0, k, b): return_output=True, projections=projections, radius_um=params["radius_um"], + sigmoid=None, + sparse=True ) pipeline_nodes = [node0, node1, node2, node3] @@ -142,6 +145,18 @@ def sigmoid(x, L, x0, k, b): clustering = hdbscan.hdbscan(hdbscan_data, **d["hdbscan_kwargs"]) peak_labels = clustering[0] + # peak_labels = -1 * np.ones(len(peaks), dtype=int) + # nb_clusters = 0 + # for c in np.unique(peaks['channel_index']): + # mask = peaks['channel_index'] == c + # clustering = hdbscan.hdbscan(hdbscan_data[mask], **d['hdbscan_kwargs']) + # local_labels = clustering[0] + # valid_clusters = local_labels > -1 + # if np.sum(valid_clusters) > 0: + # local_labels[valid_clusters] += nb_clusters + # peak_labels[mask] = local_labels + # nb_clusters += len(np.unique(local_labels[valid_clusters])) + labels = np.unique(peak_labels) labels = labels[labels >= 0] diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index b534c2356d..3ca53b05fb 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -186,6 +186,7 @@ def __init__( projections=None, sigmoid=None, radius_um=None, + sparse=True ): 
PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) @@ -195,7 +196,8 @@ def __init__( self.channel_distance = get_channel_distances(recording) self.neighbours_mask = self.channel_distance < radius_um self.radius_um = radius_um - self._kwargs.update(dict(projections=projections, sigmoid=sigmoid, radius_um=radius_um)) + self.sparse = sparse + self._kwargs.update(dict(projections=projections, sigmoid=sigmoid, radius_um=radius_um, sparse=sparse)) self._dtype = recording.get_dtype() def get_dtype(self): @@ -213,7 +215,10 @@ def compute(self, traces, peaks, waveforms): (idx,) = np.nonzero(peaks["channel_index"] == main_chan) (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan]) local_projections = self.projections[chan_inds, :] - wf_ptp = np.ptp(waveforms[idx][:, :, chan_inds], axis=1) + if self.sparse: + wf_ptp = np.ptp(waveforms[idx][:, :, :len(chan_inds)], axis=1) + else: + wf_ptp = np.ptp(waveforms[idx][:, :, chan_inds], axis=1) if self.sigmoid is not None: wf_ptp *= self._sigmoid(wf_ptp) From fa82f108a1a15e4eeb347a9c86294a65960bbd6d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 08:20:50 +0000 Subject: [PATCH 306/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sorters/internal/spyking_circus2.py | 1 - .../clustering/random_projections.py | 20 +++++++++++++------ .../sortingcomponents/features_from_peaks.py | 4 ++-- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/spikeinterface/sorters/internal/spyking_circus2.py b/src/spikeinterface/sorters/internal/spyking_circus2.py index 780e6a14aa..a16b642dd5 100644 --- a/src/spikeinterface/sorters/internal/spyking_circus2.py +++ b/src/spikeinterface/sorters/internal/spyking_circus2.py @@ -88,7 +88,6 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): selection_params["n_peaks"] = params["selection"]["n_peaks_per_channel"] * num_channels selection_params["n_peaks"] = max(selection_params["min_n_peaks"], selection_params["n_peaks"]) - selection_params.update({"noise_levels": noise_levels}) selected_peaks = select_peaks( peaks, method="smart_sampling_amplitudes", select_per_channel=False, **selection_params diff --git a/src/spikeinterface/sortingcomponents/clustering/random_projections.py b/src/spikeinterface/sortingcomponents/clustering/random_projections.py index b1dab9b27c..72acd49f4f 100644 --- a/src/spikeinterface/sortingcomponents/clustering/random_projections.py +++ b/src/spikeinterface/sortingcomponents/clustering/random_projections.py @@ -20,7 +20,12 @@ from spikeinterface.core import extract_waveforms from spikeinterface.sortingcomponents.waveforms.savgol_denoiser import SavGolDenoiser from spikeinterface.sortingcomponents.features_from_peaks import RandomProjectionsFeature -from spikeinterface.core.node_pipeline import run_node_pipeline, ExtractDenseWaveforms, ExtractSparseWaveforms, PeakRetriever +from spikeinterface.core.node_pipeline import ( + run_node_pipeline, + ExtractDenseWaveforms, + ExtractSparseWaveforms, + PeakRetriever, +) class RandomProjectionClustering: @@ -43,7 +48,7 @@ class RandomProjectionClustering: "ms_before": 1, "ms_after": 1, "random_seed": 42, - "noise_levels" : None, + "noise_levels": None, "smoothing_kwargs": {"window_length_ms": 0.25}, "shared_memory": True, "tmp_folder": None, @@ -86,13 +91,16 @@ def main_function(cls, recording, peaks, params): else: tmp_folder = 
Path(params["tmp_folder"]).absolute() - tmp_folder.mkdir(parents=True, exist_ok=True) node0 = PeakRetriever(recording, peaks) node1 = ExtractSparseWaveforms( - recording, parents=[node0], return_output=False, ms_before=params["ms_before"], ms_after=params["ms_after"], - radius_um=params['radius_um'] + recording, + parents=[node0], + return_output=False, + ms_before=params["ms_before"], + ms_after=params["ms_after"], + radius_um=params["radius_um"], ) node2 = SavGolDenoiser(recording, parents=[node0, node1], return_output=False, **params["smoothing_kwargs"]) @@ -131,7 +139,7 @@ def sigmoid(x, L, x0, k, b): projections=projections, radius_um=params["radius_um"], sigmoid=None, - sparse=True + sparse=True, ) pipeline_nodes = [node0, node1, node2, node3] diff --git a/src/spikeinterface/sortingcomponents/features_from_peaks.py b/src/spikeinterface/sortingcomponents/features_from_peaks.py index 3ca53b05fb..06d22181cb 100644 --- a/src/spikeinterface/sortingcomponents/features_from_peaks.py +++ b/src/spikeinterface/sortingcomponents/features_from_peaks.py @@ -186,7 +186,7 @@ def __init__( projections=None, sigmoid=None, radius_um=None, - sparse=True + sparse=True, ): PipelineNode.__init__(self, recording, return_output=return_output, parents=parents) @@ -216,7 +216,7 @@ def compute(self, traces, peaks, waveforms): (chan_inds,) = np.nonzero(self.neighbours_mask[main_chan]) local_projections = self.projections[chan_inds, :] if self.sparse: - wf_ptp = np.ptp(waveforms[idx][:, :, :len(chan_inds)], axis=1) + wf_ptp = np.ptp(waveforms[idx][:, :, : len(chan_inds)], axis=1) else: wf_ptp = np.ptp(waveforms[idx][:, :, chan_inds], axis=1) From 1d21b68619605151d1571402fa89d5c71bcc1c05 Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 9 Oct 2023 11:16:20 +0200 Subject: [PATCH 307/322] wip tdc2 merge with template. 
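
The merge step now receives the unit labels and templates, and a new pair-merging
criterion "normalized_template_diff" is added next to "project_distribution": two
clusters are merged when the difference between their median templates, restricted
to their common channels, normalized by the mean template amplitude and minimized
over a few sample shifts, falls below threshold_diff. A minimal standalone sketch
of that criterion (illustrative only; the helper name and free-standing form are
invented here, the real implementation lives in
sortingcomponents/clustering/merge.py in the diff below):

    import numpy as np

    def normalized_template_diff(template0, template1, num_shift=5):
        # template0 / template1: (num_samples, num_common_channels) templates of the
        # two candidate clusters, already restricted to their shared channels
        num_samples = template0.shape[0]
        norm = np.mean(np.abs(template0) + np.abs(template1))
        diffs = []
        for shift in range(-num_shift, num_shift + 1):
            t0 = template0[num_shift:num_samples - num_shift, :]
            t1 = template1[num_shift + shift:num_samples - num_shift + shift, :]
            diffs.append(np.mean(np.abs(t0 - t1)) / norm)
        # merge the pair when the smallest value is below threshold_diff
        return float(np.min(diffs)), int(np.argmin(diffs)) - num_shift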
--- .../sorters/internal/tridesclous2.py | 39 +++--- .../sortingcomponents/clustering/merge.py | 115 +++++++++++++++++- 2 files changed, 135 insertions(+), 19 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 054596e9b3..11be2c3580 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -229,24 +229,23 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): recording, features_folder, radius_um=merge_radius_um, - method="project_distribution", + # method="project_distribution", + # method_kwargs=dict( + # waveforms_sparse_mask=sparse_mask, + # feature_name="sparse_wfs", + # projection="centroid", + # criteria="distrib_overlap", + # threshold_overlap=0.3, + # min_cluster_size=min_cluster_size + 1, + # num_shift=5, + # ), + method="normalized_template_diff", method_kwargs=dict( - # neighbours_mask=neighbours_mask, waveforms_sparse_mask=sparse_mask, - # feature_name="sparse_tsvd", - feature_name="sparse_wfs", - # projection='lda', - projection="centroid", - # criteria='diptest', - # threshold_diptest=0.5, - # criteria="percentile", - # threshold_percentile=80., - criteria="distrib_overlap", - threshold_overlap=0.3, + threshold_diff=0.2, min_cluster_size=min_cluster_size + 1, - # num_shift=0 num_shift=5, - ), + ), **job_kwargs, ) @@ -255,10 +254,20 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): new_peaks = peaks.copy() new_peaks["sample_index"] -= peak_shifts + # clean very small cluster before peeler + minimum_cluster_size = 25 + labels_set, count = np.unique(post_merge_label, return_counts=True) + to_remove = labels_set[count < minimum_cluster_size] + print(to_remove) + mask = np.isin(post_merge_label, to_remove) + post_merge_label[mask] = -1 + + # final label sets labels_set = np.unique(post_merge_label) labels_set = labels_set[labels_set >= 0] - mask = post_merge_label >= 0 + + mask = post_merge_label >= 0 sorting_temp = NumpySorting.from_times_labels( new_peaks["sample_index"][mask], post_merge_label[mask], diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index 45090452dc..24cbedfb8c 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -256,7 +256,7 @@ def find_merge_pairs( sparse_wfs, sparse_mask, radius_um=70, - method="waveforms_lda", + method="project_distribution", method_kwargs={}, **job_kwargs # n_jobs=1, @@ -308,7 +308,8 @@ def find_merge_pairs( max_workers=n_jobs, initializer=find_pair_worker_init, mp_context=get_context(mp_context), - initargs=(recording, features_dict_or_folder, peak_labels, method, method_kwargs, max_threads_per_process), + initargs=(recording, features_dict_or_folder, peak_labels, labels_set, templates, + method, method_kwargs, max_threads_per_process), ) as pool: jobs = [] for ind0, ind1 in zip(indices0, indices1): @@ -338,13 +339,16 @@ def find_merge_pairs( def find_pair_worker_init( - recording, features_dict_or_folder, original_labels, method, method_kwargs, max_threads_per_process + recording, features_dict_or_folder, original_labels, + labels_set, templates, method, method_kwargs, max_threads_per_process ): global _ctx _ctx = {} _ctx["recording"] = recording _ctx["original_labels"] = original_labels + _ctx["labels_set"] = labels_set + _ctx["templates"] = templates _ctx["method"] = method 
_ctx["method_kwargs"] = method_kwargs _ctx["method_class"] = find_pair_method_dict[method] @@ -364,8 +368,10 @@ def find_pair_function_wrapper(label0, label1): global _ctx with threadpool_limits(limits=_ctx["max_threads_per_process"]): is_merge, label0, label1, shift, merge_value = _ctx["method_class"].merge( - label0, label1, _ctx["original_labels"], _ctx["peaks"], _ctx["features"], **_ctx["method_kwargs"] + label0, label1, _ctx["labels_set"], _ctx["templates"], + _ctx["original_labels"], _ctx["peaks"], _ctx["features"], **_ctx["method_kwargs"] ) + return is_merge, label0, label1, shift, merge_value @@ -388,6 +394,8 @@ class ProjectDistribution: def merge( label0, label1, + labels_set, + templates, original_labels, peaks, features, @@ -578,7 +586,106 @@ def merge( return is_merge, label0, label1, final_shift, merge_value +class NormalizedTemplateDiff: + """ + Compute the normalized (some kind of) template differences. + And merge if below a threhold. + Do this at several shift. + + """ + + name = "normalized_template_diff" + + @staticmethod + def merge( + label0, + label1, + labels_set, + templates, + original_labels, + peaks, + features, + waveforms_sparse_mask=None, + threshold_diff=0.05, + min_cluster_size=50, + num_shift=5, + ): + + assert waveforms_sparse_mask is not None + + (inds0,) = np.nonzero(original_labels == label0) + chans0 = np.unique(peaks["channel_index"][inds0]) + target_chans0 = np.flatnonzero(np.all(waveforms_sparse_mask[chans0, :], axis=0)) + + (inds1,) = np.nonzero(original_labels == label1) + chans1 = np.unique(peaks["channel_index"][inds1]) + target_chans1 = np.flatnonzero(np.all(waveforms_sparse_mask[chans1, :], axis=0)) + + # if inds0.size < min_cluster_size or inds1.size < min_cluster_size: + # is_merge = False + # merge_value = 0 + # final_shift = 0 + # return is_merge, label0, label1, final_shift, merge_value + + target_chans = np.intersect1d(target_chans0, target_chans1) + union_chans = np.union1d(target_chans0, target_chans1) + + ind0 = list(labels_set).index(label0) + template0 = templates[ind0, :, target_chans] + + ind1 = list(labels_set).index(label1) + template1 = templates[ind1, :, target_chans] + + + num_samples = template0.shape[0] + # norm = np.mean(np.abs(template0)) + np.mean(np.abs(template1)) + norm = np.mean(np.abs(template0) + np.abs(template1)) + all_shift_diff = [] + for shift in range(-num_shift, num_shift + 1): + temp0 = template0[num_shift : num_samples - num_shift, :] + temp1 = template1[num_shift + shift : num_samples - num_shift + shift, :] + d = np.mean(np.abs(temp0 - temp1)) / (norm) + all_shift_diff.append(d) + normed_diff = np.min(all_shift_diff) + + is_merge = normed_diff < threshold_diff + if is_merge: + merge_value = normed_diff + final_shift = np.argmin(all_shift_diff) - num_shift + else: + final_shift = 0 + merge_value = np.nan + + + # DEBUG = False + DEBUG = True + if DEBUG and normed_diff < 0.2: + # if DEBUG: + + import matplotlib.pyplot as plt + + fig, ax = plt.subplots() + + m0 = template0.flatten() + m1 = template1.flatten() + + ax.plot(m0, color="C0", label=f"{label0} {inds0.size}") + ax.plot(m1, color="C1", label=f"{label1} {inds1.size}") + + ax.set_title(f"union{union_chans.size} intersect{target_chans.size} \n {normed_diff:.3f} {final_shift} {is_merge}") + ax.legend() + plt.show() + + + + + + return is_merge, label0, label1, final_shift, merge_value + + + find_pair_method_list = [ ProjectDistribution, + NormalizedTemplateDiff, ] find_pair_method_dict = {e.name: e for e in find_pair_method_list} From 
7803413c2f7c64fac4619837fb1ab6cd5cf0d68e Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 9 Oct 2023 16:28:49 +0200 Subject: [PATCH 308/322] Add SPIKEINTERFACE_DEV_PATH to aws gu tests --- .github/workflows/test_containers_singularity_gpu.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_containers_singularity_gpu.yml b/.github/workflows/test_containers_singularity_gpu.yml index e74fbeb4a5..d075f5a6ef 100644 --- a/.github/workflows/test_containers_singularity_gpu.yml +++ b/.github/workflows/test_containers_singularity_gpu.yml @@ -46,5 +46,6 @@ jobs: - name: Run test singularity containers with GPU env: REPO_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + SPIKEINTERFACE_DEV_PATH: ${{ github.workspace }} run: | pytest -vv --capture=tee-sys -rA src/spikeinterface/sorters/external/tests/test_singularity_containers_gpu.py From 2753b49c4e4bfd76a5ac6971e52b3604e5ea4617 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 17:40:54 +0000 Subject: [PATCH 309/322] [pre-commit.ci] pre-commit autoupdate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.4.0 → v4.5.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 07601cd208..7153a7dfc0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-yaml - id: end-of-file-fixer From 64d507c7374a609955c69ef61df4e9cde5a7a04d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 9 Oct 2023 22:19:45 +0200 Subject: [PATCH 310/322] remove print --- src/spikeinterface/sorters/internal/tridesclous2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index 11be2c3580..ddabd46657 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -258,7 +258,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): minimum_cluster_size = 25 labels_set, count = np.unique(post_merge_label, return_counts=True) to_remove = labels_set[count < minimum_cluster_size] - print(to_remove) + mask = np.isin(post_merge_label, to_remove) post_merge_label[mask] = -1 From 8e0575838b6177a134d7f89c95f499465975978d Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Tue, 10 Oct 2023 08:11:15 +0200 Subject: [PATCH 311/322] fix plot_spike_on_trace --- src/spikeinterface/widgets/spikes_on_traces.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/spikeinterface/widgets/spikes_on_traces.py b/src/spikeinterface/widgets/spikes_on_traces.py index c2bed8fe41..b68efc3f8a 100644 --- a/src/spikeinterface/widgets/spikes_on_traces.py +++ b/src/spikeinterface/widgets/spikes_on_traces.py @@ -162,10 +162,6 @@ def plot_matplotlib(self, data_plot, **backend_kwargs): max_y = np.max(traces_widget.data_plot["channel_locations"][:, 1]) n = len(traces_widget.data_plot["channel_ids"]) - order = traces_widget.data_plot["order"] - - if order is None: - order = np.arange(n) if ax.get_legend() is not None: ax.get_legend().remove() @@ -221,7 +217,7 @@ def plot_matplotlib(self, data_plot, 
**backend_kwargs): # discontinuity times[:, -1] = np.nan times_r = times.reshape(times.shape[0] * times.shape[1]) - waveforms = traces[waveform_idxs] # [:, :, order] + waveforms = traces[waveform_idxs] waveforms_r = waveforms.reshape((waveforms.shape[0] * waveforms.shape[1], waveforms.shape[2])) for i, chan_id in enumerate(traces_widget.data_plot["channel_ids"]): From 0fd84922dd9d4ae54bcc0183a98d7a50a1e9f50c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 10 Oct 2023 07:21:34 +0000 Subject: [PATCH 312/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../sorters/internal/tridesclous2.py | 3 +- .../sortingcomponents/clustering/merge.py | 46 ++++++++++++------- 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/src/spikeinterface/sorters/internal/tridesclous2.py b/src/spikeinterface/sorters/internal/tridesclous2.py index ddabd46657..e256915fa6 100644 --- a/src/spikeinterface/sorters/internal/tridesclous2.py +++ b/src/spikeinterface/sorters/internal/tridesclous2.py @@ -245,7 +245,7 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): threshold_diff=0.2, min_cluster_size=min_cluster_size + 1, num_shift=5, - ), + ), **job_kwargs, ) @@ -266,7 +266,6 @@ def _run_from_folder(cls, sorter_output_folder, params, verbose): labels_set = np.unique(post_merge_label) labels_set = labels_set[labels_set >= 0] - mask = post_merge_label >= 0 sorting_temp = NumpySorting.from_times_labels( new_peaks["sample_index"][mask], diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index 24cbedfb8c..c46f214192 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -308,8 +308,16 @@ def find_merge_pairs( max_workers=n_jobs, initializer=find_pair_worker_init, mp_context=get_context(mp_context), - initargs=(recording, features_dict_or_folder, peak_labels, labels_set, templates, - method, method_kwargs, max_threads_per_process), + initargs=( + recording, + features_dict_or_folder, + peak_labels, + labels_set, + templates, + method, + method_kwargs, + max_threads_per_process, + ), ) as pool: jobs = [] for ind0, ind1 in zip(indices0, indices1): @@ -339,8 +347,14 @@ def find_merge_pairs( def find_pair_worker_init( - recording, features_dict_or_folder, original_labels, - labels_set, templates, method, method_kwargs, max_threads_per_process + recording, + features_dict_or_folder, + original_labels, + labels_set, + templates, + method, + method_kwargs, + max_threads_per_process, ): global _ctx _ctx = {} @@ -368,8 +382,14 @@ def find_pair_function_wrapper(label0, label1): global _ctx with threadpool_limits(limits=_ctx["max_threads_per_process"]): is_merge, label0, label1, shift, merge_value = _ctx["method_class"].merge( - label0, label1, _ctx["labels_set"], _ctx["templates"], - _ctx["original_labels"], _ctx["peaks"], _ctx["features"], **_ctx["method_kwargs"] + label0, + label1, + _ctx["labels_set"], + _ctx["templates"], + _ctx["original_labels"], + _ctx["peaks"], + _ctx["features"], + **_ctx["method_kwargs"], ) return is_merge, label0, label1, shift, merge_value @@ -610,7 +630,6 @@ def merge( min_cluster_size=50, num_shift=5, ): - assert waveforms_sparse_mask is not None (inds0,) = np.nonzero(original_labels == label0) @@ -636,7 +655,6 @@ def merge( ind1 = list(labels_set).index(label1) template1 = templates[ind1, :, 
target_chans] - num_samples = template0.shape[0] # norm = np.mean(np.abs(template0)) + np.mean(np.abs(template1)) norm = np.mean(np.abs(template0) + np.abs(template1)) @@ -656,11 +674,10 @@ def merge( final_shift = 0 merge_value = np.nan - # DEBUG = False DEBUG = True if DEBUG and normed_diff < 0.2: - # if DEBUG: + # if DEBUG: import matplotlib.pyplot as plt @@ -672,18 +689,15 @@ def merge( ax.plot(m0, color="C0", label=f"{label0} {inds0.size}") ax.plot(m1, color="C1", label=f"{label1} {inds1.size}") - ax.set_title(f"union{union_chans.size} intersect{target_chans.size} \n {normed_diff:.3f} {final_shift} {is_merge}") + ax.set_title( + f"union{union_chans.size} intersect{target_chans.size} \n {normed_diff:.3f} {final_shift} {is_merge}" + ) ax.legend() plt.show() - - - - return is_merge, label0, label1, final_shift, merge_value - find_pair_method_list = [ ProjectDistribution, NormalizedTemplateDiff, From a92b83732ac52d11cf5bc193da24e8b8e5be01a8 Mon Sep 17 00:00:00 2001 From: Pierre Yger Date: Wed, 11 Oct 2023 06:20:00 +0200 Subject: [PATCH 313/322] Forgot extra params --- src/spikeinterface/comparison/groundtruthstudy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spikeinterface/comparison/groundtruthstudy.py b/src/spikeinterface/comparison/groundtruthstudy.py index 4f9a0b2a14..e5f4ce8b31 100644 --- a/src/spikeinterface/comparison/groundtruthstudy.py +++ b/src/spikeinterface/comparison/groundtruthstudy.py @@ -286,7 +286,7 @@ def extract_waveforms_gt(self, case_keys=None, **extract_kwargs): # the waveforms depend on the dataset key wf_folder = base_folder / self.key_to_str(dataset_key) recording, gt_sorting = self.datasets[dataset_key] - we = extract_waveforms(recording, gt_sorting, folder=wf_folder) + we = extract_waveforms(recording, gt_sorting, folder=wf_folder, **extract_kwargs) def get_waveform_extractor(self, key): # some recording are not dumpable to json and the waveforms extactor need it! 
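
A quick usage sketch for the change above (example values only; any keyword
argument accepted by extract_waveforms can be forwarded this way, and the folder
name is a placeholder):

    from spikeinterface.comparison import GroundTruthStudy

    study = GroundTruthStudy(study_folder)  # study_folder: an existing study folder
    study.extract_waveforms_gt(ms_before=1.0, ms_after=2.5, sparse=True, n_jobs=-1)
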
From d2ba3fa5200dbb042ddc8471256a5979642eb7b3 Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Thu, 12 Oct 2023 09:32:41 -0400 Subject: [PATCH 314/322] Attempt to fix failing CI --- .github/actions/build-test-environment/action.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/actions/build-test-environment/action.yml b/.github/actions/build-test-environment/action.yml index 004fe31203..7241f60a8b 100644 --- a/.github/actions/build-test-environment/action.yml +++ b/.github/actions/build-test-environment/action.yml @@ -37,6 +37,11 @@ runs: - name: git-annex install run: | wget https://downloads.kitenet.net/git-annex/linux/current/git-annex-standalone-amd64.tar.gz + mkdir /home/runner/work/installation + mv git-annex-standalone-amd64.tar.gz /home/runner/work/installation/ + workdir=$(pwd) + cd /home/runner/work/installation tar xvzf git-annex-standalone-amd64.tar.gz echo "$(pwd)/git-annex.linux" >> $GITHUB_PATH + cd $workdir shell: bash From 00b208c04d39980c7d67e45d0afb445456b2824b Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Fri, 13 Oct 2023 15:34:49 -0400 Subject: [PATCH 315/322] fix for waveform parameter change --- .../modules_gallery/core/plot_4_waveform_extractor.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/modules_gallery/core/plot_4_waveform_extractor.py b/examples/modules_gallery/core/plot_4_waveform_extractor.py index 6c886c1eb0..bee8f4061b 100644 --- a/examples/modules_gallery/core/plot_4_waveform_extractor.py +++ b/examples/modules_gallery/core/plot_4_waveform_extractor.py @@ -49,7 +49,8 @@ ############################################################################### # A :py:class:`~spikeinterface.core.WaveformExtractor` object can be created with the -# :py:func:`~spikeinterface.core.extract_waveforms` function: +# :py:func:`~spikeinterface.core.extract_waveforms` function (this defaults to a sparse +# representation of the waveforms): folder = 'waveform_folder' we = extract_waveforms( @@ -87,6 +88,7 @@ recording, sorting, folder, + sparse=False, ms_before=3., ms_after=4., max_spikes_per_unit=500, @@ -149,7 +151,7 @@ # # Option 1) Save a dense waveform extractor to sparse: # -# In this case, from an existing waveform extractor, we can first estimate a +# In this case, from an existing (dense) waveform extractor, we can first estimate a # sparsity (which channels each unit is defined on) and then save to a new # folder in sparse mode: @@ -173,7 +175,7 @@ ############################################################################### -# Option 2) Directly extract sparse waveforms: +# Option 2) Directly extract sparse waveforms (current spikeinterface default): # # We can also directly extract sparse waveforms. To do so, dense waveforms are # extracted first using a small number of spikes (:code:`'num_spikes_for_sparsity'`) From 21e2e974a8c56c091300b77e76c3ac5f9d98b103 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Fri, 13 Oct 2023 15:41:18 -0400 Subject: [PATCH 316/322] fix sparsity here as well. 
--- .../modules_gallery/qualitymetrics/plot_3_quality_mertics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py b/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py index 209f357457..986680e798 100644 --- a/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py +++ b/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py @@ -30,7 +30,7 @@ # because it contains a reference to the "Recording" and the "Sorting" objects: folder = 'waveforms_mearec' -we = si.extract_waveforms(recording, sorting, folder, +we = si.extract_waveforms(recording, sorting, folder, sparsity=False, ms_before=1, ms_after=2., max_spikes_per_unit=500, n_jobs=1, chunk_durations='1s') print(we) From 9addc5769dc7beb019a98715a87fcce681f6e3cb Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Fri, 13 Oct 2023 16:04:00 -0400 Subject: [PATCH 317/322] Fix grouping of OpenEphys NPIX --- examples/modules_gallery/qualitymetrics/plot_4_curation.py | 2 +- src/spikeinterface/extractors/neoextractors/openephys.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/examples/modules_gallery/qualitymetrics/plot_4_curation.py b/examples/modules_gallery/qualitymetrics/plot_4_curation.py index c66f55f221..8953a5a835 100644 --- a/examples/modules_gallery/qualitymetrics/plot_4_curation.py +++ b/examples/modules_gallery/qualitymetrics/plot_4_curation.py @@ -61,4 +61,4 @@ curated_sorting = sorting.select_units(keep_unit_ids) print(curated_sorting) -se.NpzSortingExtractor.write_sorting(curated_sorting, 'curated_sorting.pnz') +se.NpzSortingExtractor.write_sorting(curated_sorting, 'curated_sorting.npz') diff --git a/src/spikeinterface/extractors/neoextractors/openephys.py b/src/spikeinterface/extractors/neoextractors/openephys.py index cd2b6fb941..bb3ae3435a 100644 --- a/src/spikeinterface/extractors/neoextractors/openephys.py +++ b/src/spikeinterface/extractors/neoextractors/openephys.py @@ -183,7 +183,10 @@ def __init__( probe = None if probe is not None: - self = self.set_probe(probe, in_place=True) + if probe.shank_ids is not None: + self.set_probe(probe, in_place=True, group_mode="by_shank") + else: + self.set_probe(probe, in_place=True) probe_name = probe.annotations["probe_name"] # load num_channels_per_adc depending on probe type if "2.0" in probe_name: From bf8c5d1ccbe85b22e0397af562df1a21f256331f Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Fri, 13 Oct 2023 18:24:48 -0400 Subject: [PATCH 318/322] sparse=False for dense --- .../modules_gallery/qualitymetrics/plot_3_quality_mertics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py b/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py index 986680e798..7b6aae3e30 100644 --- a/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py +++ b/examples/modules_gallery/qualitymetrics/plot_3_quality_mertics.py @@ -30,7 +30,7 @@ # because it contains a reference to the "Recording" and the "Sorting" objects: folder = 'waveforms_mearec' -we = si.extract_waveforms(recording, sorting, folder, sparsity=False, +we = si.extract_waveforms(recording, sorting, folder, sparse=False, ms_before=1, ms_after=2., max_spikes_per_unit=500, n_jobs=1, chunk_durations='1s') print(we) From 0d29a422d3adc32d0cc113479ec530c05e6a20a1 Mon Sep 17 00:00:00 2001 From: Zach McKenzie <92116279+zm711@users.noreply.github.com> Date: Sat, 14 
Oct 2023 09:00:34 -0400 Subject: [PATCH 319/322] add kwargs and make keep_unit_ids list of strings --- .../qualitymetrics/plot_4_curation.py | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/examples/modules_gallery/qualitymetrics/plot_4_curation.py b/examples/modules_gallery/qualitymetrics/plot_4_curation.py index c66f55f221..7f33e0bd8f 100644 --- a/examples/modules_gallery/qualitymetrics/plot_4_curation.py +++ b/examples/modules_gallery/qualitymetrics/plot_4_curation.py @@ -6,6 +6,8 @@ quality metrics. """ +############################################################################# +# Import the modules and/or functions necessary from spikeinterface import spikeinterface as si import spikeinterface.extractors as se @@ -15,22 +17,21 @@ ############################################################################## -# First, let's download a simulated dataset -# from the repo 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' +# Let's download a simulated dataset +# from the repo 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' # # Let's imagine that the ground-truth sorting is in fact the output of a sorter. -# local_path = si.download_dataset(remote_path='mearec/mearec_test_10s.h5') -recording, sorting = se.read_mearec(local_path) +recording, sorting = se.read_mearec(file_path=local_path) print(recording) print(sorting) ############################################################################## -# First, we extract waveforms and compute their PC scores: +# First, we extract waveforms (to be saved in the folder 'wfs_mearec') and +# compute their PC scores: -folder = 'wfs_mearec' -we = si.extract_waveforms(recording, sorting, folder, +we = si.extract_waveforms(recording, sorting, folder='wfs_mearec', ms_before=1, ms_after=2., max_spikes_per_unit=500, n_jobs=1, chunk_size=30000) print(we) @@ -47,12 +48,15 @@ ############################################################################## # We can now threshold each quality metric and select units based on some rules. # -# The easiest and most intuitive way is to use boolean masking with dataframe: +# The easiest and most intuitive way is to use boolean masking with a dataframe. 
+# +# Then create a list of unit ids that we want to keep keep_mask = (metrics['snr'] > 7.5) & (metrics['isi_violations_ratio'] < 0.2) & (metrics['nn_hit_rate'] > 0.90) print(keep_mask) keep_unit_ids = keep_mask[keep_mask].index.values +keep_unit_ids = [unit_id for unit_id in keep_unit_ids] print(keep_unit_ids) ############################################################################## @@ -61,4 +65,4 @@ curated_sorting = sorting.select_units(keep_unit_ids) print(curated_sorting) -se.NpzSortingExtractor.write_sorting(curated_sorting, 'curated_sorting.pnz') +se.NpzSortingExtractor.write_sorting(sorting=curated_sorting, save_path='curated_sorting.npz') From 4180b22eedef178ca95057ee0140d279c292e9bb Mon Sep 17 00:00:00 2001 From: Samuel Garcia Date: Mon, 16 Oct 2023 13:06:10 +0200 Subject: [PATCH 320/322] Fix slicing in merge.py and so tridesclous2 and so test_launcher.py --- src/spikeinterface/sorters/tests/test_launcher.py | 6 +++--- src/spikeinterface/sortingcomponents/clustering/merge.py | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/spikeinterface/sorters/tests/test_launcher.py b/src/spikeinterface/sorters/tests/test_launcher.py index a5e29c8fd9..fdadf533f5 100644 --- a/src/spikeinterface/sorters/tests/test_launcher.py +++ b/src/spikeinterface/sorters/tests/test_launcher.py @@ -233,15 +233,15 @@ def test_run_sorters_with_dict(): if __name__ == "__main__": - # setup_module() + setup_module() job_list = get_job_list() - # test_run_sorter_jobs_loop(job_list) + test_run_sorter_jobs_loop(job_list) # test_run_sorter_jobs_joblib(job_list) # test_run_sorter_jobs_processpoolexecutor(job_list) # test_run_sorter_jobs_multiprocessing(job_list) # test_run_sorter_jobs_dask(job_list) - test_run_sorter_jobs_slurm(job_list) + # test_run_sorter_jobs_slurm(job_list) # test_run_sorter_by_property() diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index c46f214192..a1da1ad6e9 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -649,11 +649,13 @@ def merge( target_chans = np.intersect1d(target_chans0, target_chans1) union_chans = np.union1d(target_chans0, target_chans1) + + ind0 = list(labels_set).index(label0) - template0 = templates[ind0, :, target_chans] + template0 = templates[ind0][:, target_chans] ind1 = list(labels_set).index(label1) - template1 = templates[ind1, :, target_chans] + template1 = templates[ind1][:, target_chans] num_samples = template0.shape[0] # norm = np.mean(np.abs(template0)) + np.mean(np.abs(template1)) From a00ce05a124962d3fa410947c378082d6c1caa6c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:08:30 +0000 Subject: [PATCH 321/322] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/spikeinterface/sortingcomponents/clustering/merge.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/merge.py b/src/spikeinterface/sortingcomponents/clustering/merge.py index a1da1ad6e9..d35b562298 100644 --- a/src/spikeinterface/sortingcomponents/clustering/merge.py +++ b/src/spikeinterface/sortingcomponents/clustering/merge.py @@ -649,8 +649,6 @@ def merge( target_chans = np.intersect1d(target_chans0, target_chans1) union_chans = np.union1d(target_chans0, target_chans1) - - ind0 = list(labels_set).index(label0) template0 = 
templates[ind0][:, target_chans] From 342f63b5640d78526bfe81028ebcf43aa1040dfc Mon Sep 17 00:00:00 2001 From: Alessio Buccino Date: Mon, 16 Oct 2023 16:55:51 +0200 Subject: [PATCH 322/322] Handle case when mp_context is None --- src/spikeinterface/sortingcomponents/clustering/split.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/spikeinterface/sortingcomponents/clustering/split.py b/src/spikeinterface/sortingcomponents/clustering/split.py index b433a2d16d..a31e7d62fc 100644 --- a/src/spikeinterface/sortingcomponents/clustering/split.py +++ b/src/spikeinterface/sortingcomponents/clustering/split.py @@ -59,7 +59,7 @@ def split_clusters( job_kwargs = fix_job_kwargs(job_kwargs) n_jobs = job_kwargs["n_jobs"] - mp_context = job_kwargs["mp_context"] + mp_context = job_kwargs.get("mp_context", None) progress_bar = job_kwargs["progress_bar"] max_threads_per_process = job_kwargs["max_threads_per_process"] @@ -72,7 +72,7 @@ def split_clusters( with Executor( max_workers=n_jobs, initializer=split_worker_init, - mp_context=get_context(mp_context), + mp_context=get_context(method=mp_context), initargs=(recording, features_dict_or_folder, original_labels, method, method_kwargs, max_threads_per_process), ) as pool: labels_set = np.setdiff1d(peak_labels, [-1])
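
Note on the two fixes above: the merge.py slicing change (patches 320/321) and the mp_context change (patch 322) both guard against easy-to-miss NumPy and multiprocessing behaviour. The sketch below is illustrative only; the array shapes and the job_kwargs dictionary are invented for the example and are not taken from the SpikeInterface code. It shows why templates[ind][:, chans] and templates[ind, :, chans] return differently shaped arrays, and why job_kwargs.get("mp_context", None) together with get_context(method=None) is safe when the key is missing.

import numpy as np
from multiprocessing import get_context

# Hypothetical template array: 5 units, 60 samples, 4 channels (made-up sizes).
templates = np.random.randn(5, 60, 4).astype("float32")
target_chans = np.array([0, 2])

# Plain slicing after integer selection keeps the (samples, channels) layout.
a = templates[0][:, target_chans]      # shape (60, 2)

# Mixing an integer, a slice and an index array triggers advanced indexing:
# the advanced axes are moved to the front, giving (channels, samples).
b = templates[0, :, target_chans]      # shape (2, 60)

assert a.shape == (60, 2) and b.shape == (2, 60)
assert np.array_equal(a, b.T)          # same values, transposed layout

# Hypothetical job_kwargs without an "mp_context" entry.
job_kwargs = {"n_jobs": 2, "progress_bar": False}
mp_context = job_kwargs.get("mp_context", None)   # .get() avoids a KeyError
ctx = get_context(method=mp_context)               # method=None -> platform default context

print(a.shape, b.shape, type(ctx).__name__)

Indexing with templates[ind0][:, target_chans] therefore keeps the (num_samples, num_channels) orientation that the surrounding merge() code expects when it later reads template0.shape[0] as the number of samples.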