Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add test to check unit structure in quality metric calculator output #2973

Merged
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions src/spikeinterface/qualitymetrics/misc_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -388,9 +388,7 @@ def compute_refrac_period_violations(
nb_violations = {}
rp_contamination = {}

for i, unit_id in enumerate(sorting.unit_ids):
if unit_id not in unit_ids:
continue
for i, unit_id in enumerate(unit_ids):
chrishalcrow marked this conversation as resolved.
Show resolved Hide resolved
chrishalcrow marked this conversation as resolved.
Show resolved Hide resolved

nb_violations[unit_id] = n_v = nb_rp_violations[i]
N = num_spikes[unit_id]
Expand Down
107 changes: 107 additions & 0 deletions src/spikeinterface/qualitymetrics/tests/test_metrics_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,12 @@

from spikeinterface.qualitymetrics.utils import create_ground_truth_pc_distributions

from spikeinterface.qualitymetrics.quality_metric_list import (
_misc_metric_name_to_func,
)

from spikeinterface.qualitymetrics import (
get_quality_metric_list,
mahalanobis_metrics,
lda_metrics,
nearest_neighbors_metrics,
Expand All @@ -34,6 +38,7 @@
compute_amplitude_cv_metrics,
compute_sd_ratio,
get_synchrony_counts,
compute_quality_metrics,
)

from spikeinterface.core.basesorting import minimum_spike_dtype
Expand All @@ -47,6 +52,108 @@
job_kwargs = dict(n_jobs=2, progress_bar=True, chunk_duration="1s")


def _small_sorting_analyzer():
    """Build a tiny in-memory sorting analyzer for the quality-metric tests.

    A 2-second ground-truth recording with 4 units is generated, then three
    units are kept — deliberately out of their original order and relabelled
    ("#3", "#9", "#4") — so tests can verify that metric outputs follow the
    analyzer's unit ordering rather than the generation order.
    """
    recording, sorting = generate_ground_truth_recording(durations=[2.0], num_units=4, seed=1205)

    # Keep units 3, 2 and 0 (in that order) under new string labels.
    sorting = sorting.select_units([3, 2, 0], ["#3", "#9", "#4"])

    analyzer = create_sorting_analyzer(recording=recording, sorting=sorting, format="memory")

    # Extensions the misc quality metrics rely on; fixed seeds keep the
    # fixture deterministic across test runs.
    analyzer.compute(
        {
            "random_spikes": {"seed": 1205},
            "noise_levels": {"seed": 1205},
            "waveforms": {},
            "templates": {},
            "spike_amplitudes": {},
            "spike_locations": {},
            "principal_components": {},
        }
    )

    return analyzer


@pytest.fixture(scope="module")
def small_sorting_analyzer():
    """Module-scoped analyzer fixture shared across the quality-metric tests."""
    analyzer = _small_sorting_analyzer()
    return analyzer


def test_unit_structure_in_output(small_sorting_analyzer):
    """Check that every metric function keys its output by unit id, in order.

    Each metric may return either a single dict or an iterable of dicts
    (e.g. a namedtuple of per-unit results); both shapes are checked.
    Without ``unit_ids`` the keys must follow the analyzer's unit order;
    with an explicit ``unit_ids`` they must follow the requested order.
    """
    # Default call: keys follow the analyzer's unit ordering.
    for metric_name in get_quality_metric_list():
        result = _misc_metric_name_to_func[metric_name](sorting_analyzer=small_sorting_analyzer)

        if isinstance(result, dict):
            assert list(result.keys()) == ["#3", "#9", "#4"]
        else:
            for one_result in result:
                assert list(one_result.keys()) == ["#3", "#9", "#4"]

    # Explicit subset: keys follow the requested `unit_ids` order.
    for metric_name in get_quality_metric_list():
        result = _misc_metric_name_to_func[metric_name](sorting_analyzer=small_sorting_analyzer, unit_ids=["#9", "#3"])

        if isinstance(result, dict):
            assert list(result.keys()) == ["#9", "#3"]
        else:
            for one_result in result:
                assert list(one_result.keys()) == ["#9", "#3"]


def test_unit_id_order_independence(small_sorting_analyzer):
    """
    Takes two almost-identical sorting_analyzers, whose unit_ids are in different orders and have different labels,
    and checks that their calculated quality metrics are independent of the ordering and labelling.

    The second analyzer keeps units 0, 2 and 3 in ascending order with their
    integer ids; the fixture analyzer holds the same units reordered and
    relabelled (3 -> "#3", 2 -> "#9", 0 -> "#4").
    """
    recording, sorting = generate_ground_truth_recording(durations=[2.0], num_units=4, seed=1205)
    sorting = sorting.select_units([0, 2, 3])
    analyzer_2 = create_sorting_analyzer(recording=recording, sorting=sorting, format="memory")

    analyzer_2.compute(
        {
            "random_spikes": {"seed": 1205},
            "noise_levels": {"seed": 1205},
            "waveforms": {},
            "templates": {},
            "spike_amplitudes": {},
            "spike_locations": {},
            "principal_components": {},
        }
    )

    # need special params to get non-nan results on a short recording
    qm_params = {
        "presence_ratio": {"bin_duration_s": 0.1},
        "amplitude_cutoff": {"num_histogram_bins": 3},
        "amplitude_cv": {"average_num_spikes_per_bin": 7, "min_num_bins": 3},
        "firing_range": {"bin_size_s": 1},
        "isi_violation": {"isi_threshold_ms": 10},
        "drift": {"interval_s": 1, "min_spikes_per_interval": 5},
        "sliding_rp_violation": {"max_ref_period_ms": 50, "bin_size_ms": 0.15},
    }

    metrics_labelled = compute_quality_metrics(
        small_sorting_analyzer, metric_names=get_quality_metric_list(), qm_params=qm_params
    )
    metrics_plain = compute_quality_metrics(
        analyzer_2, metric_names=get_quality_metric_list(), qm_params=qm_params
    )

    # Every metric value must agree between the two analyzers under the
    # label <-> integer-id correspondence established by the fixture.
    for metric, labelled_column in metrics_labelled.items():
        for label, unit_id in (("#3", 3), ("#9", 2), ("#4", 0)):
            assert metrics_plain[metric][unit_id] == labelled_column[label]


def _sorting_analyzer_simple():
recording, sorting = generate_ground_truth_recording(
durations=[
Expand Down